Commit 60f29555 authored by Andrey Filippov

merged with master

parents 50e586e1 0f25f13b
@@ -13,6 +13,9 @@
* more details.
*/
/* this one is required for printk_ratelimited */
#define CONFIG_PRINTK
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -24,7 +27,13 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <elphel/exifa.h>
#include <elphel/elphel393-mem.h>
#include "ahci.h"
#include "ahci_elphel.h"
#include "../elphel/exif393.h"
#include "../elphel/jpeghead.h"
#define DRV_NAME "elphel-ahci"
/*
@@ -45,12 +54,29 @@ static const struct of_device_id ahci_elphel_of_match[];
static const struct attribute_group dev_attr_root_group;
static bool load_driver = false;
static unsigned char app15[ALIGNMENT_SIZE] = {0xff, 0xef};
static void elphel_cmd_issue(struct ata_port *ap, uint64_t start, uint16_t count, struct fvec *sgl, unsigned int elem, uint8_t cmd);
static int init_buffers(struct device *dev, struct frame_buffers *buffs);
static void init_vectors(struct frame_buffers *buffs, struct fvec *chunks);
static void deinit_buffers(struct device *dev, struct frame_buffers *buffs);
static inline struct elphel_ahci_priv *dev_get_dpriv(struct device *dev);
static void finish_cmd(struct elphel_ahci_priv *dpriv);
static void finish_rec(struct elphel_ahci_priv *dpriv);
static int process_cmd(struct elphel_ahci_priv *dpriv);
static inline size_t get_size_from(const struct fvec *vects, int index, size_t offset, int all);
static inline void vectmov(struct fvec *vec, size_t len);
static inline void vectsplit(struct fvec *vect, struct fvec *parts, size_t *n_elem);
static int move_tail(struct elphel_ahci_priv *dpriv);
static int move_head(struct elphel_ahci_priv *dpriv);
static size_t get_prev_slot(const struct elphel_ahci_priv *dpriv);
static int is_cmdq_empty(const struct elphel_ahci_priv *dpriv);
void process_queue(unsigned long data);
static void set_flag(struct elphel_ahci_priv *dpriv, uint32_t flag);
static void reset_flag(struct elphel_ahci_priv *dpriv, uint32_t flag);
/* debug functions */
static int check_chunks(struct fvec *vects);
static void dump_sg_list(const struct device *dev, const struct fvec *sgl, size_t elems);
static ssize_t set_load_flag(struct device *dev, struct device_attribute *attr,
const char *buff, size_t buff_sz)
@@ -94,6 +120,70 @@ static void elphel_defer_load(struct device *dev)
iounmap(ctrl_ptr);
}
static irqreturn_t elphel_irq_handler(int irq, void *dev_instance)
{
unsigned long irq_flags;
irqreturn_t handled;
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv = host->private_data;
struct ata_port *port = host->ports[DEFAULT_PORT_NUM];
void __iomem *port_mmio = ahci_port_base(port);
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
uint32_t irq_stat, host_irq_stat;
if (dpriv->flags & IRQ_SIMPLE) {
/* handle interrupt from internal command */
host_irq_stat = readl(hpriv->mmio + HOST_IRQ_STAT);
if (!host_irq_stat)
return IRQ_NONE;
dpriv->flags &= ~IRQ_SIMPLE;
irq_stat = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(host->dev, "irq_stat = 0x%x, host irq_stat = 0x%x\n", irq_stat, host_irq_stat);
writel(irq_stat, port_mmio + PORT_IRQ_STAT);
writel(host_irq_stat, hpriv->mmio + HOST_IRQ_STAT);
handled = IRQ_HANDLED;
tasklet_schedule(&dpriv->bh);
} else {
/* pass handling to AHCI level and then decide if the resource should be freed */
handled = ahci_single_irq_intr(irq, dev_instance);
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
if (is_cmdq_empty(dpriv)) {
dpriv->flags &= ~DISK_BUSY;
} else {
tasklet_schedule(&dpriv->bh);
}
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
return handled;
}
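/*
 * Interrupt routing summary for the handler above: commands issued internally by
 * this driver are expected to set IRQ_SIMPLE beforehand (presumably in
 * elphel_cmd_issue()); their completion is acknowledged right here by clearing
 * PORT_IRQ_STAT/HOST_IRQ_STAT and handed over to the process_queue() tasklet.
 * Everything else goes through the stock AHCI interrupt handler, after which
 * DISK_BUSY is cleared only if the command queue has drained.
 */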
/** Command queue processing tasklet */
void process_queue(unsigned long data)
{
unsigned long irq_flags;
struct elphel_ahci_priv *dpriv = (struct elphel_ahci_priv *)data;
if (process_cmd(dpriv) == 0) {
finish_cmd(dpriv);
if (move_head(dpriv) != -1) {
process_cmd(dpriv);
} else {
if (dpriv->flags & DELAYED_FINISH) {
dpriv->flags &= ~DELAYED_FINISH;
finish_rec(dpriv);
} else {
/* all commands have been processed */
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags &= ~DISK_BUSY;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
}
}
}
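/*
 * Inferred command slot lifecycle (a sketch based on the helpers declared above;
 * the write-side entry point is not visible in this hunk):
 *
 *   write side: move_tail() reserves a slot -> frame data lands in fbuffs /
 *               data_chunks -> DISK_BUSY is set -> process_cmd() issues the I/O
 *   IRQ side:   elphel_irq_handler() -> process_queue() tasklet -> when
 *               process_cmd() returns 0 the slot is retired via finish_cmd() and
 *               move_head() advances to the next queued frame, if any
 *
 * process_cmd() returns the number of S/G entries it mapped, so 0 means the
 * current command has been fully processed.
 */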
// What about port_stop and freeing/unmapping ?
// Or at least check if it is re-started and memory is already allocated/mapped
static int elphel_port_start(struct ata_port *ap)
@@ -177,12 +267,12 @@ static int elphel_parse_prop(const struct device_node *devn,
static int elphel_drv_probe(struct platform_device *pdev)
{
int ret, i, irq_num;
struct ahci_host_priv *hpriv;
struct elphel_ahci_priv *dpriv;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
unsigned int reg_val;
struct ata_host *host;
if (&dev->kobj) {
ret = sysfs_create_group(&dev->kobj, &dev_attr_root_group);
@@ -197,6 +287,17 @@ static int elphel_drv_probe(struct platform_device *pdev)
if (!dpriv)
return -ENOMEM;
dpriv->dev = dev;
spin_lock_init(&dpriv->flags_lock);
tasklet_init(&dpriv->bh, process_queue, (unsigned long)dpriv);
for (i = 0; i < MAX_CMD_SLOTS; i++) {
ret = init_buffers(dev, &dpriv->fbuffs[i]);
if (ret != 0) {
	while (--i >= 0)
		deinit_buffers(dev, &dpriv->fbuffs[i]);
	return ret;
}
init_vectors(&dpriv->fbuffs[i], dpriv->data_chunks[i]);
}
match = of_match_device(ahci_elphel_of_match, &pdev->dev);
if (!match)
return -EINVAL;
@@ -219,12 +320,28 @@ static int elphel_drv_probe(struct platform_device *pdev)
return ret;
}
/* reassign automatically assigned interrupt handler */
irq_num = platform_get_irq(pdev, 0);
host = platform_get_drvdata(pdev);
devm_free_irq(dev, irq_num, host);
ret = devm_request_irq(dev, irq_num, elphel_irq_handler, IRQF_SHARED, dev_name(dev), host);
if (ret) {
dev_err(dev, "failed to reassign default IRQ handler to Elphel handler\n");
return ret;
}
return 0;
}
static int elphel_drv_remove(struct platform_device *pdev)
{
int i;
struct elphel_ahci_priv *dpriv = dev_get_dpriv(&pdev->dev);
dev_info(&pdev->dev, "removing Elphel AHCI driver\n");
tasklet_kill(&dpriv->bh);
for (i = 0; i < MAX_CMD_SLOTS; i++)
deinit_buffers(&pdev->dev, &dpriv->fbuffs[i]);
sysfs_remove_group(&pdev->dev.kobj, &dev_attr_root_group);
ata_platform_remove_one(pdev);
@@ -291,9 +408,1003 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
}
/** Set flag @e flag in driver private structure. This function uses spin lock to access the flags variable. */
static void set_flag(struct elphel_ahci_priv *dpriv, uint32_t flag)
{
unsigned long irq_flags;
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags |= flag;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
/** Reset flag @e flag in driver private structure. This function uses spin lock to access the flags variable. */
static void reset_flag(struct elphel_ahci_priv *dpriv, uint32_t flag)
{
unsigned long irq_flags;
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags &= ~flag;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
/** Map buffer vectors to S/G list and return the number of vectors mapped */
static int map_vectors(struct elphel_ahci_priv *dpriv)
{
int i;
size_t index = 0;
int finish = 0;
size_t total_sz = 0;
size_t tail;
struct fvec *chunks;
struct fvec vect;
chunks = dpriv->data_chunks[dpriv->head_ptr];
for (i = dpriv->curr_data_chunk; i < MAX_DATA_CHUNKS; i++) {
if (i == CHUNK_REM)
/* remainder should never be processed */
continue;
if (i == dpriv->curr_data_chunk) {
total_sz = chunks[i].iov_len - dpriv->curr_data_offset;
vect.iov_base = (unsigned char *)chunks[i].iov_base + dpriv->curr_data_offset;
vect.iov_dma = chunks[i].iov_dma + dpriv->curr_data_offset;
vect.iov_len = chunks[i].iov_len - dpriv->curr_data_offset;
} else {
total_sz += chunks[i].iov_len;
vect = chunks[i];
}
if (total_sz > dpriv->max_data_sz) {
/* truncate current buffer and finish mapping */
tail = total_sz - dpriv->max_data_sz;
vect.iov_len -= tail;
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = chunks[i].iov_len - tail;
finish = 1;
} else if (unlikely(total_sz == dpriv->max_data_sz)) {
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = chunks[i].iov_len;
finish = 1;
}
if (vect.iov_len != 0) {
if (vect.iov_len < MAX_PRDT_LEN) {
dpriv->sgl[index++] = vect;
} else {
/* current vector is too long and can not be mapped to a single PRDT entry, split it */
vectsplit(&vect, dpriv->sgl, &index);
if (vect.iov_len < MAX_PRDT_LEN) {
dpriv->sgl[index++] = vect;
} else {
/* free slots in PRDT table have ended */
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = (unsigned char *)vect.iov_base - (unsigned char *)chunks[i].iov_base;
finish = 1;
}
}
if (index == (MAX_SGL_LEN - 1))
finish = 1;
}
if (finish)
break;
}
if (finish == 0) {
/* frame vectors have been fully processed, stop calling me */
dpriv->curr_data_chunk = MAX_DATA_CHUNKS;
dpriv->curr_data_offset = 0;
}
return index;
}
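/*
 * Usage sketch (hypothetical caller shown for clarity; the real call site is
 * process_cmd()): map_vectors() is intended to be called repeatedly for a single
 * frame. Each call fills dpriv->sgl with at most MAX_SGL_LEN entries and advances
 * curr_data_chunk/curr_data_offset, so a frame larger than max_data_sz or the
 * PRDT capacity is written with several consecutive commands:
 *
 *   int n_elem;
 *   while ((n_elem = map_vectors(dpriv)) != 0) {
 *       dump_sg_list(dpriv->dev, dpriv->sgl, n_elem);
 *       // issue a WRITE command over dpriv->sgl[0..n_elem-1] and
 *       // wait for its completion interrupt before the next iteration
 *   }
 */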
/** Split buffer pointed by vector @e vect into several smaller buffers. Each part will be at most #MAX_PRDT_LEN bytes */
static inline void vectsplit(struct fvec *vect, struct fvec *parts, size_t *n_elem)
{
size_t len;
struct fvec split;
while (vect->iov_len > MAX_PRDT_LEN && *n_elem < MAX_SGL_LEN) {
len = MAX_PRDT_LEN - MAX_PRDT_LEN % PHY_BLOCK_SIZE;
split.iov_base = vect->iov_base;
split.iov_dma = vect->iov_dma;
split.iov_len = len;
vectmov(vect, len);
parts[*n_elem] = split;
*n_elem = *n_elem + 1;
}
}
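/*
 * Worked example, assuming MAX_PRDT_LEN = 4 MiB and PHY_BLOCK_SIZE = 512 (both
 * values are assumptions; see ahci_elphel.h): 4 MiB is a whole number of 512-byte
 * blocks, so len = 4 MiB, and a 10 MiB vector yields two 4 MiB entries in @e parts
 * while @e vect is left holding the 2 MiB tail for the caller to map.
 */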
/** Append @e len bytes from the buffer @e src to the buffer pointed by @e dest vector, advancing its length */
static inline void vectcpy(struct fvec *dest, void *src, size_t len)
{
unsigned char *d = (unsigned char *)dest->iov_base;
memcpy(d + dest->iov_len, src, len);
dest->iov_len += len;
}
/** Move vector forward by @e len bytes decreasing its length */
static inline void vectmov(struct fvec *vec, size_t len)
{
if (vec->iov_len >= len) {
vec->iov_base = (unsigned char *)vec->iov_base + len;
vec->iov_dma += len;
vec->iov_len -= len;
}
}
/** Shrink vector length by @e len bytes */
static inline void vectshrink(struct fvec *vec, size_t len)
{
if (vec->iov_len >= len) {
vec->iov_len -= len;
}
}
/** Return the number of bytes needed to align @e data_len to @e align_len boundary */
static inline size_t align_bytes_num(size_t data_len, size_t align_len)
{
size_t rem = data_len % align_len;
if (rem == 0)
return 0;
else
return align_len - rem;
}
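/*
 * Examples: align_bytes_num(1000, 512) == 24, since 1000 % 512 == 488 and
 * 512 - 488 == 24; align_bytes_num(1024, 512) == 0 as the data is already aligned.
 */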
/** This helper function is used to position a pointer @e offset bytes from the end
* of a buffer. DMA handle is not updated intentionally as it is not needed during copying */
static inline unsigned char *vectrpos(struct fvec *vec, size_t offset)
{
return (unsigned char *)vec->iov_base + (vec->iov_len - offset);
}
/** Align current frame to disk sector boundary and each individual buffer to #ALIGNMENT_SIZE boundary */
static void align_frame(struct elphel_ahci_priv *dpriv)
{
unsigned char *src;
size_t len, total_sz, data_len;
size_t cmd_slot = dpriv->tail_ptr;
size_t prev_slot = get_prev_slot(dpriv);
size_t max_len = dpriv->fbuffs[cmd_slot].common_buff.iov_len;
struct device *dev = dpriv->dev;
struct frame_buffers *fbuffs = &dpriv->fbuffs[cmd_slot];
struct fvec *chunks = dpriv->data_chunks[cmd_slot];
struct fvec *cbuff = &chunks[CHUNK_COMMON];
struct fvec *rbuff = &dpriv->data_chunks[prev_slot][CHUNK_REM];
total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM) + rbuff->iov_len;
if (total_sz < PHY_BLOCK_SIZE) {
/* the frame length is less than sector size, delay this frame */
if (prev_slot != cmd_slot) {
/* some data may be left from previous frame */
vectcpy(&chunks[CHUNK_REM], rbuff->iov_base, rbuff->iov_len);
vectshrink(rbuff, rbuff->iov_len);
}
dev_dbg(dev, "frame size is less than sector size: %u bytes; delay recording\n", total_sz);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_LEADER].iov_base, chunks[CHUNK_LEADER].iov_len);
vectshrink(&chunks[CHUNK_LEADER], chunks[CHUNK_LEADER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_EXIF].iov_base, chunks[CHUNK_EXIF].iov_len);
vectshrink(&chunks[CHUNK_EXIF], chunks[CHUNK_EXIF].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_HEADER].iov_base, chunks[CHUNK_HEADER].iov_len);
vectshrink(&chunks[CHUNK_HEADER], chunks[CHUNK_HEADER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
return;
}
dma_sync_single_for_cpu(dev, fbuffs->common_buff.iov_dma, fbuffs->common_buff.iov_len, DMA_TO_DEVICE);
/* copy remainder of previous frame to the beginning of common buffer */
if (likely(rbuff->iov_len != 0)) {
len = rbuff->iov_len;
dev_dbg(dev, "copy %u bytes from REM #%u to common buffer\n", len, prev_slot);
vectcpy(cbuff, rbuff->iov_base, len);
vectshrink(rbuff, rbuff->iov_len);
}
/* copy JPEG marker */
len = chunks[CHUNK_LEADER].iov_len;
vectcpy(cbuff, chunks[CHUNK_LEADER].iov_base, len);
vectshrink(&chunks[CHUNK_LEADER], chunks[CHUNK_LEADER].iov_len);
/* copy Exif if present */
if (chunks[CHUNK_EXIF].iov_len != 0) {
len = chunks[CHUNK_EXIF].iov_len;
dev_dbg(dev, "copy %u bytes from EXIF to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_EXIF].iov_base, len);
vectshrink(&chunks[CHUNK_EXIF], chunks[CHUNK_EXIF].iov_len);
}
/* align common buffer to ALIGNMENT boundary, APP15 marker should be placed before header data */
data_len = cbuff->iov_len + chunks[CHUNK_HEADER].iov_len;
len = align_bytes_num(data_len, ALIGNMENT_SIZE);
if (len < JPEG_MARKER_LEN + JPEG_SIZE_LEN && len != 0) {
/* the number of bytes needed for alignment is less than the length of the marker itself, increase the number of stuffing bytes */
len += ALIGNMENT_SIZE;
}
dev_dbg(dev, "total number of stuffing bytes in APP15 marker: %u\n", len);
app15[3] = len - JPEG_MARKER_LEN;
vectcpy(cbuff, app15, len);
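/*
 * APP15 sketch: app15[] begins with the 0xff 0xef marker, and its JPEG length
 * field excludes the two marker bytes, hence app15[3] = len - JPEG_MARKER_LEN
 * (this assumes JPEG_MARKER_LEN == 2, JPEG_SIZE_LEN == 2 and total padding below
 * 256 bytes so that app15[2] can stay 0). Example with an assumed ALIGNMENT_SIZE
 * of 32: if data_len % 32 == 29 then len == 3, which is smaller than the 4-byte
 * marker-plus-size field, so len grows to 35 and app15[3] = 33.
 */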
/* copy JPEG header */
len = chunks[CHUNK_HEADER].iov_len;
dev_dbg(dev, "copy %u bytes from HEADER to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_HEADER].iov_base, len);
vectshrink(&chunks[CHUNK_HEADER], chunks[CHUNK_HEADER].iov_len);
/* check if there is enough data to continue - JPEG data length can be too short */
len = get_size_from(chunks, CHUNK_DATA_0, 0, EXCLUDE_REM);
if (len < PHY_BLOCK_SIZE) {
size_t num = align_bytes_num(cbuff->iov_len, PHY_BLOCK_SIZE);
dev_dbg(dev, "jpeg data is too short, delay this frame\n");
if (len >= num) {
/* there is enough data to align common buffer to sector boundary */
if (num >= chunks[CHUNK_DATA_0].iov_len) {
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
num -= chunks[CHUNK_DATA_0].iov_len;
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_DATA_0], num);
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, num);
vectshrink(&chunks[CHUNK_DATA_0], num);
num = 0;
}
if (num >= chunks[CHUNK_DATA_1].iov_len) {
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
num -= chunks[CHUNK_DATA_1].iov_len;
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_DATA_1], num);
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, num);
vectshrink(&chunks[CHUNK_DATA_1], num);
num = 0;
}
if (num >= chunks[CHUNK_TRAILER].iov_len) {
vectcpy(cbuff, chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
num -= chunks[CHUNK_TRAILER].iov_len;
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_TRAILER], num);
vectcpy(cbuff, chunks[CHUNK_TRAILER].iov_base, num);
vectshrink(&chunks[CHUNK_TRAILER], num);
num = 0;
}
} else {
/* there is not enough data to align common buffer to sector boundary, truncate common buffer */
data_len = cbuff->iov_len % PHY_BLOCK_SIZE;
src = vectrpos(cbuff, data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(cbuff, data_len);
}
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
return;
}
/* align frame to sector size boundary; total size could have changed by the moment - recalculate */
total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM);
len = total_sz % PHY_BLOCK_SIZE;
dev_dbg(dev, "number of bytes crossing sector boundary: %u\n", len);
if (len != 0) {
if (len >= (chunks[CHUNK_DATA_1].iov_len + chunks[CHUNK_TRAILER].iov_len)) {
/* current frame is not split or the second part of JPEG data is too short */
data_len = len - chunks[CHUNK_DATA_1].iov_len - chunks[CHUNK_TRAILER].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else if (len >= chunks[CHUNK_TRAILER].iov_len) {
/* there is enough data in second part to align the frame */
data_len = len - chunks[CHUNK_TRAILER].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
/* the trailing marker would be split by the sector boundary; move a whole extra
 * block plus the unaligned tail from the JPEG data block(s) to the remainder
 * buffer so that the trailing marker is carried over intact */
data_len = PHY_BLOCK_SIZE - (chunks[CHUNK_TRAILER].iov_len - len);
if (data_len >= chunks[CHUNK_DATA_1].iov_len) {
size_t cut_len = data_len - chunks[CHUNK_DATA_1].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_0], cut_len);
vectcpy(&chunks[CHUNK_REM], src, cut_len);
vectshrink(&chunks[CHUNK_DATA_0], cut_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
}
} else {
/* the frame is aligned to sector boundary but some buffers may be not */
chunks[CHUNK_ALIGN].iov_base = vectrpos(cbuff, 0);
chunks[CHUNK_ALIGN].iov_dma = cbuff->iov_dma + cbuff->iov_len;
chunks[CHUNK_ALIGN].iov_len = 0;
if (chunks[CHUNK_DATA_1].iov_len == 0) {
data_len = chunks[CHUNK_DATA_0].iov_len % ALIGNMENT_SIZE;
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_ALIGN], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
} else {
data_len = chunks[CHUNK_DATA_1].iov_len % ALIGNMENT_SIZE;
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_ALIGN], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
}
vectcpy(&chunks[CHUNK_ALIGN], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
/* debug sanity check, should not happen */
if (cbuff->iov_len >= max_len) {
dev_err(dev, "ERROR: the number of bytes copied to common buffer exceeds its size\n");
}
}
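/*
 * Frame assembly performed by align_frame(), in stream order (a summary of the
 * code above, not additional behavior):
 *
 *   common buffer: [REM of previous frame][LEADER][EXIF][APP15 padding][HEADER]
 *   followed by:   [DATA_0][DATA_1][TRAILER]
 *
 * Whatever does not fill a whole PHY_BLOCK_SIZE sector at the end migrates into
 * CHUNK_REM and is prepended to the next frame; frames shorter than one sector
 * are deferred entirely into CHUNK_REM.
 */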
/** Calculate the number of blocks this frame will occupy. The frame must be aligned to block size */
static inline size_t get_blocks_num(struct fvec *sgl, size_t n_elem)
{
int num;
size_t total = 0;
for (num = 0; num < n_elem; num++) {
total += sgl[num].iov_len;
}
return total / PHY_BLOCK_SIZE;
}
/** Calculate the size of current frame in bytes starting from vector and offset given */
static inline size_t get_size_from(const struct fvec *vects, int index, size_t offset, int all)
{
int i;
size_t total = 0;
if (index >= MAX_DATA_CHUNKS || offset > vects[index].iov_len) {
return 0;
}
for (i = index; i < MAX_DATA_CHUNKS; i++) {
if (i == CHUNK_REM && all == EXCLUDE_REM)
/* remainder should not be processed */
continue;
if (i == index)
total += vects[i].iov_len - offset;
else
total += vects[i].iov_len;
}
return total;
}
/** Set vectors pointing to data buffers, except for JPEG data which is set in the circbuf driver */
static void init_vectors(struct frame_buffers *buffs, struct fvec *chunks)
{
chunks[CHUNK_EXIF].iov_base = buffs->exif_buff.iov_base;
chunks[CHUNK_EXIF].iov_len = 0;
chunks[CHUNK_LEADER].iov_base = buffs->jpheader_buff.iov_base;
chunks[CHUNK_LEADER].iov_len = 0;
chunks[CHUNK_HEADER].iov_base = (unsigned char *)chunks[CHUNK_LEADER].iov_base + JPEG_MARKER_LEN;
chunks[CHUNK_HEADER].iov_len = 0;
chunks[CHUNK_TRAILER].iov_base = buffs->trailer_buff.iov_base;
chunks[CHUNK_TRAILER].iov_len = 0;
chunks[CHUNK_REM].iov_base = buffs->rem_buff.iov_base;
chunks[CHUNK_REM].iov_len = 0;
/* this is the only DMA mapped buffer and its DMA address should be set */
chunks[CHUNK_COMMON].iov_base = buffs->common_buff.iov_base;
chunks[CHUNK_COMMON].iov_dma = buffs->common_buff.iov_dma;
chunks[CHUNK_COMMON].iov_len = 0;
}
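/*
 * Note: CHUNK_LEADER and CHUNK_HEADER share jpheader_buff. The leader (the JPEG
 * SOI marker, JPEG_MARKER_LEN bytes, presumably 0xff 0xd8) occupies the start of
 * the buffer and the header proper begins right after it, which lets align_frame()
 * copy the two pieces out independently.
 */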
/** Allocate memory for frame buffers */
static int init_buffers(struct device *dev, struct frame_buffers *buffs)
{
int mult;
int total_sz;
unsigned char *ptr;
buffs->exif_buff.iov_base = kmalloc(MAX_EXIF_SIZE, GFP_KERNEL);
if (!buffs->exif_buff.iov_base)
return -ENOMEM;
buffs->exif_buff.iov_len = MAX_EXIF_SIZE;
buffs->jpheader_buff.iov_base = kmalloc(JPEG_HEADER_MAXSIZE, GFP_KERNEL);
if (!buffs->jpheader_buff.iov_base)
goto err_header;
buffs->jpheader_buff.iov_len = JPEG_HEADER_MAXSIZE;
buffs->trailer_buff.iov_base = kmalloc(JPEG_MARKER_LEN, GFP_KERNEL);
if (!buffs->trailer_buff.iov_base)
goto err_trailer;
buffs->trailer_buff.iov_len = JPEG_MARKER_LEN;
ptr = buffs->trailer_buff.iov_base;
ptr[0] = 0xff;
ptr[1] = 0xd9;
/* common buffer should be large enough to contain JPEG header, Exif, some alignment bytes and
* remainder from previous frame */
total_sz = MAX_EXIF_SIZE + JPEG_HEADER_MAXSIZE + ALIGNMENT_SIZE + 2 * PHY_BLOCK_SIZE;
if (total_sz > PAGE_SIZE) {
mult = total_sz / PAGE_SIZE + 1;
total_sz = mult * PAGE_SIZE;
} else {
total_sz = PAGE_SIZE;
}
buffs->common_buff.iov_base = kmalloc(total_sz, GFP_KERNEL);
if (!buffs->common_buff.iov_base)
goto err_common;
buffs->common_buff.iov_len = total_sz;
/* this is the only buffer which needs DMA mapping as all other data will be collected in it */
buffs->common_buff.iov_dma = dma_map_single(dev, buffs->common_buff.iov_base, buffs->common_buff.iov_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, buffs->common_buff.iov_dma))
goto err_common_dma;
buffs->rem_buff.iov_base = kmalloc(2 * PHY_BLOCK_SIZE, GFP_KERNEL);
if (!buffs->rem_buff.iov_base)
goto err_remainder;
buffs->rem_buff.iov_len = 2 * PHY_BLOCK_SIZE;
return 0;
err_remainder:
dma_unmap_single(dev, buffs->common_buff.iov_dma, buffs->common_buff.iov_len, DMA_TO_DEVICE);
err_common_dma:
kfree(buffs->common_buff.iov_base);
err_common:
kfree(buffs->trailer_buff.iov_base);
err_trailer:
kfree(buffs->jpheader_buff.iov_base);
err_header:
kfree(buffs->exif_buff.iov_base);
return -ENOMEM;
}
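/*
 * Sizing example for the common buffer, with assumed values MAX_EXIF_SIZE = 0x1000,
 * JPEG_HEADER_MAXSIZE = 0x600, ALIGNMENT_SIZE = 32, PHY_BLOCK_SIZE = 512 and
 * PAGE_SIZE = 4096: total_sz = 4096 + 1536 + 32 + 1024 = 6688 > PAGE_SIZE, so
 * mult = 2 and the buffer is rounded up to two pages (8192 bytes).
 */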
/** Free allocated frame buffers */
static void deinit_buffers(struct device *dev, struct frame_buffers *buffs)
{
kfree(buffs->jpheader_buff.iov_base);
kfree(buffs->exif_buff.iov_base);
kfree(buffs->trailer_buff.iov_base);
dma_unmap_single(dev, buffs->common_buff.iov_dma, buffs->common_buff.iov_len, DMA_TO_DEVICE);
kfree(buffs->common_buff.iov_base);
kfree(buffs->rem_buff.iov_base);
}
/** Discard buffer pointers, which marks the command slot as empty */
static inline void reset_chunks(struct fvec *vects, int all)
{
int i;
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
if (i != CHUNK_REM)
vects[i].iov_len = 0;
}
if (all) {
vects[CHUNK_REM].iov_len = 0;
}
}
/** Get driver private structure from pointer to device structure */
static inline struct elphel_ahci_priv *dev_get_dpriv(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
return dpriv;
}
/** Process command and return the number of S/G entries mapped */
static int process_cmd(struct elphel_ahci_priv *dpriv)
{
struct fvec *cbuff;