Commit ba32b44b authored by Mikhail Karpenko

Merge AHCI driver from 'origin/master-initial'

parents 13453273 c9b13370
@@ -13,6 +13,9 @@
* more details.
*/
/* this one is required for printk_ratelimited */
#define CONFIG_PRINTK
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -24,7 +27,13 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <elphel/exifa.h>
#include <elphel/elphel393-mem.h>
#include "ahci.h" #include "ahci.h"
#include "ahci_elphel.h"
#include "../elphel/exif393.h"
#include "../elphel/jpeghead.h"
#define DRV_NAME "elphel-ahci"
/*
@@ -45,12 +54,29 @@ static const struct of_device_id ahci_elphel_of_match[];
static const struct attribute_group dev_attr_root_group;
static bool load_driver = false;
static unsigned char app15[ALIGNMENT_SIZE] = {0xff, 0xef};
static void elphel_cmd_issue(struct ata_port *ap, uint64_t start, uint16_t count, struct fvec *sgl, unsigned int elem, uint8_t cmd);
static int init_buffers(struct device *dev, struct frame_buffers *buffs);
static void init_vectors(struct frame_buffers *buffs, struct fvec *chunks);
static void deinit_buffers(struct device *dev, struct frame_buffers *buffs);
static inline struct elphel_ahci_priv *dev_get_dpriv(struct device *dev);
static void finish_cmd(struct elphel_ahci_priv *dpriv);
static void finish_rec(struct elphel_ahci_priv *dpriv);
static int process_cmd(struct elphel_ahci_priv *dpriv);
static inline size_t get_size_from(const struct fvec *vects, int index, size_t offset, int all);
static inline void vectmov(struct fvec *vec, size_t len);
static inline void vectsplit(struct fvec *vect, struct fvec *parts, size_t *n_elem);
static int move_tail(struct elphel_ahci_priv *dpriv);
static int move_head(struct elphel_ahci_priv *dpriv);
static size_t get_prev_slot(const struct elphel_ahci_priv *dpriv);
static int is_cmdq_empty(const struct elphel_ahci_priv *dpriv);
void process_queue(unsigned long data);
static void set_flag(struct elphel_ahci_priv *dpriv, uint32_t flag);
static void reset_flag(struct elphel_ahci_priv *dpriv, uint32_t flag);
/* debug functions */
static int check_chunks(struct fvec *vects);
static void dump_sg_list(const struct device *dev, const struct fvec *sgl, size_t elems);
static ssize_t set_load_flag(struct device *dev, struct device_attribute *attr,
const char *buff, size_t buff_sz)
@@ -94,6 +120,70 @@ static void elphel_defer_load(struct device *dev)
iounmap(ctrl_ptr);
}
static irqreturn_t elphel_irq_handler(int irq, void * dev_instance)
{
unsigned long irq_flags;
irqreturn_t handled;
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv = host->private_data;
struct ata_port *port = host->ports[DEFAULT_PORT_NUM];
void __iomem *port_mmio = ahci_port_base(port);
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
uint32_t irq_stat, host_irq_stat;
if (dpriv->flags & IRQ_SIMPLE) {
/* handle interrupt from internal command */
host_irq_stat = readl(hpriv->mmio + HOST_IRQ_STAT);
if (!host_irq_stat)
return IRQ_NONE;
dpriv->flags &= ~IRQ_SIMPLE;
irq_stat = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(host->dev, "irq_stat = 0x%x, host irq_stat = 0x%x\n", irq_stat, host_irq_stat);
writel(irq_stat, port_mmio + PORT_IRQ_STAT);
writel(host_irq_stat, hpriv->mmio + HOST_IRQ_STAT);
handled = IRQ_HANDLED;
tasklet_schedule(&dpriv->bh);
} else {
/* pass handling to AHCI level and then decide if the resource should be freed */
handled = ahci_single_irq_intr(irq, dev_instance);
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
if (is_cmdq_empty(dpriv)) {
dpriv->flags &= ~DISK_BUSY;
} else {
tasklet_schedule(&dpriv->bh);
}
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
return handled;
}
/** Command queue processing tasklet */
void process_queue(unsigned long data)
{
unsigned long irq_flags;
struct elphel_ahci_priv *dpriv = (struct elphel_ahci_priv *)data;
if (process_cmd(dpriv) == 0) {
finish_cmd(dpriv);
if (move_head(dpriv) != -1) {
process_cmd(dpriv);
} else {
if (dpriv->flags & DELAYED_FINISH) {
dpriv->flags &= ~DELAYED_FINISH;
finish_rec(dpriv);
} else {
/* all commands have been processed */
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags &= ~DISK_BUSY;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
}
}
}
// What about port_stop and freeing/unmapping ?
// Or at least check if it is re-started and memory is already allocated/mapped
static int elphel_port_start(struct ata_port *ap)
@@ -177,12 +267,12 @@ static int elphel_parse_prop(const struct device_node *devn,
static int elphel_drv_probe(struct platform_device *pdev)
{
int ret, i, irq_num;
struct ahci_host_priv *hpriv;
struct elphel_ahci_priv *dpriv;
struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct ata_host *host;
if (&dev->kobj) {
ret = sysfs_create_group(&dev->kobj, &dev_attr_root_group);
@@ -197,6 +287,17 @@ static int elphel_drv_probe(struct platform_device *pdev)
if (!dpriv)
return -ENOMEM;
dpriv->dev = dev;
spin_lock_init(&dpriv->flags_lock);
tasklet_init(&dpriv->bh, process_queue, (unsigned long)dpriv);
for (i = 0; i < MAX_CMD_SLOTS; i++) {
ret = init_buffers(dev, &dpriv->fbuffs[i]);
if (ret != 0)
return ret;
init_vectors(&dpriv->fbuffs[i], dpriv->data_chunks[i]);
}
match = of_match_device(ahci_elphel_of_match, &pdev->dev);
if (!match)
return -EINVAL;
@@ -219,12 +320,28 @@
return ret;
}
/* reassign automatically assigned interrupt handler */
irq_num = platform_get_irq(pdev, 0);
host = platform_get_drvdata(pdev);
devm_free_irq(dev, irq_num, host);
ret = devm_request_irq(dev, irq_num, elphel_irq_handler, IRQF_SHARED, dev_name(dev), host);
if (ret) {
dev_err(dev, "failed to reassign default IRQ handler to Elphel handler\n");
return ret;
}
return 0;
}
static int elphel_drv_remove(struct platform_device *pdev)
{
int i;
struct elphel_ahci_priv *dpriv = dev_get_dpriv(&pdev->dev);
dev_info(&pdev->dev, "removing Elphel AHCI driver");
tasklet_kill(&dpriv->bh);
for (i = 0; i < MAX_CMD_SLOTS; i++)
deinit_buffers(&pdev->dev, &dpriv->fbuffs[i]);
sysfs_remove_group(&pdev->dev.kobj, &dev_attr_root_group);
ata_platform_remove_one(pdev);
@@ -291,9 +408,1003 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
}
/** Set flag @e flag in driver private structure. This function uses spin lock to access the flags variable. */
static void set_flag(struct elphel_ahci_priv *dpriv, uint32_t flag)
{
unsigned long irq_flags;
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags |= flag;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
/** Reset flag @e flag in driver private structure. This function uses spin lock to access the flags variable. */
static void reset_flag(struct elphel_ahci_priv *dpriv, uint32_t flag)
{
unsigned long irq_flags;
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags &= ~flag;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
/** Map buffer vectors to S/G list and return the number of vectors mapped */
static int map_vectors(struct elphel_ahci_priv *dpriv)
{
int i;
size_t index = 0; /* must be size_t: its address is passed to vectsplit() */
int finish = 0;
size_t total_sz = 0;
size_t tail;
struct fvec *chunks;
struct fvec vect;
chunks = dpriv->data_chunks[dpriv->head_ptr];
for (i = dpriv->curr_data_chunk; i < MAX_DATA_CHUNKS; i++) {
if (i == CHUNK_REM)
/* remainder should never be processed */
continue;
if (i == dpriv->curr_data_chunk) {
total_sz = chunks[i].iov_len - dpriv->curr_data_offset;
vect.iov_base = (unsigned char *)chunks[i].iov_base + dpriv->curr_data_offset;
vect.iov_dma = chunks[i].iov_dma + dpriv->curr_data_offset;
vect.iov_len = chunks[i].iov_len - dpriv->curr_data_offset;
} else {
total_sz += chunks[i].iov_len;
vect = chunks[i];
}
if (total_sz > dpriv->max_data_sz) {
/* truncate current buffer and finish mapping */
tail = total_sz - dpriv->max_data_sz;
vect.iov_len -= tail;
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = chunks[i].iov_len - tail;
finish = 1;
} else if (unlikely(total_sz == dpriv->max_data_sz)) {
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = chunks[i].iov_len;
finish = 1;
}
if (vect.iov_len != 0) {
if (vect.iov_len < MAX_PRDT_LEN) {
dpriv->sgl[index++] = vect;
} else {
/* current vector is too long and can not be mapped to a single PRDT entry, split it */
vectsplit(&vect, dpriv->sgl, &index);
if (vect.iov_len < MAX_PRDT_LEN) {
dpriv->sgl[index++] = vect;
} else {
/* free slots in PRDT table have ended */
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = (unsigned char *)vect.iov_base - (unsigned char *)chunks[i].iov_base;
finish = 1;
}
}
if (index == (MAX_SGL_LEN - 1))
finish = 1;
}
if (finish)
break;
}
if (finish == 0) {
/* frame vectors have been fully processed, stop calling me */
dpriv->curr_data_chunk = MAX_DATA_CHUNKS;
dpriv->curr_data_offset = 0;
}
return index;
}
/** Split the buffer pointed to by vector @e vect into several smaller buffers. Each part will be less than #MAX_PRDT_LEN bytes */
static inline void vectsplit(struct fvec *vect, struct fvec *parts, size_t *n_elem)
{
size_t len;
struct fvec split;
while (vect->iov_len > MAX_PRDT_LEN && *n_elem < MAX_SGL_LEN) {
len = MAX_PRDT_LEN - MAX_PRDT_LEN % PHY_BLOCK_SIZE;
split.iov_base = vect->iov_base;
split.iov_dma = vect->iov_dma;
split.iov_len = len;
vectmov(vect, len);
parts[*n_elem] = split;
*n_elem = *n_elem + 1;
}
}
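/* For reference, the split length computed above can be checked in isolation.
 * A standalone user-space sketch follows (not part of the driver; the two
 * defines are copied from ahci_elphel_ext.h): */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PRDT_LEN   0x3fffff
#define PHY_BLOCK_SIZE 512

/* vectsplit() trims each part to the largest whole number of sectors
 * that still fits into a single PRDT entry */
int main(void)
{
	size_t len = MAX_PRDT_LEN - MAX_PRDT_LEN % PHY_BLOCK_SIZE;

	assert(len == 0x3ffe00);           /* 4193792 bytes */
	assert(len % PHY_BLOCK_SIZE == 0); /* 8191 full sectors */
	printf("split part: %zu bytes (%zu sectors)\n", len, len / PHY_BLOCK_SIZE);
	return 0;
}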
/** Copy @e len bytes from the buffer pointed to by @e src to the buffer described by vector @e dest, advancing its length */
static inline void vectcpy(struct fvec *dest, void *src, size_t len)
{
unsigned char *d = (unsigned char *)dest->iov_base;
memcpy(d + dest->iov_len, src, len);
dest->iov_len += len;
}
/** Move vector forward by @e len bytes decreasing its length */
static inline void vectmov(struct fvec *vec, size_t len)
{
if (vec->iov_len >= len) {
vec->iov_base = (unsigned char *)vec->iov_base + len;
vec->iov_dma += len;
vec->iov_len -= len;
}
}
/** Shrink vector length by @e len bytes */
static inline void vectshrink(struct fvec *vec, size_t len)
{
if (vec->iov_len >= len) {
vec->iov_len -= len;
}
}
/** Return the number of bytes needed to align @e data_len to @e align_len boundary */
static inline size_t align_bytes_num(size_t data_len, size_t align_len)
{
size_t rem = data_len % align_len;
if (rem == 0)
return 0;
else
return align_len - rem;
}
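/* Worked example (hypothetical numbers, not from the driver): aligning a
 * 1000-byte run to the 32-byte ALIGNMENT_SIZE needs 32 - (1000 % 32) = 24
 * stuffing bytes, while a 1024-byte run is already aligned and needs 0. */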
/** This helper function is used to position a pointer @e offset bytes from the end
* of a buffer. DMA handle is not updated intentionally as it is not needed during copying */
static inline unsigned char *vectrpos(struct fvec *vec, size_t offset)
{
return (unsigned char *)vec->iov_base + (vec->iov_len - offset);
}
/** Align current frame to disk sector boundary and each individual buffer to #ALIGNMENT_SIZE boundary */
static void align_frame(struct elphel_ahci_priv *dpriv)
{
unsigned char *src;
size_t len, total_sz, data_len;
size_t cmd_slot = dpriv->tail_ptr;
size_t prev_slot = get_prev_slot(dpriv);
size_t max_len = dpriv->fbuffs[cmd_slot].common_buff.iov_len;
struct device *dev = dpriv->dev;
struct frame_buffers *fbuffs = &dpriv->fbuffs[cmd_slot];
struct fvec *chunks = dpriv->data_chunks[cmd_slot];
struct fvec *cbuff = &chunks[CHUNK_COMMON];
struct fvec *rbuff = &dpriv->data_chunks[prev_slot][CHUNK_REM];
total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM) + rbuff->iov_len;
if (total_sz < PHY_BLOCK_SIZE) {
/* the frame length is less than sector size, delay this frame */
if (prev_slot != cmd_slot) {
/* some data may be left from previous frame */
vectcpy(&chunks[CHUNK_REM], rbuff->iov_base, rbuff->iov_len);
vectshrink(rbuff, rbuff->iov_len);
}
dev_dbg(dev, "frame size is less than sector size: %u bytes; delay recording\n", total_sz);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_LEADER].iov_base, chunks[CHUNK_LEADER].iov_len);
vectshrink(&chunks[CHUNK_LEADER], chunks[CHUNK_LEADER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_EXIF].iov_base, chunks[CHUNK_EXIF].iov_len);
vectshrink(&chunks[CHUNK_EXIF], chunks[CHUNK_EXIF].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_HEADER].iov_base, chunks[CHUNK_HEADER].iov_len);
vectshrink(&chunks[CHUNK_HEADER], chunks[CHUNK_HEADER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
return;
}
dma_sync_single_for_cpu(dev, fbuffs->common_buff.iov_dma, fbuffs->common_buff.iov_len, DMA_TO_DEVICE);
/* copy remainder of previous frame to the beginning of common buffer */
if (likely(rbuff->iov_len != 0)) {
len = rbuff->iov_len;
dev_dbg(dev, "copy %u bytes from REM #%u to common buffer\n", len, prev_slot);
vectcpy(cbuff, rbuff->iov_base, len);
vectshrink(rbuff, rbuff->iov_len);
}
/* copy JPEG marker */
len = chunks[CHUNK_LEADER].iov_len;
vectcpy(cbuff, chunks[CHUNK_LEADER].iov_base, len);
vectshrink(&chunks[CHUNK_LEADER], chunks[CHUNK_LEADER].iov_len);
/* copy Exif if present */
if (chunks[CHUNK_EXIF].iov_len != 0) {
len = chunks[CHUNK_EXIF].iov_len;
dev_dbg(dev, "copy %u bytes from EXIF to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_EXIF].iov_base, len);
vectshrink(&chunks[CHUNK_EXIF], chunks[CHUNK_EXIF].iov_len);
}
/* align common buffer to ALIGNMENT boundary, APP15 marker should be placed before header data */
data_len = cbuff->iov_len + chunks[CHUNK_HEADER].iov_len;
len = align_bytes_num(data_len, ALIGNMENT_SIZE);
if (len < JPEG_MARKER_LEN + JPEG_SIZE_LEN && len != 0) {
/* the number of bytes needed for alignment is less than the length of the marker itself, increase the number of stuffing bytes */
len += ALIGNMENT_SIZE;
}
dev_dbg(dev, "total number of stuffing bytes in APP15 marker: %u\n", len);
app15[3] = len - JPEG_MARKER_LEN;
vectcpy(cbuff, app15, len);
/* copy JPEG header */
len = chunks[CHUNK_HEADER].iov_len;
dev_dbg(dev, "copy %u bytes from HEADER to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_HEADER].iov_base, len);
vectshrink(&chunks[CHUNK_HEADER], chunks[CHUNK_HEADER].iov_len);
/* check if there is enough data to continue - JPEG data length can be too short */
len = get_size_from(chunks, CHUNK_DATA_0, 0, EXCLUDE_REM);
if (len < PHY_BLOCK_SIZE) {
size_t num = align_bytes_num(cbuff->iov_len, PHY_BLOCK_SIZE);
dev_dbg(dev, "jpeg data is too short, delay this frame\n");
if (len >= num) {
/* there is enough data to align common buffer to sector boundary */
if (num >= chunks[CHUNK_DATA_0].iov_len) {
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
num -= chunks[CHUNK_DATA_0].iov_len;
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_DATA_0], num);
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, num);
vectshrink(&chunks[CHUNK_DATA_0], num);
num = 0;
}
if (num >= chunks[CHUNK_DATA_1].iov_len) {
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
num -= chunks[CHUNK_DATA_1].iov_len;
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_DATA_1], num);
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, num);
vectshrink(&chunks[CHUNK_DATA_1], num);
num = 0;
}
if (num >= chunks[CHUNK_TRAILER].iov_len) {
vectcpy(cbuff, chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
num -= chunks[CHUNK_TRAILER].iov_len;
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_TRAILER], num);
vectcpy(cbuff, chunks[CHUNK_TRAILER].iov_base, num);
vectshrink(&chunks[CHUNK_TRAILER], num);
num = 0;
}
} else {
/* there is not enough data to align common buffer to sector boundary, truncate common buffer */
data_len = cbuff->iov_len % PHY_BLOCK_SIZE;
src = vectrpos(cbuff, data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(cbuff, data_len);
}
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
return;
}
/* align frame to sector size boundary; total size could have changed by the moment - recalculate */
total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM);
len = total_sz % PHY_BLOCK_SIZE;
dev_dbg(dev, "number of bytes crossing sector boundary: %u\n", len);
if (len != 0) {
if (len >= (chunks[CHUNK_DATA_1].iov_len + chunks[CHUNK_TRAILER].iov_len)) {
/* current frame is not split or the second part of JPEG data is too short */
data_len = len - chunks[CHUNK_DATA_1].iov_len - chunks[CHUNK_TRAILER].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else if (len >= chunks[CHUNK_TRAILER].iov_len) {
/* there is enough data in second part to align the frame */
data_len = len - chunks[CHUNK_TRAILER].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
/* the trailing marker is split by sector boundary, copy (PHY_BLOCK_SIZE - 1) bytes from
* JPEG data block(s) to remainder buffer and then add trailing marker */
data_len = PHY_BLOCK_SIZE - (chunks[CHUNK_TRAILER].iov_len - len);
if (data_len >= chunks[CHUNK_DATA_1].iov_len) {
size_t cut_len = data_len - chunks[CHUNK_DATA_1].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_0], cut_len);
vectcpy(&chunks[CHUNK_REM], src, cut_len);
vectshrink(&chunks[CHUNK_DATA_0], cut_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
}
} else {
/* the frame is aligned to sector boundary but some buffers may be not */
chunks[CHUNK_ALIGN].iov_base = vectrpos(cbuff, 0);
chunks[CHUNK_ALIGN].iov_dma = cbuff->iov_dma + cbuff->iov_len;
chunks[CHUNK_ALIGN].iov_len = 0;
if (chunks[CHUNK_DATA_1].iov_len == 0) {
data_len = chunks[CHUNK_DATA_0].iov_len % ALIGNMENT_SIZE;
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_ALIGN], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
} else {
data_len = chunks[CHUNK_DATA_1].iov_len % ALIGNMENT_SIZE;
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_ALIGN], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
}
vectcpy(&chunks[CHUNK_ALIGN], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
/* debug sanity check, should not happen */
if (cbuff->iov_len >= max_len) {
dev_err(dev, "ERROR: the number of bytes copied to common buffer exceeds its size\n");
}
}
/** Calculate the number of blocks this frame will occupy. The frame must be aligned to block size */
static inline size_t get_blocks_num(struct fvec *sgl, size_t n_elem)
{
int num;
size_t total = 0;
for (num = 0; num < n_elem; num++) {
total += sgl[num].iov_len;
}
return total / PHY_BLOCK_SIZE;
}
/** Calculate the size of current frame in bytes starting from vector and offset given */
static inline size_t get_size_from(const struct fvec *vects, int index, size_t offset, int all)
{
int i;
size_t total = 0;
if (index >= MAX_DATA_CHUNKS || offset > vects[index].iov_len) {
return 0;
}
for (i = index; i < MAX_DATA_CHUNKS; i++) {
if (i == CHUNK_REM && all == EXCLUDE_REM)
/* remainder should not be processed */
continue;
if (i == index)
total += vects[i].iov_len - offset;
else
total += vects[i].iov_len;
}
return total;
}
/** Set vectors pointing to data buffers except for JPEG data - those are set in circbuf driver */
static void init_vectors(struct frame_buffers *buffs, struct fvec *chunks)
{
chunks[CHUNK_EXIF].iov_base = buffs->exif_buff.iov_base;
chunks[CHUNK_EXIF].iov_len = 0;
chunks[CHUNK_LEADER].iov_base = buffs->jpheader_buff.iov_base;
chunks[CHUNK_LEADER].iov_len = 0;
chunks[CHUNK_HEADER].iov_base = (unsigned char *)chunks[CHUNK_LEADER].iov_base + JPEG_MARKER_LEN;
chunks[CHUNK_HEADER].iov_len = 0;
chunks[CHUNK_TRAILER].iov_base = buffs->trailer_buff.iov_base;
chunks[CHUNK_TRAILER].iov_len = 0;
chunks[CHUNK_REM].iov_base = buffs->rem_buff.iov_base;
chunks[CHUNK_REM].iov_len = 0;
/* this is the only DMA mapped buffer and its DMA address should be set */
chunks[CHUNK_COMMON].iov_base = buffs->common_buff.iov_base;
chunks[CHUNK_COMMON].iov_dma = buffs->common_buff.iov_dma;
chunks[CHUNK_COMMON].iov_len = 0;
}
/** Allocate memory for frame buffers */
static int init_buffers(struct device *dev, struct frame_buffers *buffs)
{
int mult;
int total_sz;
unsigned char *ptr;
buffs->exif_buff.iov_base = kmalloc(MAX_EXIF_SIZE, GFP_KERNEL);
if (!buffs->exif_buff.iov_base)
return -ENOMEM;
buffs->exif_buff.iov_len = MAX_EXIF_SIZE;
buffs->jpheader_buff.iov_base = kmalloc(JPEG_HEADER_MAXSIZE, GFP_KERNEL);
if (!buffs->jpheader_buff.iov_base)
goto err_header;
buffs->jpheader_buff.iov_len = JPEG_HEADER_MAXSIZE;
buffs->trailer_buff.iov_base = kmalloc(JPEG_MARKER_LEN, GFP_KERNEL);
if (!buffs->trailer_buff.iov_base)
goto err_trailer;
buffs->trailer_buff.iov_len = JPEG_MARKER_LEN;
ptr = buffs->trailer_buff.iov_base;
ptr[0] = 0xff;
ptr[1] = 0xd9;
/* common buffer should be large enough to contain JPEG header, Exif, some alignment bytes and
* remainder from previous frame */
total_sz = MAX_EXIF_SIZE + JPEG_HEADER_MAXSIZE + ALIGNMENT_SIZE + 2 * PHY_BLOCK_SIZE;
if (total_sz > PAGE_SIZE) {
mult = total_sz / PAGE_SIZE + 1;
total_sz = mult * PAGE_SIZE;
} else {
total_sz = PAGE_SIZE;
}
buffs->common_buff.iov_base = kmalloc(total_sz, GFP_KERNEL);
if (!buffs->common_buff.iov_base)
goto err_common;
buffs->common_buff.iov_len = total_sz;
/* this is the only buffer which needs DMA mapping as all other data will be collected in it */
buffs->common_buff.iov_dma = dma_map_single(dev, buffs->common_buff.iov_base, buffs->common_buff.iov_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, buffs->common_buff.iov_dma))
goto err_common_dma;
buffs->rem_buff.iov_base = kmalloc(2 * PHY_BLOCK_SIZE, GFP_KERNEL);
if (!buffs->rem_buff.iov_base)
goto err_remainder;
buffs->rem_buff.iov_len = 2 * PHY_BLOCK_SIZE;
return 0;
err_remainder:
dma_unmap_single(dev, buffs->common_buff.iov_dma, buffs->common_buff.iov_len, DMA_TO_DEVICE);
err_common_dma:
kfree(buffs->common_buff.iov_base);
err_common:
kfree(buffs->trailer_buff.iov_base);
err_trailer:
kfree(buffs->jpheader_buff.iov_base);
err_header:
kfree(buffs->exif_buff.iov_base);
return -ENOMEM;
}
/** Free allocated frame buffers */
static void deinit_buffers(struct device *dev, struct frame_buffers *buffs)
{
kfree(buffs->jpheader_buff.iov_base);
kfree(buffs->exif_buff.iov_base);
kfree(buffs->trailer_buff.iov_base);
dma_unmap_single(dev, buffs->common_buff.iov_dma, buffs->common_buff.iov_len, DMA_TO_DEVICE);
kfree(buffs->common_buff.iov_base);
kfree(buffs->rem_buff.iov_base);
}
/** Discard buffer pointers, which marks the command slot as empty */
static inline void reset_chunks(struct fvec *vects, int all)
{
int i;
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
if (i != CHUNK_REM)
vects[i].iov_len = 0;
}
if (all) {
vects[CHUNK_REM].iov_len = 0;
}
}
/** Get driver private structure from pointer to device structure */
static inline struct elphel_ahci_priv *dev_get_dpriv(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
return dpriv;
}
/** Process command and return the number of S/G entries mapped */
static int process_cmd(struct elphel_ahci_priv *dpriv)
{
struct fvec *cbuff;
struct ata_host *host = dev_get_drvdata(dpriv->dev);
struct ata_port *port = host->ports[DEFAULT_PORT_NUM];
size_t max_sz = (MAX_LBA_COUNT + 1) * PHY_BLOCK_SIZE;
size_t rem_sz = get_size_from(dpriv->data_chunks[dpriv->head_ptr], dpriv->curr_data_chunk, dpriv->curr_data_offset, EXCLUDE_REM);
if (dpriv->flags & PROC_CMD)
dpriv->lba_ptr.lba_write += dpriv->lba_ptr.wr_count;
dpriv->flags |= PROC_CMD;
/* define ATA command to use for current transaction */
if ((dpriv->lba_ptr.lba_write & ~ADDR_MASK_28_BIT) || rem_sz > max_sz) {
dpriv->curr_cmd = ATA_CMD_WRITE_EXT;
dpriv->max_data_sz = (MAX_LBA_COUNT_EXT + 1) * PHY_BLOCK_SIZE;
} else {
dpriv->curr_cmd = ATA_CMD_WRITE;
dpriv->max_data_sz = (MAX_LBA_COUNT + 1) * PHY_BLOCK_SIZE;
}
dpriv->sg_elems = map_vectors(dpriv);
if (dpriv->sg_elems != 0) {
dump_sg_list(dpriv->dev, dpriv->sgl, dpriv->sg_elems);
dpriv->lba_ptr.wr_count = get_blocks_num(dpriv->sgl, dpriv->sg_elems);
if (dpriv->lba_ptr.lba_write + dpriv->lba_ptr.wr_count > dpriv->lba_ptr.lba_end) {
/* the frame rolls over the buffer boundary, don't split it and start writing from the beginning */
dpriv->lba_ptr.lba_write = dpriv->lba_ptr.lba_start;
}
cbuff = &dpriv->fbuffs[dpriv->head_ptr].common_buff;
dma_sync_single_for_device(dpriv->dev, cbuff->iov_dma, cbuff->iov_len, DMA_TO_DEVICE);
elphel_cmd_issue(port, dpriv->lba_ptr.lba_write, dpriv->lba_ptr.wr_count, dpriv->sgl, dpriv->sg_elems, dpriv->curr_cmd);
}
return dpriv->sg_elems;
}
/** Finish currently running command */
static void finish_cmd(struct elphel_ahci_priv *dpriv)
{
int all;
dpriv->lba_ptr.wr_count = 0;
if ((dpriv->flags & LAST_BLOCK) == 0) {
all = 0;
} else {
all = 1;
dpriv->flags &= ~LAST_BLOCK;
}
reset_chunks(dpriv->data_chunks[dpriv->head_ptr], all);
dpriv->curr_cmd = 0;
dpriv->max_data_sz = 0;
dpriv->curr_data_chunk = 0;
dpriv->curr_data_offset = 0;
dpriv->flags &= ~PROC_CMD;
}
/** Fill free space in REM buffer with 0 and save the remaining data chunk */
static void finish_rec(struct elphel_ahci_priv *dpriv)
{
size_t stuff_len;
unsigned char *src;
struct fvec *cvect = &dpriv->data_chunks[dpriv->head_ptr][CHUNK_COMMON];
struct fvec *rvect = &dpriv->data_chunks[dpriv->head_ptr][CHUNK_REM];
if (rvect->iov_len == 0)
return;
dev_dbg(dpriv->dev, "write last chunk of data from slot %u, size: %u\n", dpriv->head_ptr, rvect->iov_len);
stuff_len = PHY_BLOCK_SIZE - rvect->iov_len;
src = vectrpos(rvect, 0);
memset(src, 0, stuff_len);
rvect->iov_len += stuff_len;
dma_sync_single_for_cpu(dpriv->dev, dpriv->fbuffs[dpriv->head_ptr].common_buff.iov_dma, dpriv->fbuffs[dpriv->head_ptr].common_buff.iov_len, DMA_TO_DEVICE);
vectcpy(cvect, rvect->iov_base, rvect->iov_len);
vectshrink(rvect, rvect->iov_len);
dpriv->flags |= LAST_BLOCK;
process_cmd(dpriv);
}
/** Move the pointer to a free command slot one step forward. This function holds spin lock #elphel_ahci_priv::flags_lock */
static int move_tail(struct elphel_ahci_priv *dpriv)
{
size_t slot = (dpriv->tail_ptr + 1) % MAX_CMD_SLOTS;
if (slot != dpriv->head_ptr) {
set_flag(dpriv, LOCK_TAIL);
dpriv->tail_ptr = slot;
dev_dbg(dpriv->dev, "move tail pointer to slot: %u\n", slot);
return 0;
} else {
/* no more free command slots */
return -1;
}
}
/** Move the pointer to the next ready command. This function holds spin lock #elphel_ahci_priv::flags_lock */
static int move_head(struct elphel_ahci_priv *dpriv)
{
size_t use_tail;
unsigned long irq_flags;
size_t slot = (dpriv->head_ptr + 1) % MAX_CMD_SLOTS;
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
if (dpriv->flags & LOCK_TAIL) {
/* current command slot is not ready yet, use previous */
use_tail = get_prev_slot(dpriv);
} else {
use_tail = dpriv->tail_ptr;
}
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
if (dpriv->head_ptr != use_tail) {
dpriv->head_ptr = slot;
dev_dbg(dpriv->dev, "move head pointer to slot: %u\n", slot);
return 0;
} else {
/* no more commands in queue */
return -1;
}
}
/** Check if command queue is empty */
static int is_cmdq_empty(const struct elphel_ahci_priv *dpriv)
{
size_t use_tail;
if (dpriv->flags & LOCK_TAIL) {
/* current command slot is not ready yet, use previous */
use_tail = get_prev_slot(dpriv);
} else {
use_tail = dpriv->tail_ptr;
}
if (dpriv->head_ptr != use_tail)
return 0;
else
return 1;
}
/** Get command slot before the last one filled in */
static size_t get_prev_slot(const struct elphel_ahci_priv *dpriv)
{
size_t slot;
if (dpriv->tail_ptr == dpriv->head_ptr)
return dpriv->tail_ptr;
if (dpriv->tail_ptr != 0) {
slot = dpriv->tail_ptr - 1;
} else {
slot = MAX_CMD_SLOTS - 1;
}
return slot;
}
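/* Example (hypothetical queue state, MAX_CMD_SLOTS = 4): with tail_ptr = 0
 * after a wrap-around, slot 3 was filled last, so get_prev_slot() returns
 * MAX_CMD_SLOTS - 1 = 3; when tail_ptr == head_ptr the queue is empty and
 * the tail slot itself is returned. */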
/** Get and enqueue new command */
static ssize_t rawdev_write(struct device *dev, ///< device structure associated with the driver
struct device_attribute *attr, ///< interface for device attributes
const char *buff, ///< buffer containing new command
size_t buff_sz) ///< the size of the command buffer
{
ssize_t rcvd = 0;
bool proceed = false;
unsigned long irq_flags;
struct elphel_ahci_priv *dpriv = dev_get_dpriv(dev);
struct frame_data fdata;
struct frame_buffers *buffs;
struct fvec *chunks;
/* simple check if we've got the right command */
if (buff_sz != sizeof(struct frame_data)) {
dev_err(dev, "the size of the data buffer is incorrect, should be equal to sizeof(struct frame_data)\n");
return -EINVAL;
}
memcpy(&fdata, buff, sizeof(struct frame_data));
/* lock disk resource as soon as possible */
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
if ((dpriv->flags & DISK_BUSY) == 0) {
dpriv->flags |= DISK_BUSY;
proceed = true;
}
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
if (fdata.cmd & DRV_CMD_FINISH) {
if ((dpriv->flags & PROC_CMD) == 0 && proceed) {
finish_rec(dpriv);
} else {
dpriv->flags |= DELAYED_FINISH;
}
return buff_sz;
}
if (move_tail(dpriv) == -1) {
/* we are not ready yet because command queue is full */
printk_ratelimited(KERN_DEBUG "command queue is full, flags = %u, proceed = %d\n", dpriv->flags, proceed);
return -EAGAIN;
}
chunks = dpriv->data_chunks[dpriv->tail_ptr];
buffs = &dpriv->fbuffs[dpriv->tail_ptr];
dev_dbg(dev, "process frame from sensor port: %u, command = %d, flags = %u\n", fdata.sensor_port, fdata.cmd, dpriv->flags);
if (fdata.cmd & DRV_CMD_EXIF) {
rcvd = exif_get_data(fdata.sensor_port, fdata.meta_index, buffs->exif_buff.iov_base, buffs->exif_buff.iov_len);
chunks[CHUNK_EXIF].iov_len = rcvd;
}
rcvd = jpeghead_get_data(fdata.sensor_port, buffs->jpheader_buff.iov_base, buffs->jpheader_buff.iov_len, 0);
if (rcvd < 0) {
/* free resource lock and current command slot */
if (proceed) {
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags &= ~DISK_BUSY;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
reset_chunks(chunks, 0);
dpriv->tail_ptr = get_prev_slot(dpriv);
dpriv->flags &= ~LOCK_TAIL;
dev_err(dev, "could not get JPEG header, error %d\n", rcvd);
return -EINVAL;
}
chunks[CHUNK_LEADER].iov_len = JPEG_MARKER_LEN;
chunks[CHUNK_TRAILER].iov_len = JPEG_MARKER_LEN;
chunks[CHUNK_HEADER].iov_len = rcvd - chunks[CHUNK_LEADER].iov_len;
rcvd = circbuf_get_ptr(fdata.sensor_port, fdata.cirbuf_ptr, fdata.jpeg_len, &chunks[CHUNK_DATA_0], &chunks[CHUNK_DATA_1]);
if (rcvd < 0) {
/* free resource lock and current command slot */
if (proceed) {
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
dpriv->flags &= ~DISK_BUSY;
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
reset_chunks(chunks, 0);
dpriv->tail_ptr = get_prev_slot(dpriv);
dpriv->flags &= ~LOCK_TAIL;
dev_err(dev, "could not get JPEG data, error %d\n", rcvd);
return -EINVAL;
}
align_frame(dpriv);
/* new command slot is ready now and can be unlocked */
reset_flag(dpriv, LOCK_TAIL);
if (!proceed) {
/* disk may be free by the moment, try to grab it */
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
if ((dpriv->flags & DISK_BUSY) == 0) {
dpriv->flags |= DISK_BUSY;
proceed = true;
}
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
}
if ((dpriv->flags & PROC_CMD) == 0 && proceed) {
if (get_size_from(dpriv->data_chunks[dpriv->head_ptr], 0, 0, EXCLUDE_REM) == 0)
move_head(dpriv);
process_cmd(dpriv);
}
return buff_sz;
}
/** Prepare software constructed command FIS in command table area. The structure of the
* command FIS is described in Transport Layer chapter of Serial ATA revision 3.1 documentation.
*/
static inline void prep_cfis(uint8_t *cmd_tbl, ///< pointer to the beginning of command table
uint8_t cmd, ///< ATA command as described in ATA/ATAPI command set
uint64_t start_addr, ///< LBA start address
uint16_t count) ///< sector count, the number of 512 byte sectors to read or write
///< @return None
{
uint8_t device, ctrl;
/* select the content of Device and Control registers based on command, read the description of
* a command in ATA/ATAPI command set documentation
*/
switch (cmd) {
case ATA_CMD_WRITE:
case ATA_CMD_READ:
device = 0xe0 | ((start_addr >> 24) & 0x0f);
ctrl = 0x08;
/* this is 28-bit command; 4 bits of the address have already been
* placed to Device register, invalidate the remaining (if any) upper
* bits of the address and leave only 24 significant bits (just in case)
*/
start_addr &= 0xffffff;
count &= 0xff;
break;
case ATA_CMD_WRITE_EXT:
case ATA_CMD_READ_EXT:
device = 0xe0;
ctrl = 0x08;
break;
default:
device = 0xe0;
ctrl = 0x08;
}
cmd_tbl[0] = 0x27; // H2D register FIS
cmd_tbl[1] = 0x80; // set C = 1
cmd_tbl[2] = cmd; // ATA READ or WRITE DMA command as described in ATA/ATAPI command set
cmd_tbl[3] = 0; // features(7:0)
cmd_tbl[4] = start_addr & 0xff; // LBA(7:0)
cmd_tbl[5] = (start_addr >> 8) & 0xff; // LBA(15:8)
cmd_tbl[6] = (start_addr >> 16) & 0xff; // LBA(23:16)
cmd_tbl[7] = device; // device
cmd_tbl[8] = (start_addr >> 24) & 0xff; // LBA(31:24)
cmd_tbl[9] = (start_addr >> 32) & 0xff; // LBA(39:32)
cmd_tbl[10] = (start_addr >> 40) & 0xff; // LBA(47:40)
cmd_tbl[11] = 0; // features(15:8)
cmd_tbl[12] = count & 0xff; // count(7:0)
cmd_tbl[13] = (count >> 8) & 0xff; // count(15:8)
cmd_tbl[14] = 0; // ICC (isochronous command completion)
cmd_tbl[15] = ctrl; // control
}
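/* Worked example (hypothetical request): prep_cfis(cmd_tbl, ATA_CMD_WRITE_EXT,
 * 0x12345678, 8) builds an H2D register FIS with cmd_tbl[0] = 0x27 (FIS type),
 * cmd_tbl[1] = 0x80 (C bit set), cmd_tbl[2] = 0x35 (WRITE DMA EXT),
 * cmd_tbl[4..6] = 0x78, 0x56, 0x34, cmd_tbl[7] = 0xe0 (LBA mode),
 * cmd_tbl[8] = 0x12, cmd_tbl[9..10] = 0, cmd_tbl[12] = 0x08 and
 * cmd_tbl[13] = 0x00 (8 sectors). */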
/** Map S/G list to physical region descriptor table in AHCI controller command table */
static inline void prep_prdt(struct fvec *sgl, ///< pointer to S/G list which should be mapped to physical
///< region description table
unsigned int n_elem, ///< the number of elements in @e sgl
struct ahci_sg *ahci_sgl) ///< pointer to physical region description table
///< @return None
{
unsigned int num = 0;
for (num = 0; num < n_elem; num++) {
ahci_sgl[num].addr = cpu_to_le32(sgl[num].iov_dma & 0xffffffff);
ahci_sgl[num].addr_hi = cpu_to_le32((sgl[num].iov_dma >> 16) >> 16);
ahci_sgl[num].flags_size = cpu_to_le32(sgl[num].iov_len - 1);
}
}
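/* Example entry (hypothetical buffer): iov_dma = 0x38000000, iov_len = 0x1000
 * yields addr = 0x38000000, addr_hi = 0 and flags_size = 0xfff, because the
 * AHCI data byte count field stores the transfer length minus one. */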
/** Prepare and issue read or write command */
static void elphel_cmd_issue(struct ata_port *ap,///< device port for which the command should be issued
uint64_t start, ///< LBA start address
uint16_t count, ///< the number of sectors to read or write
struct fvec *sgl, ///< S/G list pointing to data buffers
unsigned int elem, ///< the number of elements in @e sgl
uint8_t cmd) ///< the command to be issued; should be ATA_CMD_READ, ATA_CMD_READ_EXT,
///< ATA_CMD_WRITE or ATA_CMD_WRITE_EXT, other commands are not tested
///< @return None
{
uint32_t opts;
uint8_t *cmd_tbl;
unsigned int slot_num = 0;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
struct ahci_sg *ahci_sg;
void __iomem *port_mmio = ahci_port_base(ap);
dpriv->flags |= IRQ_SIMPLE;
/* prepare command FIS */
dma_sync_single_for_cpu(ap->dev, pp->cmd_tbl_dma, AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
cmd_tbl = pp->cmd_tbl + slot_num * AHCI_CMD_TBL_SZ;
prep_cfis(cmd_tbl, cmd, start, count);
/* prepare physical region descriptor table */
ahci_sg = pp->cmd_tbl + slot_num * AHCI_CMD_TBL_SZ + AHCI_CMD_TBL_HDR_SZ;
prep_prdt(sgl, elem, ahci_sg);
/* prepare command header */
opts = CMD_FIS_LEN | (elem << 16) | AHCI_CMD_PREFETCH | AHCI_CMD_CLR_BUSY;
if (cmd == ATA_CMD_WRITE || cmd == ATA_CMD_WRITE_EXT)
opts |= AHCI_CMD_WRITE;
ahci_fill_cmd_slot(pp, slot_num, opts);
dev_dbg(ap->dev, "dump command table content, first %d bytes, phys addr = 0x%x:\n", 16, pp->cmd_tbl_dma);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pp->cmd_tbl, 16);
dma_sync_single_for_device(ap->dev, pp->cmd_tbl_dma, AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
/* issue command */
writel(0x11, port_mmio + PORT_CMD);
writel(1 << slot_num, port_mmio + PORT_CMD_ISSUE);
}
/** Defer system command if internal command queue is not empty */
static int elphel_qc_defer(struct ata_queued_cmd *qc)
{
int ret;
unsigned long irq_flags;
struct elphel_ahci_priv *dpriv = dev_get_dpriv(qc->ap->dev);
/* First apply the usual rules */
ret = ata_std_qc_defer(qc);
if (ret != 0)
return ret;
/* And now check if internal command is in progress */
spin_lock_irqsave(&dpriv->flags_lock, irq_flags);
if ((dpriv->flags & DISK_BUSY) || is_cmdq_empty(dpriv) == 0) {
ret = ATA_DEFER_LINK;
} else {
dpriv->flags |= DISK_BUSY;
}
spin_unlock_irqrestore(&dpriv->flags_lock, irq_flags);
return ret;
}
/** Return the starting position of disk buffer (in LBA) */
static ssize_t lba_start_read(struct device *dev, struct device_attribute *attr, char *buff)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
return snprintf(buff, 20, "%llu\n", dpriv->lba_ptr.lba_start);
}
/** Set the starting position of disk buffer (in LBA) */
static ssize_t lba_start_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
if (kstrtoull(buff, 10, &dpriv->lba_ptr.lba_start) != 0)
return -EINVAL;
if (dpriv->lba_ptr.lba_write < dpriv->lba_ptr.lba_start)
dpriv->lba_ptr.lba_write = dpriv->lba_ptr.lba_start;
return buff_sz;
}
/** Return the ending position of disk buffer (in LBA) */
static ssize_t lba_end_read(struct device *dev, struct device_attribute *attr, char *buff)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
return snprintf(buff, 20, "%llu\n", dpriv->lba_ptr.lba_end);
}
/** Set the ending position of disk buffer (in LBA) */
static ssize_t lba_end_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
if (kstrtoull(buff, 10, &dpriv->lba_ptr.lba_end) != 0)
return -EINVAL;
if (dpriv->lba_ptr.lba_write > dpriv->lba_ptr.lba_end)
dpriv->lba_ptr.lba_write = dpriv->lba_ptr.lba_end;
return buff_sz;
}
/** Return the current position of write pointer (in LBA) */
static ssize_t lba_current_read(struct device *dev, struct device_attribute *attr, char *buff)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
return snprintf(buff, 20, "%llu\n", dpriv->lba_ptr.lba_write);
}
/** Set the current position of write pointer (in LBA) */
static ssize_t lba_current_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
if (kstrtoull(buff, 10, &dpriv->lba_ptr.lba_write) != 0)
return -EINVAL;
return buff_sz;
}
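/* The four lba_* handlers above back the sysfs attributes declared below.
 * A minimal user-space sketch of moving the raw buffer start follows; the
 * device path is an assumption, and the real attribute names come from the
 * SYSFS_AHCI_FNAME_* macros: */
#include <stdio.h>

int main(void)
{
	/* hypothetical sysfs path to the Elphel AHCI platform device */
	FILE *f = fopen("/sys/devices/.../elphel-ahci/lba_start", "w");

	if (!f)
		return 1;
	fprintf(f, "%llu\n", 2048ULL); /* place the raw buffer start at LBA 2048 */
	fclose(f);
	return 0;
}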
static DEVICE_ATTR(load_module, S_IWUSR | S_IWGRP, NULL, set_load_flag);
static DEVICE_ATTR(SYSFS_AHCI_FNAME_WRITE, S_IWUSR | S_IWGRP, NULL, rawdev_write);
static DEVICE_ATTR(SYSFS_AHCI_FNAME_START, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, lba_start_read, lba_start_write);
static DEVICE_ATTR(SYSFS_AHCI_FNAME_END, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, lba_end_read, lba_end_write);
static DEVICE_ATTR(SYSFS_AHCI_FNAME_CURR, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, lba_current_read, lba_current_write);
static struct attribute *root_dev_attrs[] = {
&dev_attr_load_module.attr,
&dev_attr_SYSFS_AHCI_FNAME_WRITE.attr,
&dev_attr_SYSFS_AHCI_FNAME_START.attr,
&dev_attr_SYSFS_AHCI_FNAME_END.attr,
&dev_attr_SYSFS_AHCI_FNAME_CURR.attr,
NULL
};
static const struct attribute_group dev_attr_root_group = {
@@ -305,6 +1416,7 @@ static struct ata_port_operations ahci_elphel_ops = {
.inherits = &ahci_ops,
.port_start = elphel_port_start,
.qc_prep = elphel_qc_prep,
.qc_defer = elphel_qc_defer,
};
static const struct ata_port_info ahci_elphel_port_info = {
@@ -341,6 +1453,42 @@ static struct platform_driver ahci_elphel_driver = {
};
module_platform_driver(ahci_elphel_driver);
/** Debug function, checks frame alignment */
static int check_chunks(struct fvec *vects)
{
int i;
int ret = 0;
size_t sz = 0;
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
if (i != CHUNK_REM) {
sz += vects[i].iov_len;
if ((vects[i].iov_len % ALIGNMENT_SIZE) != 0) {
dev_err(NULL, "ERROR: unaligned write from slot %d, length %u\n", i, vects[i].iov_len);
ret = -1;
}
}
}
if ((sz % PHY_BLOCK_SIZE) != 0) {
dev_err(NULL, "ERROR: total length of the transaction is not aligned to sector boundary, total length %u\n", sz);
ret = -1;
} else {
dev_err(NULL, "===== frame is OK =====\n");
}
return ret;
}
/** Debug function, prints the S/G list of current command */
static void dump_sg_list(const struct device *dev, const struct fvec *sgl, size_t elems)
{
int i;
dev_dbg(dev, "===== dump S/G list, %u elements:\n", elems);
for (i = 0; i < elems; i++) {
dev_dbg(dev, "dma address: 0x%x, len: %u\n", sgl[i].iov_dma, sgl[i].iov_len);
}
dev_dbg(dev, "===== end of S/G list =====\n");
}
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Elphel, Inc."); MODULE_AUTHOR("Elphel, Inc.");
MODULE_DESCRIPTION("Elphel AHCI SATA platform driver for elphel393 camera"); MODULE_DESCRIPTION("Elphel AHCI SATA platform driver for elphel393 camera");
/** @file ahci_elphel_ext.h
*
 * @brief Elphel AHCI SATA platform driver for Elphel393 camera. This module provides
 * additional functions that allow using part of a disk (or the entire disk) as a
 * raw circular buffer.
*
* @copyright Copyright (C) 2016 Elphel, Inc
*
* @par <b>License</b>
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <uapi/elphel/ahci_cmd.h>
#include "../elphel/circbuf.h"
#ifndef _AHCI_ELPHEL_EXT
#define _AHCI_ELPHEL_EXT
#define IRQ_SIMPLE (1 << 0) ///< Flag indicating that IRQ corresponds to internal command and should not be
///< processed in ahci_handle_port_interrupt
#define DISK_BUSY (1 << 1) ///< Flag indicating that disk is currently busy. Access to this flag should be protected by
///< spin locks to prevent race conditions
#define PROC_CMD (1 << 2) ///< Processing driver's internal command is in progress
#define LAST_BLOCK (1 << 3) ///< Flag indicating that the remaining chunk of data will be recorded
#define DELAYED_FINISH (1 << 4) ///< Flag indicating that recording should be stopped right after the last chunk of data is written
#define LOCK_TAIL (1 << 5) ///< Lock current command slot until all data buffers are assigned and the frame is aligned
#define CMD_FIS_LEN 5 ///< The length of a command FIS in double words
#define ADDR_MASK_28_BIT ((u64)0xfffffff)///< This is used to get 28-bit address from 64-bit value
#define MAX_PRDT_LEN 0x3fffff ///< The maximum data length of a single PRDT entry is 4 MB
#define MAX_DATA_CHUNKS 9 ///< An array of JPEG frame chunks contains pointers to JPEG leading marker,
///< JPEG header, Exif data if present, stuffing bytes chunk which aligns
///< the frame size to disk sector boundary, JPEG data which
///< can be split into two chunks, align buffers, JPEG
///< trailing marker, and pointer to a buffer containing the remainder of a
///< frame. Nine chunks of data in total.
#define DEFAULT_PORT_NUM 0 ///< Default port number
#define ALIGNMENT_SIZE 32 ///< Align buffer lengths to this number of bytes
#define MAX_SGL_LEN 168 ///< Maximum number of entries in PRDT table. HW max is 64k.
///< Set this value the same as AHCI_MAX_SG in ahci.h
#define MAX_CMD_SLOTS 4 ///< Maximum number of frames which will be processed at the same time
#define MAX_LBA_COUNT 0xff ///< Maximum number of sectors for READ DMA or WRITE DMA commands
#define MAX_LBA_COUNT_EXT 0xffff ///< Maximum number of sectors for READ DMA EXT or WRITE DMA EXT commands
#define PHY_BLOCK_SIZE 512 ///< Physical disk block size
#define JPEG_MARKER_LEN 2 ///< The size in bytes of JPEG marker
#define JPEG_SIZE_LEN 2 ///< The size in bytes of JPEG marker length field
#define INCLUDE_REM 1 ///< Include REM buffer to total size calculation
#define EXCLUDE_REM 0 ///< Exclude REM buffer from total size calculation
/** This structure holds raw device buffer pointers */
struct drv_pointers {
uint64_t lba_start; ///< raw buffer starting LBA
uint64_t lba_end; ///< raw buffer ending LBA
uint64_t lba_write; ///< current write pointer inside raw buffer
uint16_t wr_count; ///< the number of LBA to write next time
};
/** Container structure for frame buffers */
struct frame_buffers {
struct fvec exif_buff; ///< Exif buffer
struct fvec jpheader_buff; ///< JPEG header buffer
struct fvec trailer_buff; ///< buffer for trailing marker
struct fvec common_buff; ///< common buffer where other parts are combined
struct fvec rem_buff; ///< remainder from previous frame
};
/** Symbolic names for slots in buffer pointers. Buffer alignment function relies on the order of these names, so
* new names can be added but the overall order should not be changed */
enum {
CHUNK_LEADER, ///< pointer to JPEG leading marker
CHUNK_EXIF, ///< pointer to Exif buffer
CHUNK_HEADER, ///< pointer to JPEG header data excluding leading marker
CHUNK_COMMON, ///< pointer to common buffer
CHUNK_DATA_0, ///< pointer to JPEG data
CHUNK_DATA_1, ///< pointer to the second half of JPEG data if a frame crosses circbuf boundary
CHUNK_TRAILER, ///< pointer to JPEG trailing marker
CHUNK_ALIGN, ///< pointer to buffer where the second part of JPEG data should be aligned
CHUNK_REM ///< pointer to buffer containing the remainder of current frame. It will be recorded during next transaction
};
/** AHCI driver private structure */
struct elphel_ahci_priv {
u32 clb_offs; ///< CLB offset, received from device tree
u32 fb_offs; ///< FB offset, received from device tree
u32 base_addr; ///< controller base address
u32 flags; ///< flags indicating current state of the driver. Access to #DISK_BUSY flags is protected with
///< a spin lock
int curr_cmd; ///< current ATA command
size_t max_data_sz; ///< maximum data size (in bytes) which can be processed with current ATA command
struct drv_pointers lba_ptr; ///< disk buffer pointers
struct frame_buffers fbuffs[MAX_CMD_SLOTS]; ///< a set of buffers for each command
struct fvec data_chunks[MAX_CMD_SLOTS][MAX_DATA_CHUNKS];///< a set of vectors pointing to data buffers for each command
struct fvec sgl[MAX_SGL_LEN]; ///< an array of data buffers mapped for next transaction
int sg_elems; ///< the number of S/G vectors mapped for next transaction in @e sgl array
int curr_data_chunk; ///< index of a data chunk used during last transaction
size_t curr_data_offset; ///< offset of the last byte in a data chunk pointed to by @e curr_data_chunk
size_t head_ptr; ///< pointer to command slot which will be written next
size_t tail_ptr; ///< pointer to next free command slot
spinlock_t flags_lock; ///< controls access to #DISK_BUSY flag in @e flags variable.
///< This flag controls access to disk write operations either from
///< the driver itself or from the system. Mutex is not used
///< because this flag is accessed from interrupt context
struct tasklet_struct bh; ///< command processing tasklet
struct device *dev; ///< pointer to parent device structure
};
#endif /* _AHCI_ELPHEL_EXT */
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <uapi/elphel/x393_devices.h>
#include <uapi/elphel/c313a.h>
@@ -108,6 +109,36 @@ int init_ccam_dma_buf_ptr(struct platform_device *pdev)
return 0;
}
ssize_t circbuf_get_ptr(int sensor_port, size_t offset, size_t len, struct fvec *vect_0, struct fvec *vect_1)
{
int ret = 1;
if (offset > CCAM_DMA_SIZE || sensor_port >= SENSOR_PORTS)
return -EINVAL;
if (offset + len < CCAM_DMA_SIZE) {
// the image is not split
vect_0->iov_base = &circbuf_priv[sensor_port].buf_ptr[BYTE2DW(offset)];
vect_0->iov_dma = circbuf_priv[sensor_port].phys_addr + offset;
vect_0->iov_len = len;
vect_1->iov_base = NULL;
vect_1->iov_len = 0;
vect_1->iov_dma = 0;
} else {
// the image is split into two segments
vect_0->iov_base = &circbuf_priv[sensor_port].buf_ptr[BYTE2DW(offset)];
vect_0->iov_dma = circbuf_priv[sensor_port].phys_addr + offset;
vect_0->iov_len = CCAM_DMA_SIZE - offset;
vect_1->iov_base = circbuf_priv[sensor_port].buf_ptr;
vect_1->iov_dma = circbuf_priv[sensor_port].phys_addr;
vect_1->iov_len = len - vect_0->iov_len;
ret = 2;
}
return ret;
}
EXPORT_SYMBOL_GPL(circbuf_get_ptr);
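/* A sketch of the intended call pattern (mirrors the use in rawdev_write()
 * in ahci_elphel.c; the port, offset and length values are hypothetical): */
static void circbuf_get_ptr_example(void)
{
	struct fvec v0, v1;
	ssize_t n = circbuf_get_ptr(0, 0x100000, 0x20000, &v0, &v1);

	if (n == 1) {
		/* frame is contiguous: v0 covers all 0x20000 bytes, v1 is empty */
	} else if (n == 2) {
		/* frame wraps around the buffer end: v0 runs to the end of circbuf,
		 * v1 restarts at its base, v0.iov_len + v1.iov_len == 0x20000 */
	}
}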
/**
* @brief Process circular buffer file opening and define further action in accordance
* with minor file number.
@@ -24,6 +24,12 @@
#include <linux/poll.h>
struct fvec {
void *iov_base; ///< pointer to allocated buffer
size_t iov_len; ///< the size (in bytes) of allocated buffer; set after allocation and is not modified during buffer lifetime
dma_addr_t iov_dma; ///< buffer physical address
};
/** @brief Circular buffer private data */
struct circbuf_priv_t {
int minor; ///< device file minor number
@@ -75,4 +81,6 @@ extern unsigned char circbuf_byrshift;
#endif
/* end of debug code */
ssize_t circbuf_get_ptr(int sensor_port, size_t offset, size_t len, struct fvec *vect_0, struct fvec *vect_1);
#endif /* _CIRCBUF_H */
@@ -54,7 +54,7 @@
#include "exif393.h"
#define D(x)
//#define D(x) printk(">>> %s:%d:",__FILE__,__LINE__);x
//Major
@@ -90,6 +90,7 @@ static int aexif_wp[SENSOR_PORTS] = {1,1,1,1}; // frame write pointer in
static int aexif_enabled[SENSOR_PORTS] = {0,0,0,0}; // enable storing of frame meta data, enable reading Exif data
static int aexif_valid[SENSOR_PORTS] = {0,0,0,0}; // Exif tables and buffer are valid.
static char * ameta_buffer[SENSOR_PORTS]= {NULL,NULL,NULL,NULL}; // dynamically allocated buffer to store frame meta data.
static char exif_tmp_buff[MAX_EXIF_SIZE];
//static char * meta_buffer=NULL; // dynamically allocated buffer to store frame meta data. //static char * meta_buffer=NULL; // dynamically allocated buffer to store frame meta data.
// page 0 - temporary storage, 1..MAX_EXIF_FRAMES - buffer // page 0 - temporary storage, 1..MAX_EXIF_FRAMES - buffer
@@ -724,6 +725,35 @@ ssize_t exif_read (struct file * file, char * buf, size_t count, loff_t *of
return count;
}
/* This code is copied from exif_read(); consider refactoring exif_read() to call this function instead */
size_t exif_get_data(int sensor_port, unsigned short meta_index, void *buff, size_t buff_sz)
{
size_t ret = 0;
size_t count = exif_template_size;
loff_t off;
int start_p, page_p, i;
char *metap;
//will truncate by the end of current page
if (!aexif_enabled[sensor_port])
return 0;
off = meta_index * exif_template_size;
D(printk("%s: count= 0x%x, *off= 0x%x, i=0x%x, exif_template_size=0x%x\n", __func__, (int) count, (int) off, (int) meta_index, (int) exif_template_size));
start_p = meta_index * exif_template_size;
page_p = off - start_p;
D(printk("%s: count= 0x%x, pos= 0x%x, start_p=0x%x, page_p=0x%x, i=0x%x, exif_template_size=0x%x\n", __func__, (int) count, (int) off, (int)start_p, (int)page_p,(int) meta_index, (int) exif_template_size));
metap = &ameta_buffer[sensor_port][meta_index * aexif_meta_size[sensor_port]]; // pointer to the start of the selected page in frame meta_buffer
if ((page_p + count) > exif_template_size)
count = exif_template_size - page_p;
memcpy(exif_tmp_buff, exif_template, exif_template_size);
D(printk("%s: count= 0x%x, pos= 0x%x, start_p=0x%x, page_p=0x%x\n", __func__, (int) count, (int) off, (int)start_p, (int)page_p));
for (i = 0; i < exif_fields; i++) {
memcpy(&exif_tmp_buff[dir_table[i].dst], &metap[dir_table[i].src], dir_table[i].len);
}
memcpy(buff, &exif_tmp_buff[page_p], count);
return count;
}
EXPORT_SYMBOL_GPL(exif_get_data);
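Another kernel module (the AHCI recorder in this commit) can pull a frame's assembled Exif segment through the new accessor. A minimal sketch, reusing MAX_EXIF_SIZE from this file as the buffer bound; sensor_port and meta_index come from the caller:

	char exif_buff[MAX_EXIF_SIZE];
	size_t exif_len = exif_get_data(sensor_port, meta_index, exif_buff, sizeof(exif_buff));
	if (exif_len == 0) {
		/* Exif is disabled for this port, nothing to record */
	}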
//!++++++++++++++++++++++++++++++++++++ _init() ++++++++++++++++++++++++++++++++++++++++++++++++++++++ //!++++++++++++++++++++++++++++++++++++ _init() ++++++++++++++++++++++++++++++++++++++++++++++++++++++
......
...@@ -53,5 +53,6 @@ int putlong_meta(int sensor_port, unsigned long data, int * indx, unsigned long ...@@ -53,5 +53,6 @@ int putlong_meta(int sensor_port, unsigned long data, int * indx, unsigned long
char * encode_time(char buf[27], unsigned long sec, unsigned long usec); char * encode_time(char buf[27], unsigned long sec, unsigned long usec);
int store_meta(int sensor_port); //called from IRQ service - put current metadata to meta_buffer, return page index int store_meta(int sensor_port); //called from IRQ service - put current metadata to meta_buffer, return page index
size_t exif_get_data(int sensor_port, unsigned short meta_index, void * buff, size_t buff_sz);
#endif #endif
...@@ -412,6 +412,22 @@ ssize_t jpeghead_read(struct file *file, char *buf, size_t count, loff_t *off) ...@@ -412,6 +412,22 @@ ssize_t jpeghead_read(struct file *file, char *buf, size_t count, loff_t *off)
return count; return count;
} }
ssize_t jpeghead_get_data(int sensor_port, void *buff, size_t buff_sz, size_t offset)
{
	unsigned long ptr = offset;
	size_t count = jpeghead_priv[sensor_port].jpeg_h_sz;

	if (ptr >= jpeghead_priv[sensor_port].jpeg_h_sz)
		ptr = jpeghead_priv[sensor_port].jpeg_h_sz;            // an offset past the end of the header yields 0 bytes
	if ((ptr + count) > jpeghead_priv[sensor_port].jpeg_h_sz)
		count = jpeghead_priv[sensor_port].jpeg_h_sz - ptr;
	if (buff_sz < count)
		return -EINVAL;                                        // caller's buffer is too small for the header
	memcpy(buff, &jpeghead_priv[sensor_port].header[ptr], count);

	return count;
}
EXPORT_SYMBOL_GPL(jpeghead_get_data);
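The matching accessor for the JPEG header follows the same pattern. A minimal sketch (not from this commit; JPEG_HEADER_MAXSIZE is an assumed size constant, not defined in this diff):

	unsigned char header[JPEG_HEADER_MAXSIZE];
	ssize_t hdr_len = jpeghead_get_data(sensor_port, header, sizeof(header), 0);
	if (hdr_len < 0)
		return hdr_len;        /* -EINVAL: destination buffer too small */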
/**huffman_* file operations /**huffman_* file operations
* write, read Huffman tables, initialize tables to default ones, program FPGA with the Huffman tables * write, read Huffman tables, initialize tables to default ones, program FPGA with the Huffman tables
......
...@@ -14,6 +14,7 @@ int jpegheader_create(struct interframe_params_t * params, unsigned char * b ...@@ -14,6 +14,7 @@ int jpegheader_create(struct interframe_params_t * params, unsigned char * b
int jpeghead_open (struct inode *inode, struct file *filp); // set filesize int jpeghead_open (struct inode *inode, struct file *filp); // set filesize
loff_t jpeghead_lseek (struct file * file, loff_t offset, int orig, struct interframe_params_t *fp); loff_t jpeghead_lseek (struct file * file, loff_t offset, int orig, struct interframe_params_t *fp);
ssize_t jpeghead_read (struct file * file, char * buf, size_t count, loff_t *off); ssize_t jpeghead_read (struct file * file, char * buf, size_t count, loff_t *off);
ssize_t jpeghead_get_data(int sensor_port, void *buff, size_t buff_sz, size_t offset);
int huffman_open (struct inode *inode, struct file *filp); // set filesize int huffman_open (struct inode *inode, struct file *filp); // set filesize
int huffman_release(struct inode *inode, struct file *filp); int huffman_release(struct inode *inode, struct file *filp);
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <linux/module.h>
#include <stddef.h> #include <stddef.h>
#include "x393_helpers.h" #include "x393_helpers.h"
...@@ -47,3 +48,32 @@ u32 get_rtc_usec(void) ...@@ -47,3 +48,32 @@ u32 get_rtc_usec(void)
} }
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(get_rtc_usec);
/**
* @brief Read RTC second counter.
 * @return Current value of the second counter, or 0 if the read sequence was
 * not successful.
*/
u32 get_rtc_sec(void)
{
x393_rtc_status_t stat;
x393_status_ctrl_t stat_ctrl;
x393_rtc_sec_t sec;
unsigned int i;
stat = x393_rtc_status();
stat_ctrl.d32 = 0;
stat_ctrl.mode = 1;
stat_ctrl.seq_num = stat.seq_num + 1;
set_x393_rtc_set_status(stat_ctrl);
for (i = 0; i < REPEAT_READ; i++) {
stat = x393_rtc_status();
if (stat.seq_num == stat_ctrl.seq_num) {
sec = x393_rtc_status_sec();
return sec.sec;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(get_rtc_sec);
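get_rtc_sec() and get_rtc_usec() each latch and read a single counter, so a caller that needs a matching pair has to guard against the second counter ticking over between the two reads. One possible helper (a sketch, not part of this commit):

	static void get_rtc_timestamp(u32 *sec, u32 *usec)
	{
		u32 s1 = get_rtc_sec();
		u32 us = get_rtc_usec();
		u32 s2 = get_rtc_sec();

		if (s1 != s2)              /* a second boundary was crossed mid-read */
			us = get_rtc_usec();   /* re-read; now consistent with s2 */
		*sec = s2;
		*usec = us;
	}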
...@@ -30,5 +30,6 @@ ...@@ -30,5 +30,6 @@
#define REPEAT_READ 10 #define REPEAT_READ 10
u32 get_rtc_usec(void); u32 get_rtc_usec(void);
u32 get_rtc_sec(void);
#endif /* _X393_HELPERS_H */ #endif /* _X393_HELPERS_H */
...@@ -5,4 +5,5 @@ ...@@ -5,4 +5,5 @@
header-y += exifa.h header-y += exifa.h
header-y += c313a.h header-y += c313a.h
header-y += x393_devices.h header-y += x393_devices.h
header-y += ahci_cmd.h
/** @file ahci_cmd.h
*
 * @brief Elphel AHCI SATA platform driver for the Elphel393 camera. This module provides
 * the constants and data structures used to coordinate interaction between the driver
 * and user-space applications during JPEG file recording.
*
* @copyright Copyright (C) 2016 Elphel, Inc
*
* @par <b>License</b>
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _AHCI_CMD
#define _AHCI_CMD
#define DRV_CMD_WRITE (1 << 0)
#define DRV_CMD_FINISH (1 << 1)
#define DRV_CMD_EXIF (1 << 2)
#define _NAME_TO_STR(...) #__VA_ARGS__
#define NAME_TO_STR(NAME) _NAME_TO_STR(NAME)
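This pair of macros is the standard two-step C stringification idiom: the outer macro forces its argument to be macro-expanded before the inner one applies the # operator. For example:

	/* NAME_TO_STR(SYSFS_AHCI_FNAME_WRITE) -> _NAME_TO_STR(write) -> "write",
	 * so SYSFS_AHCI_WRITE below concatenates to
	 * "/sys/devices/soc0/amba@0/80000000.elphel-ahci/write" at compile time. */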
/** The path to Elphel AHCI driver sysfs entry. The trailing slash is mandatory. */
#define SYSFS_AHCI_ENTRY "/sys/devices/soc0/amba@0/80000000.elphel-ahci/"
/** sysfs entry name, no double quotes. This macro is used to populate <em>struct attribute</em> in #ahci_elphel.c */
#define SYSFS_AHCI_FNAME_WRITE write
/** sysfs entry name, no double quotes. This macro is used to populate <em>struct attribute</em> in #ahci_elphel.c */
#define SYSFS_AHCI_FNAME_START lba_start
/** sysfs entry name, no double quotes. This macro is used to populate <em>struct attribute</em> in #ahci_elphel.c */
#define SYSFS_AHCI_FNAME_END lba_end
/** sysfs entry name, no double quotes. This macro is used to populate <em>struct attribute</em> in #ahci_elphel.c */
#define SYSFS_AHCI_FNAME_CURR lba_current
/** This file is used to send commands to the AHCI driver from user-space applications (currently camogm). */
#define SYSFS_AHCI_WRITE SYSFS_AHCI_ENTRY NAME_TO_STR(SYSFS_AHCI_FNAME_WRITE)
/** This file is used to control starting LBA of a disk buffer (R/W). */
#define SYSFS_AHCI_LBA_START SYSFS_AHCI_ENTRY NAME_TO_STR(SYSFS_AHCI_FNAME_START)
/** This file is used to control ending LBA of a disk buffer (R/W). */
#define SYSFS_AHCI_LBA_END SYSFS_AHCI_ENTRY NAME_TO_STR(SYSFS_AHCI_FNAME_END)
/** This file is used to control the current LBA of a disk buffer (R/W). Use this file to set a pointer inside
 * the [lba_start..lba_end] area where the next write operation will begin. */
#define SYSFS_AHCI_LBA_CURRENT SYSFS_AHCI_ENTRY NAME_TO_STR(SYSFS_AHCI_FNAME_CURR)
struct frame_data {
	unsigned int sensor_port;         ///< sensor port the frame was captured on
	int cirbuf_ptr;                   ///< offset of the frame in the circular buffer
	int jpeg_len;                     ///< length of the JPEG data, in bytes
	int meta_index;                   ///< index of the frame's Exif metadata page
	int cmd;                          ///< command flags, a combination of DRV_CMD_* values
};
#endif /* _AHCI_CMD */
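A minimal user-space sketch (not from this commit) of how a recorder such as camogm might queue one frame, by writing a binary struct frame_data to the driver's sysfs command file; error handling is trimmed and all frame parameters come from the caller:

	#include <stdio.h>
	#include "ahci_cmd.h"

	int queue_frame(unsigned int port, int cirbuf_ptr, int jpeg_len, int meta_index)
	{
		struct frame_data fdata = {
			.sensor_port = port,
			.cirbuf_ptr  = cirbuf_ptr,
			.jpeg_len    = jpeg_len,
			.meta_index  = meta_index,
			.cmd         = DRV_CMD_WRITE,
		};
		FILE *f = fopen(SYSFS_AHCI_WRITE, "w");
		if (!f)
			return -1;
		if (fwrite(&fdata, sizeof(fdata), 1, f) != 1) {
			fclose(f);
			return -1;
		}
		fclose(f);
		return 0;
	}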