Commit e41643e0 authored by Mikhail Karpenko

Merge read and write code into one command

parent 6a573df8
@@ -24,7 +24,15 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <linux/uio.h>
//#include <asm/uaccess.h>
#include "ahci.h"
#include "ahci_elphel.h"
#include "../elphel/exif393.h"
#include "../elphel/exifa.h"
#include "../elphel/jpeghead.h"
//#include "../elphel/circbuf.h"
#include <elphel/elphel393-mem.h>
@@ -39,12 +47,11 @@
/* Property names from device tree, these are specific for the controller */
#define PROP_NAME_CLB_OFFS "clb_offs"
#define PROP_NAME_FB_OFFS "fb_offs"
/** Maximum number of sectors for READ DMA or WRITE DMA commands */
#define MAX_LBA_COUNT 0xff
/** Maximum number of sectors for READ DMA EXT or WRITE_DMA EXT commands */
#define MAX_LBA_COUNT_EXT 0xffff
static struct ata_port_operations ahci_elphel_ops;
static const struct ata_port_info ahci_elphel_port_info;
@@ -54,20 +61,15 @@ static const struct attribute_group dev_attr_root_group;
static bool load_driver = false;

static void elphel_cmd_issue(struct ata_port *ap, uint64_t start, uint16_t count, struct fvec *sgl, unsigned int elem, uint8_t cmd);
static int init_buffers(struct device *dev, struct frame_buffers *buffs);
static void init_vectors(struct elphel_ahci_priv *dpriv);
static void deinit_buffers(struct device *dev, struct frame_buffers *buffs);
static inline struct elphel_ahci_priv *dev_get_dpriv(struct device *dev);
static void finish_cmd(struct device *dev, struct elphel_ahci_priv *dpriv);
static int process_cmd(struct device *dev, struct elphel_ahci_priv *dpriv, struct ata_port *port);
//static void start_cmd(struct device *dev, struct elphel_ahci_priv *dpriv, struct ata_port *port);
static inline size_t get_size_from(const struct fvec *vects, int index, size_t offset, int all);

static ssize_t set_load_flag(struct device *dev, struct device_attribute *attr,
const char *buff, size_t buff_sz)
@@ -111,6 +113,43 @@ static void elphel_defer_load(struct device *dev)
iounmap(ctrl_ptr);
}
static irqreturn_t elphel_irq_handler(int irq, void * dev_instance)
{
irqreturn_t handled;
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv = host->private_data;
struct ata_port *port = host->ports[DEFAULT_PORT_NUM];
void __iomem *port_mmio = ahci_port_base(port);
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
uint32_t irq_stat, host_irq_stat;
if (dpriv->flags & IRQ_SIMPLE) {
/* handle interrupt */
host_irq_stat = readl(hpriv->mmio + HOST_IRQ_STAT);
if (!host_irq_stat)
return IRQ_NONE;
dpriv->flags &= ~IRQ_SIMPLE;
irq_stat = readl(port_mmio + PORT_IRQ_STAT);
printk(KERN_DEBUG "irq_stat = 0x%x, host irq_stat = 0x%x\n", irq_stat, host_irq_stat);
// writel(irq_stat, port_mmio + PORT_IRQ_STAT);
writel(0xffffffff, port_mmio + PORT_IRQ_STAT);
writel(host_irq_stat, hpriv->mmio + HOST_IRQ_STAT);
handled = IRQ_HANDLED;
// if (proc_cmd(host->dev, dpriv, host->ports[0]) == 0)
// finish_cmd(host->dev, dpriv);
} else {
/* pass handling to AHCI level */
handled = ahci_single_irq_intr(irq, dev_instance);
}
return handled;
}
// What about port_stop and freeing/unmapping ?
// Or at least check if it is re-started and memory is already allocated/mapped
static int elphel_port_start(struct ata_port *ap)
@@ -213,6 +252,12 @@ static int elphel_drv_probe(struct platform_device *pdev)
if (!dpriv)
return -ENOMEM;
ret = init_buffers(dev, &dpriv->fbuffs);
if (ret != 0)
return ret;
// sg_init_table(dpriv->sgl, MAX_DATA_CHUNKS);
init_vectors(dpriv);
match = of_match_device(ahci_elphel_of_match, &pdev->dev);
if (!match)
return -EINVAL;
@@ -234,7 +279,6 @@ static int elphel_drv_probe(struct platform_device *pdev)
ahci_platform_disable_resources(hpriv);
return ret;
}
/* reassign interrupt handler */
int rc;
unsigned int irq_flags = IRQF_SHARED;
@@ -254,7 +298,10 @@ static int elphel_drv_probe(struct platform_device *pdev)
static int elphel_drv_remove(struct platform_device *pdev)
{
struct elphel_ahci_priv *dpriv = dev_get_dpriv(&pdev->dev);

dev_info(&pdev->dev, "removing Elphel AHCI driver");
deinit_buffers(&pdev->dev, &dpriv->fbuffs);
sysfs_remove_group(&pdev->dev.kobj, &dev_attr_root_group);
ata_platform_remove_one(pdev);
@@ -293,7 +340,7 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);

dev_dbg(ap->dev, ">>> CFIS dump, data from libahci, phys addr = 0x%x:\n", pp->cmd_tbl_dma);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, cmd_tbl, 20);

if (is_atapi) {
@@ -331,154 +378,1311 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
}
/* ============================================== */
#define TEST_BUFF_SZ 512
#define MAX_IOVECTORS 10
/** The position of size field in copy buffer */
#define VECTOR_SZ_POS 0
/** The position of vector pointer field in copy buffer */
#define POINTER_POS 1
/** Physical disk block size */
#define PHY_BLOCK_SIZE 512
//#define PHY_BLOCK_SIZE 4096
#define JPEG_MARKER_LEN 2
#define SG_TBL_SZ 256
/** Include REM buffer to total size calculation */
#define INCLUDE_REM 1
/** Exclude REM buffer from total size calculation */
#define EXCLUDE_REM 0
#define DEBUG_DONT_WRITE

unsigned char app15[ALIGNMENT_SIZE] = {0xff, 0xef};

/* this should be placed to system includes directory */
struct frame_data {
unsigned int sensor_port;
int cirbuf_ptr;
int jpeg_len;
int meta_index;
};
/* end of system includes */
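/*
 * A minimal user-space sketch (an illustration, not part of the driver) of
 * how struct frame_data is expected to arrive: rawdev_write() below rejects
 * any write that is not exactly sizeof(struct frame_data) bytes, so the
 * structure is filled and pushed in a single write() call. The sysfs
 * attribute path is hypothetical.
 *
 *     struct frame_data fdata = { .sensor_port = 0, .cirbuf_ptr = 0,
 *                                 .jpeg_len = 0, .meta_index = 0 };
 *     int fd = open("/sys/devices/.../rawdev_write", O_WRONLY);
 *     if (fd >= 0) {
 *         write(fd, &fdata, sizeof(fdata));
 *         close(fd);
 *     }
 */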
/* Debug functions */
#define DATA_BUFF_SIZE 500000
unsigned char *g_jpg_data_0;
unsigned char *g_jpg_data_1;
int use_preset;
size_t g_jpg_0_sz;
size_t g_jpg_1_sz;
ssize_t g_exif_sz;
ssize_t g_jpg_hdr_sz;

static size_t exif_get_data_tst(int sensor_port, unsigned short meta_index, void *buff, size_t buff_sz, int enable)
{
int i;
const int default_exif_sz = 774;
int exif_sz;
unsigned char *dest = buff;

if (g_exif_sz >= 0 && g_exif_sz < MAX_EXIF_SIZE)
exif_sz = g_exif_sz;
else
exif_sz = default_exif_sz;

if (buff_sz < exif_sz || enable == 0)
return 0;

dest[0] = 0xff;
dest[1] = 0xe1;
for (i = 2; i < exif_sz; i++) {
dest[i] = 0xa1;
}

return exif_sz;
}

static size_t jpeghead_get_data_tst(int sensor_port, void *buff, size_t buff_sz, size_t offs)
{
int i;
const int default_jpeghdr_sz = 623;
int jpeghdr_sz;
unsigned char *dest = buff;

if (g_jpg_hdr_sz >= 0 && g_jpg_hdr_sz < JPEG_HEADER_MAXSIZE)
jpeghdr_sz = g_jpg_hdr_sz;
else
jpeghdr_sz = default_jpeghdr_sz;

if (buff_sz < jpeghdr_sz)
return 0;

dest[0] = 0xff;
dest[1] = 0xd8;
dest[2] = 0xff;
dest[3] = 0xe0;
for (i = 4; i < jpeghdr_sz; i++) {
dest[i] = 0xb2;
}
return jpeghdr_sz;
}

#include <linux/random.h>
static int circbuf_get_ptr_tst(int sensor_port, size_t offset, size_t len, struct fvec *vect_0, struct fvec *vect_1)
{
int ret = 1;
size_t jpg_0_sz;
size_t jpg_1_sz;

get_random_bytes(&jpg_0_sz, sizeof(size_t));
get_random_bytes(&jpg_1_sz, sizeof(size_t));
if (use_preset == 0) {
if (jpg_0_sz != 0)
jpg_0_sz = jpg_0_sz % (DATA_BUFF_SIZE - 1);
if (jpg_1_sz != 0)
jpg_1_sz = jpg_1_sz % (DATA_BUFF_SIZE - 1);
} else if (use_preset == 1) {
if (g_jpg_0_sz != 0)
jpg_0_sz = jpg_0_sz % g_jpg_0_sz;
if (g_jpg_1_sz != 0)
jpg_1_sz = jpg_1_sz % g_jpg_1_sz;
} else if (use_preset == 2) {
jpg_0_sz = g_jpg_0_sz;
jpg_1_sz = g_jpg_1_sz;
}
if (g_jpg_0_sz != 0)
memset(g_jpg_data_0, 0xc3, jpg_0_sz);
if (g_jpg_1_sz != 0)
memset(g_jpg_data_1, 0xd4, jpg_1_sz);
if (g_jpg_0_sz != 0) {
vect_0->iov_base = g_jpg_data_0;
// vect_0->iov_dma = 0;
vect_0->iov_len = jpg_0_sz;
} else {
vect_0->iov_base = NULL;
vect_0->iov_dma = 0;
vect_0->iov_len = 0;
}
if (g_jpg_1_sz != 0) {
vect_1->iov_base = g_jpg_data_1;
// vect_1->iov_dma = 0;
vect_1->iov_len = jpg_1_sz;
ret = 2;
} else {
vect_1->iov_base = NULL;
vect_1->iov_dma = 0;
vect_1->iov_len = 0;
}
return ret;
}
static void dump_frame(struct fvec *vects)
{
int i;
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
printk(KERN_DEBUG ">>> dump data chunk %d, size %u\n", i, vects[i].iov_len);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, vects[i].iov_base, vects[i].iov_len);
}
}
static int check_chunks(struct fvec *vects)
{
int i;
int ret = 0;
size_t sz = 0;
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
if (i != CHUNK_REM) {
sz += vects[i].iov_len;
if ((vects[i].iov_len % ALIGNMENT_SIZE) != 0) {
dev_err(NULL, "ERROR: unaligned write from slot %d, length %u\n", i, vects[i].iov_len);
ret = -1;
}
if ((vects[i].iov_dma % ALIGNMENT_ADDR) != 0) {
dev_err(NULL, "ERROR: unaligned DMA address in slot %d: 0x%x\n", i, vects[i].iov_dma);
ret = -1;
}
}
if ((i == CHUNK_ALIGN_0 || i == CHUNK_ALIGN_1) && vects[i].iov_len > 2 * ALIGNMENT_SIZE) {
dev_err(NULL, "ERROR: alignment buffer %d overflow\n", i);
ret = -1;
}
}
if ((sz % PHY_BLOCK_SIZE) != 0) {
dev_err(NULL, "ERROR: total length of the transaction is not aligned to sector boundary, total length %u\n", sz);
ret = -1;
} else {
dev_err(NULL, ">>> +++ frame is OK +++\n");
}
return ret;
}
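/*
 * Worked example of what check_chunks() accepts, assuming ALIGNMENT_SIZE is
 * 32: slots of 1024 + 512 + 512 bytes pass (each is a multiple of 32 and the
 * 2048-byte total is exactly 4 sectors), whereas a 1000-byte slot or a
 * 2000-byte total would be flagged as unaligned.
 */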
static ssize_t data_0_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
if (kstrtoul(buff, 10, &g_jpg_0_sz) != 0)
return -EINVAL;
printk(KERN_DEBUG ">>> preset DATA_0 length: %u\n", g_jpg_0_sz);
return buff_sz;
}
static ssize_t data_1_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
if (kstrtoul(buff, 10, &g_jpg_1_sz) != 0)
return -EINVAL;
printk(KERN_DEBUG ">>> preset DATA_1 length: %u\n", g_jpg_1_sz);
return buff_sz;
}
static ssize_t data_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
if (kstrtoul(buff, 10, &use_preset) != 0)
return -EINVAL;
return buff_sz;
}
static ssize_t exif_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
if (kstrtol(buff, 10, &g_exif_sz) != 0)
return -EINVAL;
printk(KERN_DEBUG ">>> preset EXIF length: %u\n", g_exif_sz);
return buff_sz;
}
static ssize_t hdr_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
if (kstrtol(buff, 10, &g_jpg_hdr_sz) != 0)
return -EINVAL;
printk(KERN_DEBUG ">>> preset JPEGHEADER length: %u\n", g_jpg_hdr_sz);
return buff_sz;
}
static DEVICE_ATTR(data_0_sz, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, NULL, data_0_write);
static DEVICE_ATTR(data_1_sz, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, NULL, data_1_write);
static DEVICE_ATTR(data_proc, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, NULL, data_write);
static DEVICE_ATTR(exif_sz, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, NULL, exif_write);
static DEVICE_ATTR(jpg_hdr_sz, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, NULL, hdr_write);
/* End of debug functions */
/** Map buffer vectors to S/G list and return the number of vectors mapped */
static int map_vectors(struct elphel_ahci_priv *dpriv)
{
int i;
int index = 0;
int finish = 0;
size_t total_sz = 0;
size_t tail;
struct fvec *chunks = dpriv->data_chunks;
struct fvec vect;
for (i = dpriv->curr_data_chunk; i < MAX_DATA_CHUNKS; i++) {
if (i == CHUNK_REM)
/* remainder should never be processed */
continue;
if (i == dpriv->curr_data_chunk) {
total_sz = chunks[i].iov_len - dpriv->curr_data_offset;
vect.iov_base = (unsigned char *)chunks[i].iov_base + dpriv->curr_data_offset;
vect.iov_dma = chunks[i].iov_dma + dpriv->curr_data_offset;
vect.iov_len = chunks[i].iov_len - dpriv->curr_data_offset;
} else {
total_sz += chunks[i].iov_len;
// vect.iov_base = chunks[i].iov_base;
// vect.iov_len = chunks[i].iov_len;
vect = chunks[i];
}
if (total_sz > dpriv->max_data_sz) {
// truncate current buffer and finish mapping
tail = total_sz - dpriv->max_data_sz;
vect.iov_len -= tail;
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = chunks[i].iov_len - tail;
finish = 1;
} else if (unlikely(total_sz == dpriv->max_data_sz)) {
dpriv->curr_data_chunk = i;
dpriv->curr_data_offset = chunks[i].iov_len;
finish = 1;
}
printk(KERN_DEBUG "mapping data chunk number %d: total_sz = %u, vect.iov_len = %u\n", i, total_sz, vect.iov_len);
if (vect.iov_len != 0) {
// sg_set_buf(&dpriv->sgl[index++], vect.iov_base, vect.iov_len);
dpriv->sgl[index++] = vect;
}
if (finish)
break;
}
if (finish == 0) {
// frame vectors have been fully processed, stop calling me
dpriv->curr_data_chunk = MAX_DATA_CHUNKS;
dpriv->curr_data_offset = 0;
}
return index;
}
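/*
 * A sketch of how map_vectors() is driven (sizes are illustrative): with
 * max_data_sz = 1024 and two chunks of 512 + 1024 bytes, the first call maps
 * 512 + 512 bytes into sgl[] and leaves curr_data_chunk/curr_data_offset
 * pointing 512 bytes into the second chunk; the next call maps the remaining
 * 512 bytes and sets curr_data_chunk = MAX_DATA_CHUNKS, after which
 * process_cmd() gets zero S/G entries and the caller invokes finish_cmd().
 */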
static inline void vectcpy(struct fvec *dest, void *src, size_t len)
{
unsigned char *d = (unsigned char *)dest->iov_base;
memcpy(d + dest->iov_len, src, len);
dest->iov_len += len;
}
static inline void vectmov(struct fvec *vec, size_t len)
{
if (vec->iov_len >= len) {
vec->iov_base = (unsigned char *)vec->iov_base + len;
vec->iov_dma += len;
vec->iov_len -= len;
}
}
static inline void vectshrink(struct fvec *vec, size_t len)
{
if (vec->iov_len >= len) {
vec->iov_len -= len;
}
}
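/*
 * Illustration of the span discipline implemented by the helpers above:
 * starting from an fvec with iov_len = 0, vectcpy(dest, src, 100) appends
 * 100 bytes at iov_base + iov_len and grows the vector; vectmov(vec, 16)
 * consumes 16 bytes from the head (iov_base and iov_dma advance, iov_len
 * drops to 84); vectshrink(vec, 84) trims the tail back to an empty vector.
 */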
static inline size_t align_bytes_num(size_t data_len, size_t align_len)
{
size_t rem = data_len % align_len;
if (rem == 0)
return 0;
else
return align_len - rem;
}
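/*
 * Worked example: align_bytes_num(100, 32) = 32 - (100 % 32) = 28, i.e. 28
 * padding bytes bring a 100-byte buffer to the next 32-byte boundary, while
 * an already aligned length such as 96 yields 0.
 */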
/** This helper function is used to position a pointer @e offset bytes from the end
* of a buffer. DMA handle is not updated intentionally as it is not needed during copying */
static inline unsigned char *vectrpos(struct fvec *vec, size_t offset)
{
return (unsigned char *)vec->iov_base + (vec->iov_len - offset);
}
/** Check if bus address will be properly aligned after moving vector forward by @e len bytes */
static inline int is_addr_aligned(const struct fvec *vec, size_t len)
{
int ret = 0;
if (vec->iov_len >= len) {
if (((vec->iov_dma + len) % ALIGNMENT_ADDR) == 0)
ret = 1;
}
return ret;
}

static size_t align_address(struct fvec *dest, struct fvec *src, size_t len)
{
size_t cut_bytes = (src->iov_dma + len) % ALIGNMENT_ADDR;
size_t pad_len = ALIGNMENT_SIZE - cut_bytes;
if (pad_len <= len) {
len -= pad_len;
}
if (len != 0) {
len += cut_bytes;
}
/* set marker length */
app15[3] = pad_len - JPEG_MARKER_LEN;
printk(KERN_DEBUG ">>> copy %u bytes from APP15 to other buffer\n", pad_len);
vectcpy(dest, app15, pad_len);
return len;
}

static void align_frame(struct device *dev, struct elphel_ahci_priv *dpriv)
{
int is_delayed = 0;
unsigned char *src;
size_t len, total_sz, data_len;
size_t max_len = dpriv->fbuffs.common_buff.iov_len;
struct frame_buffers *fbuffs = &dpriv->fbuffs;
struct fvec *chunks = dpriv->data_chunks;
struct fvec *cbuff = &chunks[CHUNK_COMMON];

total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM);
if (total_sz < PHY_BLOCK_SIZE) {
/* the frame length is less than sector size, delay this frame */
dev_dbg(dev, "frame size is less than sector size: %u bytes; delay recording\n", total_sz);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_LEADER].iov_base, chunks[CHUNK_LEADER].iov_len);
vectshrink(&chunks[CHUNK_LEADER], chunks[CHUNK_LEADER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_EXIF].iov_base, chunks[CHUNK_EXIF].iov_len);
vectshrink(&chunks[CHUNK_EXIF], chunks[CHUNK_EXIF].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_HEADER].iov_base, chunks[CHUNK_HEADER].iov_len);
vectshrink(&chunks[CHUNK_HEADER], chunks[CHUNK_HEADER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
return;
}

dma_sync_single_for_cpu(dev, fbuffs->common_buff.iov_dma, fbuffs->common_buff.iov_len, DMA_TO_DEVICE);
dma_sync_single_for_cpu(dev, chunks[CHUNK_DATA_0].iov_dma, chunks[CHUNK_DATA_0].iov_len, DMA_TO_DEVICE);
dma_sync_single_for_cpu(dev, chunks[CHUNK_DATA_1].iov_dma, chunks[CHUNK_DATA_1].iov_len, DMA_TO_DEVICE);
/* copy remainder of previous frame to the beginning of common buffer */
if (likely(chunks[CHUNK_REM].iov_len != 0)) {
len = chunks[CHUNK_REM].iov_len;
printk(KERN_DEBUG ">>> copy %u bytes from REM to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_REM].iov_base, len);
vectshrink(&chunks[CHUNK_REM], chunks[CHUNK_REM].iov_len);
}
printk(KERN_DEBUG ">>> trying to read data to sg list\n"); /* copy JPEG marker */
elphel_read_dma(port, lba_addr, sg_elems, sgl, sg_elems); len = chunks[CHUNK_LEADER].iov_len;
printk(KERN_DEBUG ">>> command has been issued\n"); printk(KERN_DEBUG ">>> copy %u bytes from LEADER to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_LEADER].iov_base, len);
vectshrink(&chunks[CHUNK_LEADER], chunks[CHUNK_LEADER].iov_len);
/* copy Exif if present */
if (chunks[CHUNK_EXIF].iov_len != 0) {
len = chunks[CHUNK_EXIF].iov_len;
printk(KERN_DEBUG ">>> copy %u bytes from EXIF to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_EXIF].iov_base, len);
vectshrink(&chunks[CHUNK_EXIF], chunks[CHUNK_EXIF].iov_len);
}
/* copy JPEG header */
len = chunks[CHUNK_HEADER].iov_len;
printk(KERN_DEBUG ">>> copy %u bytes from HEADER to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_HEADER].iov_base, len);
vectshrink(&chunks[CHUNK_HEADER], chunks[CHUNK_HEADER].iov_len);
/* check if we have enough data for further processing */
len = chunks[CHUNK_DATA_0].iov_len + chunks[CHUNK_DATA_1].iov_len + chunks[CHUNK_TRAILER].iov_len;
if (len < PHY_BLOCK_SIZE) {
data_len = cbuff->iov_len % PHY_BLOCK_SIZE;
dev_dbg(dev, "JPEG data size is too small: %u; unaligned data length: %u\n", len, data_len);
if (data_len + len <= fbuffs->rem_buff.iov_len) {
/* place data to REM buffer and continue */
src = vectrpos(cbuff, data_len);
printk(KERN_DEBUG ">>> 5.0.1 copy %u bytes from COMMON to REM buffer \n", len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(cbuff, data_len);
printk(KERN_DEBUG ">>> 5.0.1 copy %u bytes from DATA_0 to REM buffer \n", len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
printk(KERN_DEBUG ">>> 5.0.1 copy %u bytes from DATA_1 to REM buffer \n", len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
printk(KERN_DEBUG ">>> 5.0.1 copy %u bytes from TRAILER to REM buffer \n", len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
/* the REM buffer is too short, process some buffers to align COMMON buffer to sector boundary */
dev_dbg(dev, "REM buffer is too short, align COMMON to sector boundary\n");
data_len = PHY_BLOCK_SIZE - data_len;
if (data_len >= chunks[CHUNK_DATA_0].iov_len) {
/* copy whole DATA_0 to COMMON */
printk(KERN_DEBUG ">>> 5.0.2 copy %u bytes from DATA_0 to COMMON buffer \n", chunks[CHUNK_DATA_0].iov_len);
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
data_len -= chunks[CHUNK_DATA_0].iov_len;
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
} else {
/* copy a part of DATA_0 to COMMON */
printk(KERN_DEBUG ">>> 5.0.2 copy %u bytes from DATA_0 to COMMON buffer \n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(cbuff, src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
data_len = 0;
}
if (data_len != 0) {
if (data_len >= chunks[CHUNK_DATA_1].iov_len) {
/* copy whole DATA_1 to COMMON */
printk(KERN_DEBUG ">>> 5.0.2 copy %u bytes from DATA_1 to COMMON buffer \n", chunks[CHUNK_DATA_1].iov_len);
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
data_len -= chunks[CHUNK_DATA_1].iov_len;
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
} else {
/* copy a part of DATA_1 to COMMON */
printk(KERN_DEBUG ">>> 5.0.2 copy %u bytes from DATA_1 to COMMON buffer \n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(cbuff, src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
data_len = 0;
}
if (data_len != 0) {
/* copy a part of TRAILER to COMMON */
printk(KERN_DEBUG ">>> 5.0.2 copy %u bytes from TRAILER to COMMON buffer \n", data_len);
src = vectrpos(&chunks[CHUNK_TRAILER], data_len);
vectcpy(cbuff, src, data_len);
vectshrink(&chunks[CHUNK_TRAILER], data_len);
data_len = 0;
}
}
/* COMMON buffer is aligned to sector boundary, copy all other data to REM buffer */
if (chunks[CHUNK_DATA_0].iov_len != 0) {
printk(KERN_DEBUG ">>> 5.0.3 copy %u bytes from DATA_0 to REM buffer \n", chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
}
if (chunks[CHUNK_DATA_1].iov_len != 0) {
printk(KERN_DEBUG ">>> 5.0.3 copy %u bytes from DATA_1 to REM buffer \n", chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
}
if (chunks[CHUNK_TRAILER].iov_len != 0) {
printk(KERN_DEBUG ">>> 5.0.3 copy %u bytes from TRAILER to REM buffer \n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
}
return;
}
/* there is enough data in data buffers, align common buffer */
len = align_bytes_num(cbuff->iov_len, ALIGNMENT_SIZE);
if (likely(len != 0)) {
dev_dbg(dev, "align COMMON buffer, add %u bytes from other buffers\n", len);
if ((len + ALIGNMENT_ADDR) <= chunks[CHUNK_DATA_0].iov_len) {
printk(KERN_DEBUG ">>> check alignment of DMA address: 0x%x\n", chunks[CHUNK_DATA_0].iov_dma);
if (is_addr_aligned(&chunks[CHUNK_DATA_0], len) == 0) {
/* we are going to copy =len= bytes from DATA_0 buffer to align COMMON buffer to
* ALIGNMNET_SIZE boundary which will make the starting address of DATA_0 buffer
* unaligned to ALIGNMENT_ADDR boundary, fix this by moving additional bytes from
* the beginning of DATA_0 buffer and padding COMMON buffer with APP15 marker */
len = align_address(cbuff, &chunks[CHUNK_DATA_0], len);
}
printk(KERN_DEBUG ">>> 5.1.1 copy %u bytes from DATA_0 to common buffer \n", len);
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, len);
vectmov(&chunks[CHUNK_DATA_0], len);
} else {
/* handle special case: JPEG data rolls over circular buffer boundary and the
* data chunk in the end of the buffer is less than we need for alignment; copy
* all data from first chunk, discard it, copy remainder from second chunk
* and from trailer if needed
*/
if (len <= (chunks[CHUNK_DATA_0].iov_len + chunks[CHUNK_DATA_1].iov_len + chunks[CHUNK_TRAILER].iov_len + ALIGNMENT_ADDR)) {
printk(KERN_DEBUG ">>> 5.2 copy %u bytes from DATA_0 to common buffer\n", chunks[CHUNK_DATA_0].iov_len);
vectcpy(cbuff, chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
len -= chunks[CHUNK_DATA_0].iov_len;
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
if (chunks[CHUNK_DATA_1].iov_len >= len) {
printk(KERN_DEBUG ">>> check alignment of DMA address: 0x%x\n", chunks[CHUNK_DATA_1].iov_dma);
if (is_addr_aligned(&chunks[CHUNK_DATA_1], len) == 0) {
len = align_address(cbuff, &chunks[CHUNK_DATA_1], len);
}
printk(KERN_DEBUG ">>> 5.2.4 copy %u bytes from DATA_1 to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, len);
vectmov(&chunks[CHUNK_DATA_1], len);
len = 0;
} else {
printk(KERN_DEBUG ">>> 5.2.5 copy %u bytes from DATA_1 to common buffer\n", chunks[CHUNK_DATA_1].iov_len);
vectcpy(cbuff, chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
len -= chunks[CHUNK_DATA_1].iov_len;
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
}
if (chunks[CHUNK_TRAILER].iov_len >= len) {
printk(KERN_DEBUG ">>> 5.2.6 copy %u bytes from TRAILER to common buffer\n", len);
vectcpy(cbuff, chunks[CHUNK_TRAILER].iov_base, len);
vectmov(&chunks[CHUNK_TRAILER], len);
len = 0;
} else {
printk(KERN_DEBUG ">>> 5.2.7 copy %u bytes from TRAILER to common buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(cbuff, chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
len -= chunks[CHUNK_TRAILER].iov_len;
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
} else {
/* JPEG data is too short, place all to remainder buffer */
data_len = cbuff->iov_len % PHY_BLOCK_SIZE;
src = vectrpos(cbuff, data_len);
printk(KERN_DEBUG ">>> 5.2.1 copy %u bytes from COMMON to REM buffer\n", data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(cbuff, data_len);
printk(KERN_DEBUG ">>> 5.2.1 copy %u bytes from DATA_0 to REM buffer\n", chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
printk(KERN_DEBUG ">>> 5.2.1 copy %u bytes from DATA_1 to REM buffer\n", chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
printk(KERN_DEBUG ">>> 5.2.1 copy %u bytes from TRAILER to REM buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
}
}
/* JPEG header and Exif data is aligned to ALIGNMENT_SIZE boundary in a common buffer now,
* the entire frame should be aligned to physical sector boundary and remainder should be
* copied to a buffer for recording during next frame */
total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM);
len = total_sz % PHY_BLOCK_SIZE;
chunks[CHUNK_ALIGN_0].iov_base = (unsigned char *)cbuff->iov_base + cbuff->iov_len;
chunks[CHUNK_ALIGN_0].iov_len = 0;
dev_dbg(dev, "total frame size: %u, unaligned bytes to sector boundary: %u\n", total_sz, len);
if (likely(chunks[CHUNK_DATA_1].iov_len == 0)) {
/* JPEG data is not split, align just one buffer to physical sector boundary if it is not already aligned */
if (chunks[CHUNK_DATA_0].iov_len + chunks[CHUNK_TRAILER].iov_len >= len) {
if (likely(len >= chunks[CHUNK_TRAILER].iov_len)) {
data_len = len - chunks[CHUNK_TRAILER].iov_len;
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
printk(KERN_DEBUG ">>> 6.1 copy %u bytes from DATA_0 to remainder buffer\n", data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
printk(KERN_DEBUG ">>> 6.1 copy %u bytes from TRAILER to remainder buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
/* align DATA_0 buffer, break trailing marker and place first part to ALIGN_0 buffer and second part to REM */
data_len = chunks[CHUNK_DATA_0].iov_len % ALIGNMENT_SIZE;
printk(KERN_DEBUG ">>> 6.1.2 copy %u bytes from DATA_0 to ALIGN_0 buffer\n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_ALIGN_0], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
if (len == 0)
len = chunks[CHUNK_TRAILER].iov_len;
printk(KERN_DEBUG ">>> 6.1.2 copy %u bytes from TRAILER to ALIGN_0 buffer\n", len);
vectcpy(&chunks[CHUNK_ALIGN_0], chunks[CHUNK_TRAILER].iov_base, len);
vectmov(&chunks[CHUNK_TRAILER], len);
printk(KERN_DEBUG ">>> 6.1.2 copy %u bytes from TRAILER to remainder buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
} else {
/* the number of bytes needed to align to sector boundary is greater than the number of bytes left
* in DATA_0 and TRAILER buffers, realign COMMON buffer and delay DATA_0 and TRAILING buffers recording */
data_len = cbuff->iov_len % PHY_BLOCK_SIZE;
printk(KERN_DEBUG ">>> 6.1.3 copy %u bytes from COMMON to remainder buffer\n", data_len);
vectcpy(&chunks[CHUNK_REM], cbuff->iov_base, data_len);
vectshrink(cbuff, data_len);
printk(KERN_DEBUG ">>> 6.1.3 copy %u bytes from DATA_0 to remainder buffer\n", chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
printk(KERN_DEBUG ">>> 6.1.3 copy %u bytes from TRAILER to remainder buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
} else {
printk(KERN_DEBUG ">>> 6.2 <<<\n");
/* JPEG data rolls over circular buffer boundary */
struct kvec data_rem;
data_len = chunks[CHUNK_DATA_0].iov_len % ALIGNMENT_SIZE;
size_t align_len = ALIGNMENT_SIZE - data_len;
chunks[CHUNK_ALIGN_1].iov_base = (unsigned char *)chunks[CHUNK_ALIGN_0].iov_base + chunks[CHUNK_ALIGN_0].iov_len;
chunks[CHUNK_ALIGN_1].iov_len = 0;
if (chunks[CHUNK_DATA_1].iov_len >= len + align_len) {
/* DATA_1 buffer contains enough bytes to align frame to sector boundary,
* align DATA_0 buffer to ALIGNMENT_SIZE boundary only */
if (data_len != 0) {
printk(KERN_DEBUG ">>> 6.2 copy %u bytes from DATA_0 to ALIGN_0 buffer\n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
vectcpy(&chunks[CHUNK_ALIGN_0], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
printk(KERN_DEBUG ">>> check alignment of DMA address: 0x%x\n", chunks[CHUNK_DATA_1].iov_dma);
if (is_addr_aligned(&chunks[CHUNK_DATA_1], align_len) == 0) {
align_len = align_address(&chunks[CHUNK_ALIGN_0], &chunks[CHUNK_DATA_1], align_len);
}
printk(KERN_DEBUG ">>> 6.2 copy %u bytes from DATA_1 to ALIGN_0 buffer\n", align_len);
vectcpy(&chunks[CHUNK_ALIGN_0], chunks[CHUNK_DATA_1].iov_base, align_len);
vectmov(&chunks[CHUNK_DATA_1], align_len);
/* adjust ALIGN_1 position */
chunks[CHUNK_ALIGN_1].iov_base = (unsigned char *)chunks[CHUNK_ALIGN_0].iov_base + chunks[CHUNK_ALIGN_0].iov_len;
chunks[CHUNK_ALIGN_1].iov_len = 0;
}
} else {
/* there is no enough data in the second JPEG data buffer, delay its recording and
* align first buffer to physical sector boundary */
data_len = (cbuff->iov_len + chunks[CHUNK_DATA_0].iov_len) % PHY_BLOCK_SIZE;
printk(KERN_DEBUG ">>> 6.2.2 align to data_len: %u\n", data_len);
if (data_len <= chunks[CHUNK_DATA_0].iov_len) {
src = vectrpos(&chunks[CHUNK_DATA_0], data_len);
printk(KERN_DEBUG ">>> 6.2.2 copy %u bytes from DATA_0 to REM buffer\n", data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_0], data_len);
} else {
data_len -= chunks[CHUNK_DATA_0].iov_len;
src = vectrpos(cbuff, data_len);
printk(KERN_DEBUG ">>> 6.2.2 copy %u bytes from COMMON to REM buffer\n", data_len);
vectcpy(&chunks[CHUNK_REM], cbuff->iov_base, data_len);
vectshrink(cbuff, data_len);
printk(KERN_DEBUG ">>> 6.2.2 copy %u bytes from DATA_0 to REM buffer\n", chunks[CHUNK_DATA_0].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len);
vectshrink(&chunks[CHUNK_DATA_0], chunks[CHUNK_DATA_0].iov_len);
/* COMMON buffer has been updated, adjust ALIGN_0 and ALIGN_1 position */
chunks[CHUNK_ALIGN_0].iov_base = (unsigned char *)cbuff->iov_base + cbuff->iov_len;
chunks[CHUNK_ALIGN_0].iov_len = 0;
chunks[CHUNK_ALIGN_1].iov_base = (unsigned char *)chunks[CHUNK_ALIGN_0].iov_base + chunks[CHUNK_ALIGN_0].iov_len;
chunks[CHUNK_ALIGN_1].iov_len = 0;
}
printk(KERN_DEBUG ">>> 6.2.2 copy %u bytes from DATA_1 to REM buffer\n", chunks[CHUNK_DATA_1].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len);
vectshrink(&chunks[CHUNK_DATA_1], chunks[CHUNK_DATA_1].iov_len);
printk(KERN_DEBUG ">>> 6.2.2 copy %u bytes from TRAILER to REM buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
printk(KERN_DEBUG ">>> 6.3 <<<\n");
/* the total length could have changed by the moment, recalculate */
total_sz = get_size_from(chunks, 0, 0, INCLUDE_REM);
len = total_sz % PHY_BLOCK_SIZE;
dev_dbg(dev, "total frame size: %u, unaligned bytes to sector boundary: %u\n", total_sz, len);
if (chunks[CHUNK_DATA_1].iov_len + chunks[CHUNK_TRAILER].iov_len > len) {
if (len >= chunks[CHUNK_TRAILER].iov_len) {
/* DATA_1 and TRAILER buffers contain enough data to align the current frame to sector boundary */
data_len = len - chunks[CHUNK_TRAILER].iov_len;
printk(KERN_DEBUG ">>> 6.3.1 copy %u bytes from DATA_1 to REM buffer\n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_REM], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
printk(KERN_DEBUG ">>> 6.3.1 copy %u bytes from TRAILER to REM buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else if (len != 0) {
/* break trailing marker and copy the remaining data to REM buffer, align DATA_1 and TRAILER parts in
* ALIGN_1 buffer */
data_len = chunks[CHUNK_DATA_1].iov_len % ALIGNMENT_SIZE;
printk(KERN_DEBUG ">>> 6.3.2 copy %u bytes from DATA_1 to ALIGN_1 buffer\n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_ALIGN_1], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
printk(KERN_DEBUG ">>> 6.3.2 copy %u bytes from TRAILER to ALIGN_1 buffer\n", len);
vectcpy(&chunks[CHUNK_ALIGN_1], chunks[CHUNK_TRAILER].iov_base, len);
vectmov(&chunks[CHUNK_TRAILER], len);
printk(KERN_DEBUG ">>> 6.3.2 copy %u bytes from TRAILER to REM buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
/* the frame is aligned to sector boundary, but DATA_1 and TRAILER buffers may be
* unaligned to ALIGNMENT_SIZE boundary */
data_len = chunks[CHUNK_DATA_1].iov_len % ALIGNMENT_SIZE;
if (data_len >= chunks[CHUNK_TRAILER].iov_len) {
printk(KERN_DEBUG ">>> 6.3.3 copy %u bytes from DATA_1 to ALIGN_1 buffer\n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_ALIGN_1], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
printk(KERN_DEBUG ">>> 6.3.3 copy %u bytes from TRAILER to ALIGN_1 buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_ALIGN_1], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
} else {
/* break trailing marker and copy the remaining data to REM buffer */
data_len = chunks[CHUNK_DATA_1].iov_len % ALIGNMENT_SIZE - len;
printk(KERN_DEBUG ">>> 6.3.4 copy %u bytes from DATA_1 to ALIGN_1 buffer\n", data_len);
src = vectrpos(&chunks[CHUNK_DATA_1], data_len);
vectcpy(&chunks[CHUNK_ALIGN_1], src, data_len);
vectshrink(&chunks[CHUNK_DATA_1], data_len);
printk(KERN_DEBUG ">>> 6.3.4 copy %u bytes from TRAILER to ALIGN_1 buffer\n", len);
vectcpy(&chunks[CHUNK_ALIGN_1], chunks[CHUNK_TRAILER].iov_base, len);
vectmov(&chunks[CHUNK_TRAILER], len);
printk(KERN_DEBUG ">>> 6.3.4 copy %u bytes from TRAILER to REM buffer\n", chunks[CHUNK_TRAILER].iov_len);
vectcpy(&chunks[CHUNK_REM], chunks[CHUNK_TRAILER].iov_base, chunks[CHUNK_TRAILER].iov_len);
vectshrink(&chunks[CHUNK_TRAILER], chunks[CHUNK_TRAILER].iov_len);
}
}
}
}

/* debug sanity check, should not happen */
if (cbuff->iov_len >= max_len) {
dev_err(NULL, "ERROR: the number of bytes copied to common buffer exceeds its size\n");
}
}
/** TEST FUNCTION: stuff frame data to align the frame to disk block boundary */
//static void stuff_frame(struct fvec *vects)
//{
// int i;
// size_t total = 0;
// size_t stuffing = 0;
//
// for (i = 0; i < MAX_DATA_CHUNKS; i++) {
// total += vects[i].iov_len;
// }
//
// stuffing = PHY_BLOCK_SIZE - total % PHY_BLOCK_SIZE;
//
// printk(KERN_DEBUG "%s: total = %u, stuffing = %u\n", __func__, total, stuffing);
// if (stuffing == PHY_BLOCK_SIZE)
// return;
//
// if (stuffing < 3) {
// // the number of stuffing bytes is less then marker plus one byte, add one more sector
// stuffing += PHY_BLOCK_SIZE;
// }
// vects[CHUNK_STUFFING].iov_len = stuffing;
//}
static void dump_sg_list(const struct fvec *sgl, size_t elems)
{
int i;
printk(KERN_DEBUG "dump S/G list, %u elements:\n", elems);
for (i = 0; i < elems; i++) {
printk(KERN_DEBUG "dma address: 0x%x, len: %u\n", sgl[i].iov_dma, sgl[i].iov_len);
}
printk(KERN_DEBUG "===== end of S/G list =====\n");
}
/** Calculate the number of blocks this frame will occupy. The frame must be aligned to block size */
static inline size_t get_blocks_num(struct fvec *sgl, size_t n_elem)
{
int num;
size_t total = 0;
for (num = 0; num < n_elem; num++) {
total += sgl[num].iov_len;
}
return total / PHY_BLOCK_SIZE;
}
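/*
 * Example: an S/G list of 512 + 1024 + 512 bytes totals 2048 bytes, so
 * get_blocks_num() reports 4 sectors with PHY_BLOCK_SIZE = 512; callers rely
 * on the frame having been aligned to a whole number of blocks beforehand.
 */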
/** Calculate the size of current frame in bytes starting from vector and offset given */
static inline size_t get_size_from(const struct fvec *vects, int index, size_t offset, int all)
{
int i;
size_t total = 0;
if (index >= MAX_DATA_CHUNKS || offset > vects[index].iov_len) {
dev_dbg(NULL, "nothing to process, index or offset is out of vector range: vector %d, offset %u\n", index, offset);
return 0;
}
for (i = index; i < MAX_DATA_CHUNKS; i++) {
if (i == CHUNK_REM && all == EXCLUDE_REM)
/* remainder should not be processed */
continue;
if (i == index)
total += vects[i].iov_len - offset;
else
total += vects[i].iov_len;
}
return total;
}
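/*
 * Example: with chunk lengths of 2 (LEADER) + 600 (EXIF) + 621 (HEADER) +
 * 825 (DATA_0) and all other chunks empty, get_size_from(vects, 0, 0,
 * INCLUDE_REM) returns 2048, while passing index = CHUNK_DATA_0 and
 * offset = 25 counts only the last 800 bytes of DATA_0 plus any non-empty
 * chunks after it.
 */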
/** Set vectors pointing to data buffers except for JPEG data - those are set in circbuf driver */
static void init_vectors(struct elphel_ahci_priv *dpriv)
{
struct frame_buffers *buffs = &dpriv->fbuffs;
struct fvec *chunks = dpriv->data_chunks;
chunks[CHUNK_EXIF].iov_base = buffs->exif_buff.iov_base;
chunks[CHUNK_EXIF].iov_len = 0;
chunks[CHUNK_LEADER].iov_base = buffs->jpheader_buff.iov_base;
chunks[CHUNK_LEADER].iov_len = JPEG_MARKER_LEN;
chunks[CHUNK_HEADER].iov_base = (unsigned char *)chunks[CHUNK_LEADER].iov_base + chunks[CHUNK_LEADER].iov_len;
chunks[CHUNK_HEADER].iov_len = 0;
chunks[CHUNK_TRAILER].iov_base = buffs->trailer_buff.iov_base;
chunks[CHUNK_TRAILER].iov_len = JPEG_MARKER_LEN;
chunks[CHUNK_REM].iov_base = buffs->rem_buff.iov_base;
chunks[CHUNK_REM].iov_len = 0;
/* this is the only DMA mapped buffer and its DMA address should be set */
chunks[CHUNK_COMMON].iov_base = buffs->common_buff.iov_base;
chunks[CHUNK_COMMON].iov_dma = buffs->common_buff.iov_dma;
chunks[CHUNK_COMMON].iov_len = 0;
}
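/*
 * Taken together, the chunks are intended to reproduce the JPEG file layout
 * on disk: LEADER (2-byte 0xff 0xd8 SOI marker) | EXIF | HEADER | DATA_0 |
 * DATA_1 | TRAILER (2-byte 0xff 0xd9 EOI marker, see init_buffers() below),
 * while ALIGN_0, ALIGN_1 and REM only hold glue bytes that keep every write
 * sector- and DMA-aligned.
 */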
static int init_buffers(struct device *dev, struct frame_buffers *buffs)
{
int mult;
int total_sz;
unsigned char *ptr;
buffs->exif_buff.iov_base = kmalloc(MAX_EXIF_SIZE, GFP_KERNEL);
if (!buffs->exif_buff.iov_base)
return -ENOMEM;
buffs->exif_buff.iov_len = MAX_EXIF_SIZE;
buffs->jpheader_buff.iov_base = kmalloc(JPEG_HEADER_MAXSIZE, GFP_KERNEL);
if (!buffs->jpheader_buff.iov_base)
goto err_header;
buffs->jpheader_buff.iov_len = JPEG_HEADER_MAXSIZE;
buffs->trailer_buff.iov_base = kmalloc(JPEG_MARKER_LEN, GFP_KERNEL);
if (!buffs->trailer_buff.iov_base)
goto err_trailer;
buffs->trailer_buff.iov_len = JPEG_MARKER_LEN;
ptr = buffs->trailer_buff.iov_base;
ptr[0] = 0xff;
ptr[1] = 0xd9;
/* 3 * ALIGNMENT_SIZE here means 2 buffers for JPEG data alignment plus one buffer for
 * DATA_0 address alignment - this one is padded with APP15 marker */
total_sz = MAX_EXIF_SIZE + JPEG_HEADER_MAXSIZE + 4 * ALIGNMENT_SIZE + PHY_BLOCK_SIZE;
if (total_sz > PAGE_SIZE) {
mult = total_sz / PAGE_SIZE + 1;
total_sz = mult * PAGE_SIZE;
} else {
total_sz = PAGE_SIZE;
}
buffs->common_buff.iov_base = kmalloc(total_sz, GFP_KERNEL);
if (!buffs->common_buff.iov_base)
goto err_common;
buffs->common_buff.iov_len = total_sz;
/* this is the only buffer which needs DMA mapping as all other data will be collected in it */
buffs->common_buff.iov_dma = dma_map_single(dev, buffs->common_buff.iov_base, buffs->common_buff.iov_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, buffs->common_buff.iov_dma))
goto err_common_dma;
buffs->rem_buff.iov_base = kmalloc(PHY_BLOCK_SIZE, GFP_KERNEL);
if (!buffs->rem_buff.iov_base)
goto err_remainder;
buffs->rem_buff.iov_len = PHY_BLOCK_SIZE;
/* debug code follows */
g_jpg_data_0 = kzalloc(DATA_BUFF_SIZE, GFP_KERNEL);
g_jpg_data_1 = kzalloc(DATA_BUFF_SIZE, GFP_KERNEL);
if (!g_jpg_data_0 || !g_jpg_data_1)
return -ENOMEM;
/* end of debug code */
return 0;
err_remainder:
dma_unmap_single(dev, buffs->common_buff.iov_dma, buffs->common_buff.iov_len, DMA_TO_DEVICE);
err_common_dma:
kfree(buffs->common_buff.iov_base);
err_common:
kfree(buffs->trailer_buff.iov_base);
err_trailer:
kfree(buffs->jpheader_buff.iov_base);
err_header:
kfree(buffs->exif_buff.iov_base);
return -ENOMEM;
}
static void deinit_buffers(struct device *dev, struct frame_buffers *buffs)
{
kfree(buffs->jpheader_buff.iov_base);
kfree(buffs->exif_buff.iov_base);
kfree(buffs->trailer_buff.iov_base);
dma_unmap_single(dev, buffs->common_buff.iov_dma, buffs->common_buff.iov_len, DMA_TO_DEVICE);
kfree(buffs->common_buff.iov_base);
kfree(buffs->rem_buff.iov_base);
}
static inline void reset_chunks(struct fvec *vects, int all)
{
int i;
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
if (i != CHUNK_REM)
vects[i].iov_len = 0;
}
if (all) {
vects[CHUNK_REM].iov_len = 0;
}
}
static inline struct elphel_ahci_priv *dev_get_dpriv(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct elphel_ahci_priv *dpriv = hpriv->plat_data;
return dpriv;
}
//static void start_cmd(struct device *dev, struct elphel_ahci_priv *dpriv, struct ata_port *port)
//{
// int num;
// size_t max_sz = (MAX_LBA_COUNT + 1) * PHY_BLOCK_SIZE;
// size_t total_sz = get_total_size(dpriv->data_chunks);
//
// if ((dpriv->lba_ptr.lba_write & ~ADDR_MASK_28_BIT) || total_sz > max_sz) {
//// if (dpriv->lba_ptr.lba_write & ~ADDR_MASK_28_BIT) {
// dpriv->curr_cmd = ATA_CMD_WRITE_EXT;
// dpriv->max_data_sz = (MAX_LBA_COUNT_EXT + 1) * PHY_BLOCK_SIZE;
// } else {
// dpriv->curr_cmd = ATA_CMD_WRITE;
// dpriv->max_data_sz = (MAX_LBA_COUNT + 1) * PHY_BLOCK_SIZE;
// }
// dpriv->flags |= PROC_CMD;
// dpriv->sg_elems = map_vectors(dpriv);
//
// num = dma_map_sg(dev, dpriv->sgl, dpriv->sg_elems, DMA_TO_DEVICE);
// printk(KERN_DEBUG ">>> %d entries dma mapped\n", num);
// dump_sg_list(dpriv->sgl, dpriv->sg_elems);
//
// dpriv->lba_ptr.wr_count = get_blocks_num(dpriv->sgl, dpriv->sg_elems);
// printk(KERN_DEBUG ">>> trying to write data from sg list %u blocks, LBA: %llu\n", dpriv->lba_ptr.wr_count, dpriv->lba_ptr.lba_write);
// elphel_cmd_issue(port, dpriv->lba_ptr.lba_write, dpriv->lba_ptr.wr_count, dpriv->sgl, dpriv->sg_elems, dpriv->curr_cmd);
//}
/** Process command and return the number of S/G entries mapped */
static int process_cmd(struct device *dev, struct elphel_ahci_priv *dpriv, struct ata_port *port)
{
int num;
struct fvec *cbuff = &dpriv->data_chunks[CHUNK_COMMON];
size_t max_sz = (MAX_LBA_COUNT + 1) * PHY_BLOCK_SIZE;
size_t rem_sz = get_size_from(dpriv->data_chunks, dpriv->curr_data_chunk, dpriv->curr_data_offset, EXCLUDE_REM);
/* define ATA command to use for current transaction */
if ((dpriv->lba_ptr.lba_write & ~ADDR_MASK_28_BIT) || rem_sz > max_sz) {
dpriv->curr_cmd = ATA_CMD_WRITE_EXT;
dpriv->max_data_sz = (MAX_LBA_COUNT_EXT + 1) * PHY_BLOCK_SIZE;
} else {
dpriv->curr_cmd = ATA_CMD_WRITE;
dpriv->max_data_sz = (MAX_LBA_COUNT + 1) * PHY_BLOCK_SIZE;
}
if (dpriv->flags & PROC_CMD)
dpriv->lba_ptr.lba_write += dpriv->lba_ptr.wr_count;
dpriv->flags |= PROC_CMD;
dpriv->sg_elems = map_vectors(dpriv);
if (dpriv->sg_elems != 0) {
dump_sg_list(dpriv->sgl, dpriv->sg_elems);
dpriv->lba_ptr.wr_count = get_blocks_num(dpriv->sgl, dpriv->sg_elems);
printk(KERN_DEBUG ">>> trying to write data from sg list %u blocks, LBA: %llu\n", dpriv->lba_ptr.wr_count, dpriv->lba_ptr.lba_write);
dma_sync_single_for_device(dev, cbuff->iov_dma, cbuff->iov_len, DMA_TO_DEVICE);
if (dpriv->data_chunks[CHUNK_DATA_0].iov_len != 0)
dma_sync_single_for_device(dev, dpriv->data_chunks[CHUNK_DATA_0].iov_dma, dpriv->data_chunks[CHUNK_DATA_0].iov_len, DMA_TO_DEVICE);
if (dpriv->data_chunks[CHUNK_DATA_1].iov_len != 0)
dma_sync_single_for_device(dev, dpriv->data_chunks[CHUNK_DATA_1].iov_dma, dpriv->data_chunks[CHUNK_DATA_1].iov_len, DMA_TO_DEVICE);
elphel_cmd_issue(port, dpriv->lba_ptr.lba_write, dpriv->lba_ptr.wr_count, dpriv->sgl, dpriv->sg_elems, dpriv->curr_cmd);
}
return dpriv->sg_elems;
}
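/*
 * Sizing example for the command selection above (PHY_BLOCK_SIZE = 512):
 * WRITE DMA covers at most (MAX_LBA_COUNT + 1) * 512 = 128 KiB per command
 * and only 28-bit LBAs, while WRITE DMA EXT covers
 * (MAX_LBA_COUNT_EXT + 1) * 512 = 32 MiB; a frame larger than max_data_sz is
 * simply split over several process_cmd() iterations while PROC_CMD stays set.
 */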
static void finish_cmd(struct device *dev, struct elphel_ahci_priv *dpriv)
{
int all;
dpriv->lba_ptr.wr_count = 0;
if ((dpriv->flags & LAST_BLOCK) == 0) {
all = 0;
} else {
all = 1;
dpriv->flags &= ~LAST_BLOCK;
}
reset_chunks(dpriv->data_chunks, all);
dpriv->flags &= ~PROC_CMD;
dpriv->curr_cmd = 0;
dpriv->max_data_sz = 0;
dpriv->curr_data_chunk = 0;
dpriv->curr_data_offset = 0;
}
printk(KERN_DEBUG ">>> *** proceeding to write test *** <<<\n"); /** Fill free space in REM buffer with 0 and save the reaming data chunk */
static void finish_rec(struct device *dev, struct elphel_ahci_priv *dpriv, struct ata_port *port)
{
size_t stuff_len;
unsigned char *src;
struct fvec *cbuff = &dpriv->data_chunks[CHUNK_COMMON];
struct fvec *rbuff = &dpriv->data_chunks[CHUNK_REM];
if (rbuff->iov_len == 0)
return;
dev_dbg(dev, "write last chunk of data, size: %u\n", rbuff->iov_len);
stuff_len = PHY_BLOCK_SIZE - rbuff->iov_len;
src = vectrpos(rbuff, 0);
memset(src, 0, stuff_len);
rbuff->iov_len += stuff_len;
vectcpy(cbuff, rbuff->iov_base, rbuff->iov_len);
vectshrink(rbuff, rbuff->iov_len);
dpriv->flags |= LAST_BLOCK;
process_cmd(dev, dpriv, port);
}
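/*
 * Example: if 300 bytes are left in the REM buffer after the last frame,
 * stuff_len = 512 - 300 = 212 zero bytes are appended, the resulting full
 * 512-byte block is copied to the common buffer and written with LAST_BLOCK
 * set, so the subsequent finish_cmd() resets the REM chunk as well.
 */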
static ssize_t rawdev_write(struct device *dev, ///<
struct device_attribute *attr, ///<
const char *buff, ///<
size_t buff_sz) ///<
{
int i, n_elem;
int sg_elems = 0;
struct ata_host *host = dev_get_drvdata(dev);
struct ata_port *port = host->ports[DEFAULT_PORT_NUM];
struct elphel_ahci_priv *dpriv = dev_get_dpriv(dev);
struct scatterlist *sgl;
struct scatterlist *sg_ptr;
// u8 *test_buff = pElphel_buf->d2h_vaddr;
u8 *test_buff;
uint8_t *buffers[SG_TBL_SZ] = {0};
uint64_t lba_addr;
struct frame_data fdata;
size_t rcvd = 0;
struct frame_buffers *buffs = &dpriv->fbuffs;
struct fvec *chunks = dpriv->data_chunks;
size_t blocks_num;
static int dont_process = 0;
if ((dpriv->flags & PROC_CMD) || dont_process)
// we are not ready yet
return -EAGAIN;
if (buff_sz != sizeof(struct frame_data)) {
dev_err(dev, "the size of the data buffer is incorrect, should be equal to sizeof(struct data_page)\n");
return -EINVAL;
}
memcpy(&fdata, buff, sizeof(struct frame_data));
/* debug code follows */
printk(KERN_DEBUG ">>> data pointers received:\n");
printk(KERN_DEBUG ">>> sensor port: %u\n", fdata.sensor_port);
printk(KERN_DEBUG ">>> cirbuf ptr: %d, cirbuf data len: %d\n", fdata.cirbuf_ptr, fdata.jpeg_len);
printk(KERN_DEBUG ">>> meta_index: %d\n", fdata.meta_index);
printk(KERN_DEBUG "\n");
// rcvd = exif_get_data(fdata.sensor_port, fdata.meta_index, buffs->exif_buff.iov_base, buffs->exif_buff.iov_len);
rcvd = exif_get_data_tst(fdata.sensor_port, fdata.meta_index, buffs->exif_buff.iov_base, buffs->exif_buff.iov_len, 1);
printk(KERN_DEBUG ">>> bytes received from exif driver: %u\n", rcvd);
if (rcvd > 0 && rcvd < buffs->exif_buff.iov_len)
// print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buffs->exif_buff.iov_base, rcvd);
printk(KERN_DEBUG ">>>\tskipped");
chunks[CHUNK_EXIF].iov_len = rcvd;
// rcvd = jpeghead_get_data(fdata.sensor_port, buffs->jpheader_buff.iov_base, buffs->jpheader_buff.iov_len, 0);
rcvd = jpeghead_get_data_tst(fdata.sensor_port, buffs->jpheader_buff.iov_base, buffs->jpheader_buff.iov_len, 0);
printk(KERN_DEBUG ">>> bytes received from jpeghead driver: %u\n", rcvd);
if (rcvd > 0 && rcvd < buffs->jpheader_buff.iov_len) {
// print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buffs->jpheader_buff.iov_base, rcvd);
chunks[CHUNK_LEADER].iov_len = JPEG_MARKER_LEN;
chunks[CHUNK_TRAILER].iov_len = JPEG_MARKER_LEN;
chunks[CHUNK_HEADER].iov_len = rcvd - chunks[CHUNK_LEADER].iov_len;
} else {
// we don't want these buffers for test purposes
chunks[CHUNK_LEADER].iov_len = 0;
chunks[CHUNK_TRAILER].iov_len = 0;
chunks[CHUNK_HEADER].iov_len = 0;
}

rcvd = 0;
// rcvd = circbuf_get_ptr(fdata.sensor_port, fdata.cirbuf_ptr, fdata.jpeg_len, &chunks[CHUNK_DATA_0], &chunks[CHUNK_DATA_1]);
rcvd = circbuf_get_ptr_tst(fdata.sensor_port, fdata.cirbuf_ptr, fdata.jpeg_len, &chunks[CHUNK_DATA_0], &chunks[CHUNK_DATA_1]);
if (rcvd > 0) {
printk(KERN_DEBUG ">>> number of jpeg data pointers: %d\n", rcvd);
printk(KERN_DEBUG ">>> bytes received from circbuf driver, chunk 0: %u\n", chunks[CHUNK_DATA_0].iov_len);
if (rcvd == 2)
printk(KERN_DEBUG ">>> bytes received from circbuf driver, chunk 1: %u\n", chunks[CHUNK_DATA_1].iov_len);
}
if (chunks[CHUNK_DATA_0].iov_len != 0)
chunks[CHUNK_DATA_0].iov_dma = dma_map_single(dev, chunks[CHUNK_DATA_0].iov_base, chunks[CHUNK_DATA_0].iov_len, DMA_TO_DEVICE);
if (chunks[CHUNK_DATA_1].iov_len != 0)
chunks[CHUNK_DATA_1].iov_dma = dma_map_single(dev, chunks[CHUNK_DATA_1].iov_base, chunks[CHUNK_DATA_1].iov_len, DMA_TO_DEVICE);
printk(KERN_DEBUG ">>> unaligned frame dump:\n");
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
printk(KERN_DEBUG ">>>\tslot: %i; len: %u\n", i, dpriv->data_chunks[i].iov_len);
}
align_frame(dev, dpriv);
printk(KERN_DEBUG ">>> aligned frame dump:\n");
for (i = 0; i < MAX_DATA_CHUNKS; i++) {
printk(KERN_DEBUG ">>>\tslot: %i; len: %u\n", i, dpriv->data_chunks[i].iov_len);
}
if (check_chunks(dpriv->data_chunks) != 0) {
dont_process = 1;
return -EINVAL;
}
process_cmd(dev, dpriv, port);
while (dpriv->flags & PROC_CMD) {
#ifndef DEBUG_DONT_WRITE
while (dpriv->flags & IRQ_SIMPLE) {
printk_once(KERN_DEBUG ">>> waiting for interrupt\n");
msleep_interruptible(1);
}
#endif
printk(KERN_DEBUG ">>> proceeding to next cmd chunk\n");
sg_elems = process_cmd(dev, dpriv, port);
if (sg_elems == 0)
finish_cmd(dev, dpriv);
}
if (chunks[CHUNK_DATA_0].iov_len != 0)
dma_unmap_single(dev, chunks[CHUNK_DATA_0].iov_dma, chunks[CHUNK_DATA_0].iov_len, DMA_TO_DEVICE);
if (chunks[CHUNK_DATA_1].iov_len != 0)
dma_unmap_single(dev, chunks[CHUNK_DATA_1].iov_dma, chunks[CHUNK_DATA_1].iov_len, DMA_TO_DEVICE);
// chunks[CHUNK_STUFFING].iov_base = buffs->stuff_buff.iov_base;
// stuff_frame(chunks);
//
// /* copy data to common buffer */
// struct fvec vec = {0};
// vec.iov_base = buffs->common_buff.iov_base;
// memcpy(vec.iov_base, chunks[CHUNK_LEADER].iov_base, chunks[CHUNK_LEADER].iov_len);
// vec.iov_len += chunks[CHUNK_LEADER].iov_len;
// chunks[CHUNK_LEADER].iov_len = 0;
//
// memcpy(vec.iov_base + vec.iov_len, chunks[CHUNK_EXIF].iov_base, chunks[CHUNK_EXIF].iov_len);
// vec.iov_len += chunks[CHUNK_EXIF].iov_len;
// chunks[CHUNK_EXIF].iov_len = 0;
//
// memcpy(vec.iov_base + vec.iov_len, chunks[CHUNK_HEADER].iov_base, chunks[CHUNK_HEADER].iov_len);
// vec.iov_len += chunks[CHUNK_HEADER].iov_len;
// chunks[CHUNK_HEADER].iov_len = 0;
//
// memcpy(vec.iov_base + vec.iov_len, chunks[CHUNK_STUFFING].iov_base, chunks[CHUNK_STUFFING].iov_len);
// vec.iov_len += chunks[CHUNK_STUFFING].iov_len;
// chunks[CHUNK_STUFFING].iov_len = 0;
//
// chunks[CHUNK_HEADER] = vec;
/* end of debug code */
// start_cmd(dev, dpriv, port);
// while (dpriv->flags & PROC_CMD) {
// while (dpriv->flags & IRQ_SIMPLE) {
// printk_once(KERN_DEBUG ">>> waiting for interrupt\n");
// msleep_interruptible(1);
// }
// printk(KERN_DEBUG ">>> proceeding to next cmd chunk\n");
//// if (proc_cmd(dev, dpriv, port) == 0)
// finish_cmd(dev, dpriv);
// }
// /* prepare buffer and fill it with markers */
// sgl = kmalloc(sizeof(struct scatterlist) * SG_TBL_SZ, GFP_KERNEL);
// if (!sgl)
// return -ENOMEM;
// sg_init_table(sgl, SG_TBL_SZ);
// for_each_sg(sgl, sg_ptr, SG_TBL_SZ, n_elem) {
// test_buff = kmalloc(TEST_BUFF_SZ, GFP_KERNEL);
// if (!test_buff)
// return -ENOMEM;
// buffers[n_elem] = test_buff;
// memset(test_buff, 0xa5, TEST_BUFF_SZ);
// sg_set_buf(sg_ptr, (void *)test_buff, TEST_BUFF_SZ);
// sg_elems++;
// }
//
// printk(KERN_DEBUG ">>> mapped %d SG elements\n", sg_elems);
// printk(KERN_DEBUG ">>>\n");
//
// /* write test #2 */
// if (dpriv->lba_ptr.lba_write & ~ADDR_MASK_28_BIT) {
// dpriv->curr_cmd = ATA_CMD_WRITE_EXT;
// dpriv->max_data_sz = (0xffff + 1) * PHY_BLOCK_SIZE;
// } else {
// dpriv->curr_cmd = ATA_CMD_WRITE;
// dpriv->max_data_sz = (0xff + 1) * PHY_BLOCK_SIZE;
// }
// dpriv->flags |= PROC_CMD;
// blocks_num = get_blocks_num(sgl, sg_elems);
// i = dma_map_sg(dev, sgl, sg_elems, DMA_TO_DEVICE);
// printk(KERN_DEBUG ">>> dma mapped %d elements\n", i);
// printk(KERN_DEBUG ">>> trying to write data from sg list, %u blocks, LBA: %llu\n", blocks_num, dpriv->lba_ptr.lba_write);
// elphel_cmd_issue(port, dpriv->lba_ptr.lba_write, blocks_num, sgl, sg_elems, dpriv->curr_cmd);
//
// while (dpriv->flags & IRQ_SIMPLE) {
// printk_once(KERN_DEBUG ">>> waiting for interrupt\n");
// msleep_interruptible(1);
// }
// dma_unmap_sg(dev, sgl, sg_elems, DMA_TO_DEVICE);
// dpriv->lba_ptr.lba_write += blocks_num;
// dpriv->flags &= ~PROC_CMD;
// /* end of write test #2 */
//
// for (i = 0; i < sg_elems; i++) {
// kfree(buffers[i]);
// }
// kfree(sgl);
//
// /* read test */
// dma_map_sg(dev, sgl, sg_elems, DMA_FROM_DEVICE);
//
// printk(KERN_DEBUG ">>> trying to read data to sg list\n");
// if (lba_addr & ~ADDR_MASK_28_BIT)
// cmd = ATA_CMD_READ_EXT;
// else
// cmd = ATA_CMD_READ;
// elphel_cmd_issue(port, lba_addr, sg_elems, sgl, sg_elems, cmd);
// printk(KERN_DEBUG ">>> command has been issued\n");
//
// while (dpriv->flags & IRQ_SIMPLE) {
// printk_once(KERN_DEBUG ">>> waiting for interrupt\n");
// msleep(1);
// }
//
// printk(KERN_DEBUG ">>> dump test buffer after reading: %d bytes\n", TEST_BUFF_SZ);
// dma_unmap_sg(dev, sgl, sg_elems, DMA_FROM_DEVICE);
// for (i = 0; i < sg_elems; i++) {
// dev_dbg(dev, ">>> sector %i\n", i);
// u8 buff[TEST_BUFF_SZ];
// sg_copy_to_buffer(&sgl[i], 1, buff, TEST_BUFF_SZ);
// print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buff, TEST_BUFF_SZ);
// }
// /* end of read test */
//
// printk(KERN_DEBUG ">>> *** proceeding to write test *** <<<\n");
//
// /* write test */
// for_each_sg(sgl, sg_ptr, SG_TBL_SZ, n_elem) {
// uint8_t pattern_buff[TEST_BUFF_SZ];
// memset(pattern_buff, 0xb6, TEST_BUFF_SZ);
// sg_copy_from_buffer(sg_ptr, 1, pattern_buff, TEST_BUFF_SZ);
// }
// if (lba_addr & ~ADDR_MASK_28_BIT) {
// dpriv->curr_cmd = ATA_CMD_WRITE_EXT;
// dpriv->max_data_sz = (0xffff + 1) * PHY_BLOCK_SIZE;
// } else {
// dpriv->curr_cmd = ATA_CMD_WRITE;
// dpriv->max_data_sz = (0xff + 1) * PHY_BLOCK_SIZE;
// }
// sg_elems = map_vectors(sgl, dpriv, dpriv->max_data_sz);
// dump_sg_list(sgl, sg_elems);
// while (sg_elems != 0) {
// dma_map_sg(dev, sgl, sg_elems, DMA_TO_DEVICE);
// lba_addr = dpriv->lba_ptr.lba_write;
// blocks_num = get_blocks_num(sgl, sg_elems);
// printk(KERN_DEBUG ">>> trying to write data from sg list %u blocks, LBA: %llu\n", blocks_num, lba_addr);
// elphel_cmd_issue(port, lba_addr, blocks_num, sgl, sg_elems, cmd);
// printk(KERN_DEBUG ">>> command has been issued, writing %u LBAs\n", blocks_num);
//
// while (dpriv->flags & IRQ_SIMPLE) {
// printk_once(KERN_DEBUG ">>> waiting for interrupt\n");
// msleep(1);
// }
// dma_unmap_sg(dev, sgl, sg_elems, DMA_TO_DEVICE);
// dpriv->lba_ptr.lba_write += blocks_num;
//
// sg_elems = map_vectors(sgl, dpriv, dpriv->max_data_sz);
// dump_sg_list(sgl, sg_elems);
// }
// finish_cmd(dpriv);
// printk(KERN_DEBUG ">>> dump test buffer after writing: %d bytes\n", TEST_BUFF_SZ);
// for (i = 0; i < sg_elems; i++) {
// dev_dbg(dev, ">>> sector %i\n", i);
// u8 buff[TEST_BUFF_SZ];
// sg_copy_to_buffer(&sgl[i], 1, buff, TEST_BUFF_SZ);
// print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buff, TEST_BUFF_SZ);
// }
/* end of write test */
return buff_sz;
@@ -487,32 +1691,19 @@ static ssize_t elphel_test_write(struct device *dev, struct device_attribute *at
/** Prepare software constructed command FIS in command table area. The structure of the
 * command FIS is described in Transport Layer chapter of Serial ATA revision 3.1 documentation.
 */
-inline void prep_cfis(u8 *cmd_tbl, ///< pointer to the beginning of command table
-		u8 cmd, ///< ATA command as described in ATA/ATAPI command set
-		u64 start_addr, ///< LBA start address
-		u16 count) ///< sector count, the number of 512 byte sectors to read or write
+static inline void prep_cfis(uint8_t *cmd_tbl, ///< pointer to the beginning of command table
+		uint8_t cmd, ///< ATA command as described in ATA/ATAPI command set
+		uint64_t start_addr, ///< LBA start address
+		uint16_t count) ///< sector count, the number of 512 byte sectors to read or write
		///< @return None
{
-	u8 device, ctrl;
+	uint8_t device, ctrl;
	/* select the content of Device and Control registers based on command, read the description of
	 * a command in ATA/ATAPI command set documentation
	 */
	switch (cmd) {
	case ATA_CMD_WRITE:
-		device = 0xe0 | ((start_addr >> 24) & 0x0f);
-		ctrl = 0x08;
-		/* this is 28-bit command; 4 bits of the address have already been
-		 * placed to Device register, invalidate the remaining (if any) upper
-		 * bits of the address and leave only 24 significant bits (just in case)
-		 */
-		start_addr &= 0xffffff;
-		break;
-	case ATA_CMD_WRITE_EXT:
-		// not verified yet
-		device = 0x00;
-		ctrl = 0x08;
-		break;
	case ATA_CMD_READ:
		device = 0xe0 | ((start_addr >> 24) & 0x0f);
		ctrl = 0x08;
@@ -521,14 +1712,16 @@ inline void prep_cfis(u8 *cmd_tbl, ///< pointer to the beginning o
		 * bits of the address and leave only 24 significant bits (just in case)
		 */
		start_addr &= 0xffffff;
+		count &= 0xff;
		break;
+	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_READ_EXT:
		device = 0xe0;
		ctrl = 0x08;
		break;
	default:
-		device = 0x00;
-		ctrl = 0x00;
+		device = 0xe0;
+		ctrl = 0x08;
	}
	cmd_tbl[0] = 0x27; // H2D register FIS
@@ -550,30 +1743,33 @@ inline void prep_cfis(u8 *cmd_tbl, ///< pointer to the beginning o
}
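/* For reference, a sketch of the body elided by the hunk above; this is the
 * standard H2D register FIS layout from the SATA specification, not the
 * commit's verbatim code: */
// cmd_tbl[0]  = 0x27;                      // FIS type: host-to-device register FIS
// cmd_tbl[1]  = 0x80;                      // C bit set: update of the Command register
// cmd_tbl[2]  = cmd;                       // ATA command code
// cmd_tbl[4]  = start_addr & 0xff;         // LBA, low bytes
// cmd_tbl[5]  = (start_addr >> 8) & 0xff;
// cmd_tbl[6]  = (start_addr >> 16) & 0xff;
// cmd_tbl[7]  = device;                    // Device register as selected above
// cmd_tbl[8]  = (start_addr >> 24) & 0xff; // upper LBA bytes, used by EXT commands
// cmd_tbl[9]  = (start_addr >> 32) & 0xff;
// cmd_tbl[10] = (start_addr >> 40) & 0xff;
// cmd_tbl[12] = count & 0xff;              // sector count, low byte
// cmd_tbl[13] = (count >> 8) & 0xff;       // sector count, high byte (EXT commands)
// cmd_tbl[15] = ctrl;                      // Device Control register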
/** Map S/G list to physical region descriptor table in AHCI controller command table */
-inline void prep_prdt(struct scatterlist *sgl, ///< pointer to S/G list which should be mapped to physical
+static inline void prep_prdt(struct fvec *sgl, ///< pointer to S/G list which should be mapped to physical
		///< region description table
		unsigned int n_elem, ///< the number of elements in @e sgl
		struct ahci_sg *ahci_sgl) ///< pointer to physical region description table
		///< @return None
{
	unsigned int num = 0;
-	struct scatterlist *sg_ptr;
-	for_each_sg(sgl, sg_ptr, n_elem, num) {
-		dma_addr_t addr = sg_dma_address(sg_ptr);
-		u32 sg_len = sg_dma_len(sg_ptr);
-		ahci_sgl[num].addr = cpu_to_le32(addr & 0xffffffff);
-		ahci_sgl[num].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		ahci_sgl[num].flags_size = cpu_to_le32(sg_len - 1);
+	for (num = 0; num < n_elem; num++) {
+		ahci_sgl[num].addr = cpu_to_le32(sgl[num].iov_dma & 0xffffffff);
+		ahci_sgl[num].addr_hi = cpu_to_le32((sgl[num].iov_dma >> 16) >> 16);
+		ahci_sgl[num].flags_size = cpu_to_le32(sgl[num].iov_len - 1);
	}
}
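/* A minimal usage sketch (assumption, not part of this commit): one mapped
 * buffer described by struct fvec becomes one PRDT entry; the controller
 * expects the byte count minus one, which is why prep_prdt() stores
 * iov_len - 1. 'buf', 'dma_handle' and 'ahci_sgl' are hypothetical here. */
static void prdt_sketch(void *buf, dma_addr_t dma_handle, struct ahci_sg *ahci_sgl)
{
	struct fvec vec;
	vec.iov_base = buf;       /* kernel buffer holding one sector */
	vec.iov_len = 512;        /* one physical block */
	vec.iov_dma = dma_handle; /* as returned by dma_map_single() */
	prep_prdt(&vec, 1, ahci_sgl);
}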
-static int elphel_write_dma(struct ata_port *ap, u64 start, u16 count, struct scatterlist *sgl, unsigned int elem)
+/** Prepare and issue read or write command */
+static void elphel_cmd_issue(struct ata_port *ap,///< device port for which the command should be issued
+		uint64_t start, ///< LBA start address
+		uint16_t count, ///< the number of sectors to read or write
+		struct fvec *sgl, ///< S/G list pointing to data buffers
+		unsigned int elem, ///< the number of elements in @e sgl
+		uint8_t cmd) ///< the command to be issued; should be ATA_CMD_READ, ATA_CMD_READ_EXT,
+		///< ATA_CMD_WRITE or ATA_CMD_WRITE_EXT, other commands are not tested
+		///< @return None
{
-	u32 opts;
-	u8 *cmd_tbl;
-	u8 cmd;
+	uint32_t opts;
+	uint8_t *cmd_tbl;
	unsigned int slot_num = 0;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -586,10 +1782,6 @@ static int elphel_write_dma(struct ata_port *ap, u64 start, u16 count, struct sc
	/* prepare command FIS */
	dma_sync_single_for_cpu(ap->dev, pp->cmd_tbl_dma, AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
	cmd_tbl = pp->cmd_tbl + slot_num * AHCI_CMD_TBL_SZ;
-	if (start & ~ADDR_MASK_28_BIT)
-		cmd = ATA_CMD_WRITE_EXT;
-	else
-		cmd = ATA_CMD_WRITE;
	prep_cfis(cmd_tbl, cmd, start, count);
	/* prepare physical region descriptor table */
@@ -597,91 +1789,159 @@ static int elphel_write_dma(struct ata_port *ap, u64 start, u16 count, struct sc
	prep_prdt(sgl, elem, ahci_sg);
	/* prepare command header */
-	opts = CMD_FIS_LEN | (elem << 16) | AHCI_CMD_PREFETCH | AHCI_CMD_CLR_BUSY | AHCI_CMD_WRITE;
+	opts = CMD_FIS_LEN | (elem << 16) | AHCI_CMD_PREFETCH | AHCI_CMD_CLR_BUSY;
+	if (cmd == ATA_CMD_WRITE || cmd == ATA_CMD_WRITE_EXT)
+		opts |= AHCI_CMD_WRITE;
	ahci_fill_cmd_slot(pp, slot_num, opts);
-	printk(KERN_DEBUG ">>> dump command table content, first %d bytes, phys addr = 0x%x:\n", TEST_BUFF_SZ, pp->cmd_tbl_dma);
-	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pp->cmd_tbl, TEST_BUFF_SZ);
+	dev_dbg(ap->dev, ">>> dump command table content, first %d bytes, phys addr = 0x%x:\n", 20, pp->cmd_tbl_dma);
+	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pp->cmd_tbl, 20);
	dma_sync_single_for_device(ap->dev, pp->cmd_tbl_dma, AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
+	/* debug code follows */
+#ifdef DEBUG_DONT_WRITE
+	return;
+#endif
+	/* end of debug code */
	/* issue command */
	writel(0x11, port_mmio + PORT_CMD);
	writel(1 << slot_num, port_mmio + PORT_CMD_ISSUE);
+}
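/* Usage sketch (not part of this commit): callers choose the 28- or 48-bit
 * command variant from the starting LBA before issuing, exactly as the
 * removed elphel_write_dma()/elphel_read_dma() and the commented-out test
 * code above do. */
static void issue_write_sketch(struct ata_port *port, uint64_t lba_addr,
		uint16_t blocks_num, struct fvec *sgl, unsigned int sg_elems)
{
	uint8_t cmd;
	if (lba_addr & ~ADDR_MASK_28_BIT)
		cmd = ATA_CMD_WRITE_EXT; /* LBA does not fit into 28 bits */
	else
		cmd = ATA_CMD_WRITE;
	elphel_cmd_issue(port, lba_addr, blocks_num, sgl, sg_elems, cmd);
}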
-	return 0;
-}
+static ssize_t lba_start_read(struct device *dev, struct device_attribute *attr, char *buff)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct elphel_ahci_priv *dpriv = hpriv->plat_data;
+	return snprintf(buff, 20, "%llu\n", dpriv->lba_ptr.lba_start);
+}
-static int elphel_read_dma(struct ata_port *ap, u64 start, u16 count, struct scatterlist *sgl, unsigned int elem)
+static ssize_t lba_start_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
{
-	u32 opts;
-	u8 *cmd_tbl;
-	u8 cmd;
-	unsigned int slot_num = 0;
-	struct ahci_port_priv *pp = ap->private_data;
-	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
	struct elphel_ahci_priv *dpriv = hpriv->plat_data;
-	struct ahci_sg *ahci_sg;
-	void __iomem *port_mmio = ahci_port_base(ap);
-	dpriv->flags |= IRQ_SIMPLE;
-	/* prepare command FIS */
-	dma_sync_single_for_cpu(ap->dev, pp->cmd_tbl_dma, AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
-	cmd_tbl = pp->cmd_tbl + slot_num * AHCI_CMD_TBL_SZ;
-	if (start & ~ADDR_MASK_28_BIT)
-		cmd = ATA_CMD_READ_EXT;
-	else
-		cmd = ATA_CMD_READ;
-	prep_cfis(cmd_tbl, cmd, start, count);
-	/* prepare physical region descriptor table */
-	ahci_sg = pp->cmd_tbl + slot_num * AHCI_CMD_TBL_SZ + AHCI_CMD_TBL_HDR_SZ;
-	prep_prdt(sgl, elem, ahci_sg);
-	/* prepare command header */
-	opts = CMD_FIS_LEN | (elem << 16) | AHCI_CMD_PREFETCH | AHCI_CMD_CLR_BUSY;
-	ahci_fill_cmd_slot(pp, slot_num, opts);
-	printk(KERN_DEBUG ">>> dump command table content, first %d bytes, phys addr = 0x%x:\n", TEST_BUFF_SZ, pp->cmd_tbl_dma);
-	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pp->cmd_tbl, TEST_BUFF_SZ);
-	dma_sync_single_for_device(ap->dev, pp->cmd_tbl_dma, AHCI_CMD_TBL_AR_SZ, DMA_TO_DEVICE);
-	/* issue command */
-	writel(0x11, port_mmio + PORT_CMD);
-	writel(1 << slot_num, port_mmio + PORT_CMD_ISSUE);
-	return 0;
+	if (kstrtoull(buff, 10, &dpriv->lba_ptr.lba_start) != 0)
+		return -EINVAL;
+	if (dpriv->lba_ptr.lba_write < dpriv->lba_ptr.lba_start)
+		dpriv->lba_ptr.lba_write = dpriv->lba_ptr.lba_start;
+	return buff_sz;
}
+static ssize_t lba_end_read(struct device *dev, struct device_attribute *attr, char *buff)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct elphel_ahci_priv *dpriv = hpriv->plat_data;
+	return snprintf(buff, 20, "%llu\n", dpriv->lba_ptr.lba_end);
+}
+static ssize_t lba_end_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct elphel_ahci_priv *dpriv = hpriv->plat_data;
+	if (kstrtoull(buff, 10, &dpriv->lba_ptr.lba_end) != 0)
+		return -EINVAL;
+	if (dpriv->lba_ptr.lba_write > dpriv->lba_ptr.lba_end)
+		dpriv->lba_ptr.lba_write = dpriv->lba_ptr.lba_end;
+	return buff_sz;
+}
-static irqreturn_t elphel_irq_handler(int irq, void * dev_instance)
+static ssize_t lba_current_read(struct device *dev, struct device_attribute *attr, char *buff)
{
-	irqreturn_t handled;
-	struct ata_host *host = dev_instance;
+	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	struct elphel_ahci_priv *dpriv = hpriv->plat_data;
-	u32 irq_stat, irq_masked;
-	if (dpriv->flags & IRQ_SIMPLE) {
-		/* handle interrupt */
-		printk(KERN_DEBUG ">>> handling interrupt\n");
-		dpriv->flags &= ~IRQ_SIMPLE;
-		// clear_bit(IRQ_SIMPLE, &dpriv->flags);
-		irq_stat = readl(hpriv->mmio + HOST_IRQ_STAT);
-		if (!irq_stat)
-			return IRQ_NONE;
-		// irq_masked = irq_stat & hpriv->port_map;
-		writel(irq_stat, hpriv->mmio + HOST_IRQ_STAT);
-		handled = IRQ_HANDLED;
-	} else {
-		/* pass handling to AHCI level*/
-		handled = ahci_single_irq_intr(irq, dev_instance);
-	}
-	return handled;
+	return snprintf(buff, 20, "%llu\n", dpriv->lba_ptr.lba_write);
+}
+static ssize_t lba_current_write(struct device *dev, struct device_attribute *attr, const char *buff, size_t buff_sz)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	struct elphel_ahci_priv *dpriv = hpriv->plat_data;
+	if (kstrtoull(buff, 10, &dpriv->lba_ptr.lba_write) != 0)
+		return -EINVAL;
+	return buff_sz;
}
static DEVICE_ATTR(load_module, S_IWUSR | S_IWGRP, NULL, set_load_flag);
static DEVICE_ATTR(write, S_IWUSR | S_IWGRP, NULL, rawdev_write);
static DEVICE_ATTR(lba_start, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, lba_start_read, lba_start_write);
static DEVICE_ATTR(lba_end, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, lba_end_read, lba_end_write);
static DEVICE_ATTR(lba_current, S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP, lba_current_read, lba_current_write);
static struct attribute *root_dev_attrs[] = {
&dev_attr_load_module.attr,
&dev_attr_write.attr,
&dev_attr_lba_start.attr,
&dev_attr_lba_end.attr,
&dev_attr_lba_current.attr,
&dev_attr_data_0_sz.attr,
&dev_attr_data_1_sz.attr,
&dev_attr_data_proc.attr,
&dev_attr_exif_sz.attr,
&dev_attr_jpg_hdr_sz.attr,
NULL
};
static const struct attribute_group dev_attr_root_group = {
.attrs = root_dev_attrs,
.name = NULL,
};
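/* Usage sketch (assumption, not part of this commit): the lba_* attributes
 * above are plain sysfs files taking decimal LBA numbers, so the raw buffer
 * window can be set up from user space; the exact device path depends on how
 * the platform device is instantiated and is hypothetical here:
 *   echo 1000 > /sys/devices/.../elphel-ahci/lba_start
 *   echo 1000000 > /sys/devices/.../elphel-ahci/lba_end
 *   cat /sys/devices/.../elphel-ahci/lba_current
 */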
static struct ata_port_operations ahci_elphel_ops = {
.inherits = &ahci_ops,
.port_start = elphel_port_start,
.qc_prep = elphel_qc_prep,
};
static const struct ata_port_info ahci_elphel_port_info = {
AHCI_HFLAGS(AHCI_HFLAG_NO_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_elphel_ops,
};
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
.can_queue = 1,
.sg_tablesize = AHCI_MAX_SG,
.dma_boundary = AHCI_DMA_BOUNDARY,
.shost_attrs = ahci_shost_attrs,
.sdev_attrs = ahci_sdev_attrs,
};
static const struct of_device_id ahci_elphel_of_match[] = {
{ .compatible = "elphel,elphel-ahci", },
{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, ahci_elphel_of_match);
static struct platform_driver ahci_elphel_driver = {
.probe = elphel_drv_probe,
.remove = elphel_drv_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = ahci_elphel_of_match,
},
};
module_platform_driver(ahci_elphel_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Elphel, Inc.");
MODULE_DESCRIPTION("Elphel AHCI SATA platform driver for elphel393 camera");
/** @file ahci_elphel_ext.h
*
 * @brief Elphel AHCI SATA platform driver for Elphel393 camera. This module provides
 * additional functions which allow using a part of a disk (or the entire disk) as a
 * raw circular buffer.
*
* @copyright Copyright (C) 2016 Elphel, Inc
*
* @par <b>License</b>
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "../elphel/circbuf.h"
#ifndef _AHCI_ELPHEL_EXT
#define _AHCI_ELPHEL_EXT
/** Flag indicating that IRQ should not be processed in ahci_port_interrupt */
#define IRQ_SIMPLE (1 << 0)
/** Processing command in progress */
#define PROC_CMD (1 << 1)
/** Flag indicating that the remaining chunk of data will be recorded */
#define LAST_BLOCK (1 << 2)
/** The length of a command FIS in double words */
#define CMD_FIS_LEN 5
/** This is used to get 28-bit address from 64-bit value */
#define ADDR_MASK_28_BIT ((u64)0xfffffff)
/** An array of JPEG frame chunks contains pointers to JPEG leading marker,
* JPEG header, Exif data if present, stuffing bytes chunk which aligns
* the frame size to disk sector boundary, JPEG data which
* can be split into two chunks, their corresponding align buffers, JPEG
* trailing marker, and pointer to a buffer containing the remainder of a
* frame. Ten chunks of data in total.
* @todo Fix description */
#define MAX_DATA_CHUNKS 10
/** Default port number */
#define DEFAULT_PORT_NUM 0
/** Align buffer length to this number of bytes */
#define ALIGNMENT_SIZE 32
/** Address alignment boundary */
#define ALIGNMENT_ADDR 2
/** This structure holds raw device buffer pointers */
struct drv_pointers {
uint64_t lba_start; ///< raw buffer starting LBA
uint64_t lba_end; ///< raw buffer ending LBA
uint64_t lba_write; ///< current write pointer inside raw buffer
uint16_t wr_count; ///< the number of LBAs to write next time
};
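/* A sketch (assumption, not from this commit) of how a write pointer bounded
 * by these fields could advance and wrap inside the raw device buffer: */
static inline void advance_lba(struct drv_pointers *lba_ptr, uint16_t blocks)
{
	lba_ptr->lba_write += blocks;
	if (lba_ptr->lba_write >= lba_ptr->lba_end)
		/* wrap around to the beginning of the raw buffer */
		lba_ptr->lba_write = lba_ptr->lba_start +
				(lba_ptr->lba_write - lba_ptr->lba_end);
}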
//struct fvec {
// void *iov_base; ///< pointer to allocated buffer
// size_t iov_len; ///< the size (in bytes) of allocated buffer; set after allocation and is not modified during buffer lifetime
// dma_addr_t iov_dma; ///< buffer physical address
//};
//struct frame_vects {
// struct fvec prev_chunk; ///< remainder chunk of data from previous frame (NOT USED NOW)
// struct fvec header_buff; ///< buffer for JPEG header
// struct fvec leader; ///< JPEG leading marker, pointer to header_buff
// struct fvec exif; ///< pointer to Exif data buffer
// struct fvec header; ///< JPEG header data without leading marker, pointer to header_buff
// struct fvec stuffing; ///< stuffing bytes for frame alignment (WILL BE REMOVED AFTER SPEED TEST)
// struct kvec jpeg[2]; ///< pointers to JPEG frame data which can be split across circular buffer boundary
// struct fvec trailer; ///< JPEG trailing marker
//};
struct frame_buffers {
	struct fvec exif_buff;     ///< buffer for Exif data
	struct fvec jpheader_buff; ///< buffer for JPEG header data
	struct fvec trailer_buff;  ///< buffer for JPEG trailing marker
	struct fvec common_buff;   ///< common buffer used to assemble frame chunks
	struct fvec rem_buff;      ///< remainder from previous frame
};
enum {
CHUNK_LEADER,
CHUNK_EXIF,
CHUNK_HEADER,
CHUNK_COMMON,
CHUNK_DATA_0,
CHUNK_ALIGN_0,
CHUNK_DATA_1,
CHUNK_ALIGN_1,
CHUNK_TRAILER,
CHUNK_REM
};
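/* Sketch (assumption, not from this commit): walking the chunk array in enum
 * order gives the total number of bytes currently described by the chunks: */
static inline size_t chunks_total_size(const struct fvec *chunks)
{
	size_t total = 0;
	int i;
	for (i = 0; i < MAX_DATA_CHUNKS; i++)
		total += chunks[i].iov_len;
	return total;
}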
struct elphel_ahci_priv {
u32 clb_offs;
u32 fb_offs;
u32 base_addr;
u32 flags;
int curr_cmd;
size_t max_data_sz;
struct drv_pointers lba_ptr;
struct frame_buffers fbuffs;
struct fvec data_chunks[MAX_DATA_CHUNKS];
struct fvec sgl[MAX_DATA_CHUNKS];
int sg_elems;
int curr_data_chunk; ///< index of a data chunk used during last transaction
size_t curr_data_offset; ///< offset of the last byte in a data chunk pointed to by @e curr_data_chunk
};
#endif /* _AHCI_ELPHEL_EXT */
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#include <elphel/driver_numbers.h>
#include <elphel/c313a.h>
@@ -99,6 +100,36 @@ int init_ccam_dma_buf_ptr(struct platform_device *pdev)
return 0;
}
int circbuf_get_ptr(int sensor_port, size_t offset, size_t len, struct fvec *vect_0, struct fvec *vect_1)
{
int ret = 1;
if (offset > CCAM_DMA_SIZE || sensor_port >= SENSOR_PORTS)
return -EINVAL;
if (offset + len < CCAM_DMA_SIZE) {
// the image is not split
vect_0->iov_base = circbuf_priv[sensor_port].buf_ptr + offset;
vect_0->iov_dma = circbuf_priv[sensor_port].phys_addr + offset;
vect_0->iov_len = len;
vect_1->iov_base = NULL;
vect_1->iov_len = 0;
vect_1->iov_dma = 0;
} else {
// the image is split into two segments
vect_0->iov_base = circbuf_priv[sensor_port].buf_ptr + offset;
vect_0->iov_dma = circbuf_priv[sensor_port].phys_addr + offset;
vect_0->iov_len = CCAM_DMA_SIZE - offset;
vect_1->iov_base = circbuf_priv[sensor_port].buf_ptr;
vect_1->iov_dma = circbuf_priv[sensor_port].phys_addr;
vect_1->iov_len = len - vect_0->iov_len;
ret = 2;
}
return ret;
}
EXPORT_SYMBOL_GPL(circbuf_get_ptr);
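/* A minimal usage sketch (assumption, not from this commit): fetch the JPEG
 * data pointers of one frame and walk the one or two returned segments;
 * process_segment() is a hypothetical consumer. */
extern void process_segment(void *data, size_t len);
static void circbuf_walk_sketch(int sensor_port, size_t offset, size_t len)
{
	struct fvec vec_0, vec_1;
	int n = circbuf_get_ptr(sensor_port, offset, len, &vec_0, &vec_1);
	if (n > 0) {
		process_segment(vec_0.iov_base, vec_0.iov_len);
		if (n == 2)
			/* the frame wraps around the circular buffer boundary */
			process_segment(vec_1.iov_base, vec_1.iov_len);
	}
}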
/**
 * @brief Process circular buffer file opening and define further action in accordance
 * with minor file number.
...
@@ -55,4 +55,13 @@ extern unsigned short circbuf_width;
extern unsigned char circbuf_byrshift;
/* end of debug code */
/* this should be placed in a common driver include file */
struct fvec {
void *iov_base; ///< pointer to allocated buffer
size_t iov_len; ///< the size (in bytes) of allocated buffer; set after allocation and is not modified during buffer lifetime
dma_addr_t iov_dma; ///< buffer physical address
};
/* end of common includes */
int circbuf_get_ptr(int sensor_port, size_t offset, size_t len, struct fvec *vect_0, struct fvec *vect_1);
#endif /* _CIRCBUF_H */
@@ -82,7 +82,7 @@
#include "exif393.h"
#define D(x)
-//#define D(x) printk("%s:%d:",__FILE__,__LINE__);x
+//#define D(x) printk(">>> %s:%d:",__FILE__,__LINE__);x
//Major
@@ -115,6 +115,7 @@ static int aexif_wp[SENSOR_PORTS] = {1,1,1,1}; // frame write pointer in
static int aexif_enabled[SENSOR_PORTS] = {0,0,0,0}; // enable storing of frame meta data, enable reading Exif data
static int aexif_valid[SENSOR_PORTS] = {0,0,0,0}; // Exif tables and buffer are valid.
static char * ameta_buffer[SENSOR_PORTS]= {NULL,NULL,NULL,NULL}; // dynamically allocated buffer to store frame meta data.
static char exif_tmp_buff[MAX_EXIF_SIZE];
//static char * meta_buffer=NULL; // dynamically allocated buffer to store frame meta data.
// page 0 - temporary storage, 1..MAX_EXIF_FRAMES - buffer
@@ -741,6 +742,35 @@ static ssize_t exif_read (struct file * file, char * buf, size_t count, lof
return count;
}
/* This code is copied from exif_read(); consider replacing the body of exif_read() with a call to this function */
size_t exif_get_data(int sensor_port, unsigned short meta_index, void *buff, size_t buff_sz)
{
size_t count = exif_template_size;
loff_t off;
int start_p, page_p, i;
char *metap;
//will truncate by the end of current page
if (!aexif_enabled[sensor_port])
return 0;
off = meta_index * exif_template_size;
D(printk("%s: count= 0x%x, *off= 0x%x, i=0x%x, exif_template_size=0x%x\n", __func__, (int) count, (int) off, (int) meta_index, (int) exif_template_size));
start_p = meta_index * exif_template_size;
page_p = off - start_p;
D(printk("%s: count= 0x%x, pos= 0x%x, start_p=0x%x, page_p=0x%x, i=0x%x, exif_template_size=0x%x\n", __func__, (int) count, (int) off, (int)start_p, (int)page_p,(int) meta_index, (int) exif_template_size));
metap = &ameta_buffer[sensor_port][meta_index * aexif_meta_size[sensor_port]]; // pointer to the start of the selected page in frame meta_buffer
if ((page_p + count) > exif_template_size)
count = exif_template_size - page_p;
memcpy(exif_tmp_buff, exif_template, exif_template_size);
D(printk("%s: count= 0x%x, pos= 0x%x, start_p=0x%x, page_p=0x%x\n", __func__, (int) count, (int) off, (int)start_p, (int)page_p));
for (i = 0; i < exif_fields; i++) {
memcpy(&exif_tmp_buff[dir_table[i].dst], &metap[dir_table[i].src], dir_table[i].len);
}
memcpy(buff, &exif_tmp_buff[page_p], count);
return count;
}
EXPORT_SYMBOL_GPL(exif_get_data);
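/* A minimal usage sketch (assumption, not part of this commit): fill the Exif
 * chunk of an outgoing frame from that frame's meta data page, as the test
 * write path in the AHCI driver does; 'buffs' and 'chunks' stand for the
 * caller's frame_buffers and chunk array. */
static void exif_chunk_sketch(int sensor_port, unsigned short meta_index,
		struct frame_buffers *buffs, struct fvec *chunks)
{
	size_t rcvd = exif_get_data(sensor_port, meta_index,
			buffs->exif_buff.iov_base, buffs->exif_buff.iov_len);
	chunks[CHUNK_EXIF].iov_base = buffs->exif_buff.iov_base;
	chunks[CHUNK_EXIF].iov_len = rcvd;
}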
//!++++++++++++++++++++++++++++++++++++ _init() ++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
@@ -40,5 +40,6 @@ int putlong_meta(int sensor_port, unsigned long data, int * indx, unsigned long
char * encode_time(char buf[27], unsigned long sec, unsigned long usec);
int store_meta(int sensor_port); //called from IRQ service - put current metadata to meta_buffer, return page index
size_t exif_get_data(int sensor_port, unsigned short meta_index, void * buff, size_t buff_sz);
#endif
@@ -412,6 +412,22 @@ ssize_t jpeghead_read(struct file *file, char *buf, size_t count, loff_t *off)
return count;
}
ssize_t jpeghead_get_data(int sensor_port, void *buff, size_t buff_sz, size_t offset)
{
unsigned long ptr = offset;
size_t count = jpeghead_priv[sensor_port].jpeg_h_sz;
if (ptr >= jpeghead_priv[sensor_port].jpeg_h_sz)
ptr = jpeghead_priv[sensor_port].jpeg_h_sz;
if ((ptr + count) > jpeghead_priv[sensor_port].jpeg_h_sz)
count = jpeghead_priv[sensor_port].jpeg_h_sz - ptr;
if (buff_sz < count)
return -EINVAL;
memcpy(buff, &jpeghead_priv[sensor_port].header[ptr], count);
return count;
}
EXPORT_SYMBOL_GPL(jpeghead_get_data);
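/* A minimal usage sketch (assumption, not part of this commit): read the JPEG
 * header built for the current frame and split off the leading marker, as the
 * AHCI driver's test write path does; 'buffs' and 'chunks' are the caller's. */
static void jpeg_header_sketch(int sensor_port, struct frame_buffers *buffs, struct fvec *chunks)
{
	ssize_t rcvd = jpeghead_get_data(sensor_port, buffs->jpheader_buff.iov_base,
			buffs->jpheader_buff.iov_len, 0);
	if (rcvd > 0) {
		chunks[CHUNK_LEADER].iov_len = JPEG_MARKER_LEN;
		chunks[CHUNK_TRAILER].iov_len = JPEG_MARKER_LEN;
		chunks[CHUNK_HEADER].iov_len = rcvd - chunks[CHUNK_LEADER].iov_len;
	}
}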
/**huffman_* file operations
 * write, read Huffman tables, initialize tables to default ones, program FPGA with the Huffman tables
...
@@ -14,6 +14,7 @@ int jpegheader_create(struct interframe_params_t * params, unsigned char * b
int jpeghead_open (struct inode *inode, struct file *filp); // set filesize
loff_t jpeghead_lseek (struct file * file, loff_t offset, int orig, struct interframe_params_t *fp);
ssize_t jpeghead_read (struct file * file, char * buf, size_t count, loff_t *off);
ssize_t jpeghead_get_data(int sensor_port, void *buff, size_t buff_sz, size_t offset);
int huffman_open (struct inode *inode, struct file *filp); // set filesize
int huffman_release(struct inode *inode, struct file *filp);
...