Commit 5a0e45c2 authored by Andrey Filippov

more debugging, recording fake circbuf region from kernel space

parent 8b4a20b4
@@ -573,12 +573,15 @@ EXPORT_SYMBOL(bio_put);
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *!* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
}
return bio->bi_phys_segments;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *!* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
}
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *#* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
}
return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
@@ -844,16 +847,31 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
size_t offset;
ssize_t size;
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
// pr_debug("size = %d", size);
if (unlikely(size <= 0)) {
pr_debug("size = %d <0, return -EFAULT", size);
return size ? size : -EFAULT;
size_t offset;
ssize_t size;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *A* nr_pages=0x%04x,bio->bi_max_vecs=0x%04x, bio->bi_vcnt==0x%04x", nr_pages, bio->bi_max_vecs, bio->bi_vcnt); // *A* nr_pages=0x009f,bio->bi_max_vecs=0x009f, bio->bi_vcnt==0x0000
pr_debug(" *B* niter->type=%d, iter->iov_offset=0x%08x, iter->count=0x%08x, iter->nr_segs=0x%08lx",\
iter->type, iter->iov_offset,iter->count,iter->nr_segs); // *B* niter->type=1, iter->iov_offset=0x00000000, iter->count=0x0009f000, iter->nr_segs=0x00000001
}
idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0)) {
pr_debug("Error=%d, trying continuous mmap-ed pages, bio->bi_vcnt=0x%04x, bio->bi_max_vecs=0x%04x, offset=0x%08x",
size, bio->bi_vcnt, bio->bi_max_vecs, offset);
if ((size == -EFAULT) && ( bio->bi_vcnt == 0) && (offset == 0) && (iter->nr_segs == 1)){ // Only for all pages, single segment
size= iov_iter_get_pages_elphel(iter, pages, LONG_MAX, nr_pages, &offset);
}
if (unlikely(size <= 0)) {
pr_debug("Error after iov_iter_get_pages_elphel: %d, trying continuous mmap-ed pages, bio->bi_vcnt=0x%04x, bio->bi_max_vecs=0x%04x, offset=0x%08x",
size, bio->bi_vcnt, bio->bi_max_vecs, offset);
return size ? size : -EFAULT;
}
}
idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *C* nr_pages=0x%04x, size=0x%08x, offset==0x%04x", nr_pages, size, offset); // *C* nr_pages=0x009f, size=0x0009f000, offset==0x0000
}
/*
* Deep magic below: We need to walk the pinned pages backwards
* because we are abusing the space allocated for the bio_vecs
@@ -870,15 +888,21 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bv[idx].bv_len = PAGE_SIZE;
bv[idx].bv_offset = 0;
}
// trim first and last page
bv[0].bv_offset += offset;
bv[0].bv_len -= offset;
bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
iov_iter_advance(iter, size);
pr_debug("size = %d", size);
bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda* *D* nr_pages=0x00a0, size=0x000a0000, offset=0x0000, iter->count=0x000a0000, iter->nr_segs=0x00000001
pr_debug(" *D* nr_pages=0x%04x, size=0x%08x, offset=0x%04x, iter->count=0x%08x, iter->nr_segs=0x%08lx", nr_pages, size, offset, iter->count, iter->nr_segs);
}
iov_iter_advance(iter, size); // updates (normally zeros) iter->count, iter->nr_segs (by size)
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda* // *E* niter->type=1, iter->iov_offset=0x00000000, iter->count=0x00000000, iter->nr_segs=0x00000000
pr_debug(" *E* niter->type=%d, iter->iov_offset=0x%08x, iter->count=0x%08x, iter->nr_segs=0x%08lx",\
iter->type, iter->iov_offset, iter->count, iter->nr_segs);
pr_debug(" *F* nr_pages=0x%04x, size=0x%08x, offset==0x%04x", nr_pages, size, offset); // *F* nr_pages=0x009f, size=0x0009f000, offset==0x0000
}
return 0;
return 0;
}
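
For reference, a minimal standalone sketch of the first/last-page trimming arithmetic above, fed with the values captured in the *C*/*D* traces (PAGE_SIZE assumed to be 4096; illustration only, not part of the patch):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t size = 0x0009f000;   /* bytes pinned, as in the *C* trace */
	size_t offset = 0;          /* offset into the first pinned page */
	unsigned int nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
	size_t first_len = PAGE_SIZE - offset;                    /* bv[0].bv_len after trimming */
	size_t last_trim = nr_pages * PAGE_SIZE - offset - size;  /* taken off the last bv_len */

	/* prints: nr_pages=0x9f first_len=0x1000 last_trim=0x0 */
	printf("nr_pages=0x%x first_len=0x%zx last_trim=0x%zx\n",
	       nr_pages, first_len, last_trim);
	return 0;
}
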
/**
@@ -908,6 +932,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
static void submit_bio_wait_endio(struct bio *bio)
{
complete(bio->bi_private);
......
@@ -1172,6 +1172,9 @@ int blk_init_allocated_queue(struct request_queue *q)
INIT_WORK(&q->timeout_work, blk_timeout_work);
q->queue_flags |= QUEUE_FLAG_DEFAULT;
// if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
// pr_debug(" *!* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
// }
/*
* This also sets hw/phys segments, boundary and size
@@ -1970,7 +1973,9 @@ out:
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
struct io_context *ioc = rq_ioc(bio);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *!* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // already set
}
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
@@ -1998,7 +2003,9 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int where = ELEVATOR_INSERT_SORT;
struct request *req, *free;
unsigned int request_count = 0;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *?* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
}
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -2394,7 +2401,7 @@ blk_qc_t generic_make_request(struct bio *bio)
struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug("bio = 0x%08x", (int) bio);
pr_debug("bio = 0x%08x, bio->bi_phys_segments=0x%08x", (int) bio, bio->bi_phys_segments); // not yet calculated
}
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
@@ -2458,7 +2465,9 @@ blk_qc_t generic_make_request(struct bio *bio)
q = NULL;
}
}
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *{* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x, enter_succeeded=%d",bio->bi_flags, bio->bi_phys_segments, enter_succeeded);
}
if (enter_succeeded) {
struct bio_list lower, same;
@@ -2576,7 +2585,9 @@ blk_qc_t submit_bio(struct bio *bio)
bio_devname(bio, b), count);
}
}
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug("bio->bi_phys_segments=0x%08x", bio->bi_phys_segments);
}
return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
......
@@ -657,6 +657,8 @@ void blk_mq_start_request(struct request *rq)
*/
rq->nr_phys_segments++;
}
pr_debug("q->dma_drain_size=%u, blk_rq_bytes(rq)=%u",q->dma_drain_size,blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);
@@ -1656,6 +1658,10 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *+* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // set
}
blk_init_request_from_bio(rq, bio);
blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
@@ -1816,12 +1822,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *0* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // not yet set
}
blk_queue_bounce(q, &bio);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *1* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // not yet set
}
blk_queue_split(q, &bio);
if (!bio_integrity_prep(bio))
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *2* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // already set
}
if (!bio_integrity_prep(bio))
return BLK_QC_T_NONE;
if (!is_flush_fua && !blk_queue_nomerges(q) &&
@@ -1834,8 +1849,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio, NULL);
trace_block_getrq(q, bio, bio->bi_opf);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *3* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
}
rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *4* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments);
}
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
@@ -1848,6 +1869,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
cookie = request_to_qc_t(data.hctx, rq);
plug = current->plug;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *5* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // already set
}
if (unlikely(is_flush_fua)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
@@ -2567,6 +2591,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q)
{
// if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *-* ");
// }
/* mark the queue as mq asap */
q->mq_ops = set->ops;
......
@@ -551,15 +551,15 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
}
/*
* Next, the S/G list.
*/
n_elem = 0;
ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
dev_dbg(ap->dev, "page_link = 0x%08lx, offs = 0x%08x, length=0x%08x, dma_address=0x%08x n_elem = %u\n",\
qc->sg->page_link, qc->sg->offset, qc->sg->length, qc->sg->dma_address, n_elem );
if (qc && qc->sg) { // some nulls here cause crash on reboot
dev_dbg(ap->dev, "page_link = 0x%08lx, offs = 0x%08x, length=0x%08x, dma_address=0x%08x n_elem = %u\n",\
qc->sg->page_link, qc->sg->offset, qc->sg->length, qc->sg->dma_address, qc->n_elem );
}
if (qc->flags & ATA_QCFLAG_DMAMAP) {
for_each_sg(qc->sg, sg, qc->n_elem, n_elem) {
dma_addr_t addr = sg_dma_address(sg);
@@ -568,9 +568,10 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
ahci_sg[n_elem].addr = cpu_to_le32(addr & 0xffffffff);
ahci_sg[n_elem].addr_hi = cpu_to_le32((addr >> 16) >> 16);
ahci_sg[n_elem].flags_size = cpu_to_le32(sg_len - 1);
dev_dbg(ap->dev, "n_elem = %u, addr = 0x%x, len=%u, .addr=0x%08x .add_hi=0x%08x flags_size=0x%x\n",\
n_elem, addr, sg_len, ahci_sg[n_elem].addr, ahci_sg[n_elem].addr_hi, ahci_sg[n_elem].flags_size);
if (n_elem < 10) { // limiting log size
dev_dbg(ap->dev, "n_elem = %u, addr = 0x%x, len=%u, .addr=0x%08x .add_hi=0x%08x flags_size=0x%x\n",\
n_elem, addr, sg_len, ahci_sg[n_elem].addr, ahci_sg[n_elem].addr_hi, ahci_sg[n_elem].flags_size);
}
}
}
@@ -592,19 +593,20 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
struct elphel_ahci_priv *dpriv = dev_get_dpriv(ap->dev);
set_dscope_tstamp(dpriv, TSTMP_CMD_SYS);
if ((qc->tf.command==ATA_CMD_WRITE) || (qc->tf.command==ATA_CMD_WRITE)) {
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.device & 0xf)) << 24),
qc->tf.nsect);
} else {
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.hob_lbal + (qc->tf.hob_lbam << 8)+ (qc->tf.hob_lbah << 16))) << 24),
qc->tf.nsect + (qc->tf.hob_nsect << 8));
if (qc) {
if ((qc->tf.command==ATA_CMD_WRITE) || (qc->tf.command==ATA_CMD_WRITE)) {
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.device & 0xf)) << 24),
qc->tf.nsect);
} else {
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.hob_lbal + (qc->tf.hob_lbam << 8)+ (qc->tf.hob_lbah << 16))) << 24),
qc->tf.nsect + (qc->tf.hob_nsect << 8));
}
}
//same
dev_dbg(ap->dev, "cmd = 0x%x, lba = %u, nob_lba = %u, nsect = %u, nob_nsect = %u\n",
......
@@ -1281,7 +1281,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries;
pr_debug("blk_rq_is_scsi(rq) = %d",blk_rq_is_scsi(rq));
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
scsi_initialize_rq(rq);
@@ -1360,7 +1360,7 @@ static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
cmd->sc_data_direction = DMA_TO_DEVICE;
else
cmd->sc_data_direction = DMA_FROM_DEVICE;
pr_debug("blk_rq_is_scsi(req)=%d, cmd->sc_data_direction=%d", blk_rq_is_scsi(req), cmd->sc_data_direction);
if (blk_rq_is_scsi(req))
return scsi_setup_scsi_cmnd(sdev, req);
else
@@ -2038,7 +2038,8 @@ static int scsi_mq_prep_fn(struct request *req)
}
blk_mq_start_request(req);
pr_debug("shost->hostt->cmd_size=%u, sizeof(struct scsi_cmnd)=%u, scsi_host_get_prot(shost)=0x%08x",\
shost->hostt->cmd_size, sizeof(struct scsi_cmnd), scsi_host_get_prot(shost));
return scsi_setup_cmnd(sdev, req);
}
......
@@ -241,7 +241,16 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
// bio.bi_disk, bio.bi_iter.bi_sector, bio.bi_iter.bi_size, bio.bi_private, bio.bi_vcnt, bio.bi_max_vecs, bio.bi_io_vec, bio.bi_pool);
pr_debug("bi_disk=0x%08x, bi_sector=%llu, bi_size=%u, bi_private=0x%08x, bi_vcnt=%u, bi_max_vecs=%u, bi_io_vec=0x%08x, bi_pool=0x%08x",
(int)(bio.bi_disk), bio.bi_iter.bi_sector, bio.bi_iter.bi_size, (int)(bio.bi_private), bio.bi_vcnt, bio.bi_max_vecs, (int) (bio.bi_io_vec), (int) (bio.bi_pool ));
pr_debug("bi_phys_segments=0x%08x",bio.bi_phys_segments);
// for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
bio_for_each_segment_all(bvec, &bio, i) {
if (i < 10){
pr_debug("%3u: bvec->bv_page=0x%08x, phys=0x%08x bv_len=%#x, bv_offset=%#x, bvec=0x%x",\
i, (int) (bvec->bv_page), (int)(page_to_phys(bvec->bv_page)), bvec->bv_len, bvec->bv_offset, (int) bvec);
}
}
pr_debug("ARCH_PFN_OFFSET =0x%08lx, PHYS_PFN_OFFSET=0x%08lx, mem_map=0x%08x, bi_phys_segments=0x%08x", \
ARCH_PFN_OFFSET, PHYS_PFN_OFFSET, (int) mem_map, bio.bi_phys_segments);
qc = submit_bio(&bio);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -252,11 +261,11 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
io_schedule();
}
__set_current_state(TASK_RUNNING);
pr_debug("after __set_current_state(TASK_RUNNING), sizeof(struct page)=%d",sizeof(struct page));
pr_debug("after __set_current_state(TASK_RUNNING), bi_vcnt=%u, sizeof(struct page)=%d, ",bio.bi_vcnt, sizeof(struct page));
bio_for_each_segment_all(bvec, &bio, i) {
if (should_dirty && !PageCompound(bvec->bv_page))
set_page_dirty_lock(bvec->bv_page);
pr_debug("%3u: bvec->bv_page=0x%08x, phys=0x%08x bv_len=%#x, bv_offset=%#x, bvec=0x%x",i, (int) (bvec->bv_page), (int)(page_to_phys(bvec->bv_page)), bvec->bv_len, bvec->bv_offset, (int) bvec);
if (i < 10){pr_debug("%3u: bvec->bv_page=0x%08x, phys=0x%08x bv_len=%#x, bv_offset=%#x, bvec=0x%x",i, (int) (bvec->bv_page), (int)(page_to_phys(bvec->bv_page)), bvec->bv_len, bvec->bv_offset, (int) bvec);}
put_page(bvec->bv_page);
if (i < 10){
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bvec->bv_page, sizeof(struct page));
......
@@ -1192,7 +1192,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
{
if (maxsize > i->count)
maxsize = i->count;
pr_debug("maxsize = %d, maxpages=%d, start (output) = 0x%x ", maxsize, maxpages, *start);
pr_debug("maxsize = %d, maxpages=%d, count=%d, start (output) = 0x%x ", maxsize, maxpages, i->count, *start);
if (unlikely(i->type & ITER_PIPE))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
iterate_all_kinds(i, maxsize, v, ({
@@ -1226,6 +1226,42 @@ ssize_t iov_iter_get_pages,
}
EXPORT_SYMBOL(iov_iter_get_pages);
ssize_t iov_iter_get_pages_elphel(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
// temporary - fixed physical addresses
unsigned long circbuf_pa [4]= {0x0ae00000, 0x0ee00000, 0x12e00000, 0x16e00000};
unsigned long circbuf_pages = 0x4000; // per channel
int indx, n; // page index
// just testing, not thread-safe, no locking
const struct page * circbuf0_page0 = phys_to_page(circbuf_pa[0]);
size_t len;
const struct iovec *iov = i->iov; // only use the first entry in the array - supposed to be a single-element iovec
unsigned long addr = (unsigned long) iov->iov_base;
// *** Putting temporary address here
struct page * cur_page = (struct page * ) circbuf0_page0;
addr = 0;
len = iov->iov_len + (*start = addr & (PAGE_SIZE - 1)); // to include the beginning of the page - for now 0
if (maxsize > i->count)
maxsize = i->count;
if (len > maxpages * PAGE_SIZE)
len = maxpages * PAGE_SIZE;
addr &= ~(PAGE_SIZE - 1);
n = DIV_ROUND_UP(len, PAGE_SIZE);
for (indx = 0; indx < n; indx++){
pages[indx] = cur_page++;
}
len -= *start;
pr_debug("iov_iter_get_pages_elphel()->0x%08x, n=%u", len, n);
return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_elphel);
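
iov_iter_get_pages_elphel() fills pages[] with cur_page++, which relies on the struct page entries for the physically contiguous circbuf being consecutive in mem_map (true for a flat mem_map, which the ARCH_PFN_OFFSET/mem_map trace in __blkdev_direct_IO_simple appears to be verifying). A sanity-check sketch of that assumption, using a hypothetical helper that is not part of this commit:

#include <linux/mm.h>
#include <linux/pfn.h>

/* Returns true if the struct page pointers for the region starting at phys
 * can be walked with simple pointer increments, as the fallback assumes. */
static bool circbuf_pages_contiguous(phys_addr_t phys, unsigned long n_pages)
{
	unsigned long pfn = PHYS_PFN(phys);
	unsigned long i;

	for (i = 1; i < n_pages; i++)
		if (pfn_to_page(pfn + i) != pfn_to_page(pfn) + i)
			return false;	/* mem_map is not flat across this range */
	return true;
}
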
static struct page **get_pages_array(size_t n)
{
return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
......
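
For context, the path exercised by this commit is a user-space writer that mmap()s the camera circbuf and passes that mapping directly to write() on an O_DIRECT block-device file descriptor: get_user_pages() refuses PFN-mapped VMAs with -EFAULT, which is the case the new iov_iter_get_pages_elphel() fallback catches. A minimal sketch of such a writer; the device nodes and transfer size are assumptions for illustration, not taken from this commit:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 0x9f000;                              /* matches the size seen in the *B* trace */
	int cb = open("/dev/circbuf0", O_RDONLY);          /* assumed circbuf device node */
	int sd = open("/dev/sda2", O_WRONLY | O_DIRECT);   /* assumed destination partition */
	void *buf;

	if (cb < 0 || sd < 0) { perror("open"); return 1; }
	buf = mmap(NULL, len, PROT_READ, MAP_SHARED, cb, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }
	/* O_DIRECT write from a PFN-mapped buffer: iov_iter_get_pages() returns
	 * -EFAULT here, triggering the iov_iter_get_pages_elphel() fallback. */
	if (write(sd, buf, len) < 0)
		perror("write");
	munmap(buf, len);
	close(sd);
	close(cb);
	return 0;
}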