Commit 09fa190d authored by Andrey Filippov

making single-command write

parent 8209132f
@@ -852,7 +852,23 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	size_t offset;
 	ssize_t size;
 	if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
-		pr_debug(" *A* nr_pages=0x%04x,bio->bi_max_vecs=0x%04x, bio->bi_vcnt==0x%04x", nr_pages, bio->bi_max_vecs, bio->bi_vcnt); // *A* nr_pages=0x009f,bio->bi_max_vecs=0x009f, bio->bi_vcnt==0x0000
+		pr_debug(" *A* nr_pages=0x%04x,bio->bi_max_vecs=0x%04x, bio->bi_vcnt=0x%04x", nr_pages, bio->bi_max_vecs, bio->bi_vcnt); // *A* nr_pages=0x009f,bio->bi_max_vecs=0x009f, bio->bi_vcnt==0x0000
+		pr_debug(" *A0* max_hw_sectors=%d, max_dev_sectors=%d, max_sectors=%d, cluster=%d, max_segment_size=0x%08x",\
+			bio->bi_disk->queue->limits.max_hw_sectors, bio->bi_disk->queue->limits.max_dev_sectors, bio->bi_disk->queue->limits.max_sectors, bio->bi_disk->queue->limits.cluster, bio->bi_disk->queue->limits.max_segment_size);
+		// For now - just testing, not clear who/what sets // bio:__bio_iov_iter_get_pages:857: *A0* max_hw_sectors=65535, max_dev_sectors=65535, max_sectors=2560
+		// limited by BLK_DEF_MAX_SECTORS = 2560!
+		/* blk_queue_cluster(q) return q->limits.cluster; q->limits.max_segment_size
+		   https://patchwork.kernel.org/project/linux-block/patch/21cf85d32278bbe5acbc3def0a6db75db98a2670.1459269590.git.shli@fb.com/
+		   bio_alloc_bioset() allocates bvecs from bvec_slabs which can only
+		   allocate maximum 256 bvec (eg, 1M for 4k pages). We can't bump
+		   BLK_DEF_MAX_SECTORS to exceed this value otherwise bio_alloc_bioset will
+		   fail.
+		*/
+		blk_queue_max_segment_size(bio->bi_disk->queue, 0x100000);
+		// blk_queue_max_hw_sectors(bio->bi_disk->queue, bio->bi_disk->queue->limits.max_hw_sectors); // still used constant
+		bio->bi_disk->queue->limits.max_sectors = min(bio->bi_disk->queue->limits.max_hw_sectors, bio->bi_disk->queue->limits.max_dev_sectors);
+		pr_debug(" *A1* max_hw_sectors=%d, max_dev_sectors=%d, max_sectors=%d, cluster=%d, max_segment_size=0x%08x",\
+			bio->bi_disk->queue->limits.max_hw_sectors, bio->bi_disk->queue->limits.max_dev_sectors, bio->bi_disk->queue->limits.max_sectors, bio->bi_disk->queue->limits.cluster, bio->bi_disk->queue->limits.max_segment_size);
 		pr_debug(" *B* niter->type=%d, iter->iov_offset=0x%08x, iter->count=0x%08x, iter->nr_segs=0x%08lx",\
 			iter->type, iter->iov_offset,iter->count,iter->nr_segs); // *B* niter->type=1, iter->iov_offset=0x00000000, iter->count=0x0009f000, iter->nr_segs=0x00000001
 	}
...
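Note on the hunk above: it raises the sda request queue's limits in place, on the I/O submission path, so that a large multi-page write can go out as a single command instead of being split. A minimal sketch of the same adjustment applied once per queue rather than on every bio is below; the helper name is hypothetical, and it assumes a 4.x-era kernel where blk_queue_max_segment_size() and the max_hw_sectors/max_dev_sectors fields are used exactly as in the hunk.

#include <linux/blkdev.h>
#include <linux/kernel.h>

/*
 * Sketch only, not part of the commit: the same limit adjustment applied
 * once to a queue instead of on every bio. The helper name is hypothetical.
 */
static void raise_queue_limits_for_single_command_write(struct request_queue *q)
{
	struct queue_limits *lim = &q->limits;

	/* Allow segments of up to 1 MiB (the stock default is 64 KiB). */
	blk_queue_max_segment_size(q, 0x100000);

	/*
	 * max_sectors is normally capped at BLK_DEF_MAX_SECTORS (2560 sectors,
	 * i.e. 1280 KiB); lift it to whatever the hardware and device limits
	 * allow so one large request is not split into several commands.
	 */
	lim->max_sectors = min(lim->max_hw_sectors, lim->max_dev_sectors);
}

Assigning limits.max_sectors directly matters here: the standard helper blk_queue_max_hw_sectors() re-clamps max_sectors to BLK_DEF_MAX_SECTORS, which is presumably why the commented-out call in the hunk is marked "still used constant".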
@@ -1832,7 +1832,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 	blk_queue_split(q, &bio);
-	if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
+	if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda* already split - 0xa segments (16 pages each)
 		pr_debug(" *2* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // already set
 	}
...
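For the second hunk: by the time this check runs, blk_queue_split() has already processed the bio, and the added comment records that the 0x9f-page write arrives as 0xa physical segments of 16 pages each, presumably because segments were still capped at the default 64 KiB (16 pages of 4 KiB) before the adjustment above takes effect. A small sketch of the same debug gate written against the named major constant instead of the literal 8 (helper name hypothetical, 4.x-era struct bio fields assumed):

#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/printk.h>

/*
 * Sketch only, not part of the commit: the same debug gate using
 * SCSI_DISK0_MAJOR (== 8) from <linux/major.h> rather than a magic number.
 * The helper name is hypothetical.
 */
static inline void trace_sd_bio_segments(const struct bio *bio)
{
	if (bio->bi_disk && bio->bi_disk->major == SCSI_DISK0_MAJOR) /* sda, sda* */
		pr_debug(" *2* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x\n",
			 bio->bi_flags, bio->bi_phys_segments);
}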