Commit d3ca86d1 authored by Andrey Filippov

more debugging, duplicated/edited kernel files

parent 531d1367
@@ -2466,7 +2466,7 @@ blk_qc_t generic_make_request(struct bio *bio)
}
}
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *{* state before blk_mq_make_request(): bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x, enter_succeeded=%d",bio->bi_flags, bio->bi_phys_segments, enter_succeeded);
pr_debug(" *{* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x, enter_succeeded=%d",bio->bi_flags, bio->bi_phys_segments, enter_succeeded);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bio, sizeof(struct bio));
}
if (enter_succeeded) {
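The added trace fires only for bios aimed at disk major 8, which is SCSI_DISK0_MAJOR (/dev/sda and its partitions). A minimal sketch of the same gating factored into a helper; the helper name is ours, not the kernel's:

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/major.h>	/* SCSI_DISK0_MAJOR == 8 */
#include <linux/printk.h>

/* hypothetical helper: trace and hex-dump a bio only when it targets sda* */
static void dump_bio_if_sda(const char *tag, struct bio *bio)
{
	if (!bio->bi_disk || bio->bi_disk->major != SCSI_DISK0_MAJOR)
		return;
	pr_debug("%s: bi_flags=0x%04x, bi_phys_segments=0x%08x\n",
		 tag, bio->bi_flags, bio->bi_phys_segments);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bio, sizeof(*bio));
}

Note that pr_debug() emits nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG enables the call site, so the traces added by this commit are inert on a default build.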
@@ -3717,14 +3717,18 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
pr_debug("enter");
flush_plug_callbacks(plug, from_schedule);
if (!list_empty(&plug->mq_list))
if (!list_empty(&plug->mq_list)) {
pr_debug("mq_list not empty");
blk_mq_flush_plug_list(plug, from_schedule);
}
if (list_empty(&plug->list))
if (list_empty(&plug->list)) {
pr_debug("list is empty");
return;
}
list_splice_init(&plug->list, &list);
@@ -3734,6 +3738,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
depth = 0;
while (!list_empty(&list)) {
pr_debug("while");
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
BUG_ON(!rq->q);
@@ -3772,14 +3777,16 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
pr_debug("exit");
}
void blk_finish_plug(struct blk_plug *plug)
{
if (plug != current->plug)
return;
pr_debug(" ** ** ");
blk_flush_plug_list(plug, false);
current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
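blk_finish_plug() is the normal entry into the blk_flush_plug_list() instrumented above. For context, a sketch of the usual plugging pattern around batched submission; the caller and its name are illustrative:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* illustrative caller: batch several bios under one plug */
static void submit_batch(struct bio **bios, int n)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* no-op if current->plug is already set */
	for (i = 0; i < n; i++)
		submit_bio(bios[i]);	/* held on the plug lists, not dispatched */
	blk_finish_plug(&plug);		/* -> blk_flush_plug_list(plug, false) */
}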
......
@@ -554,7 +554,7 @@ static void __blk_mq_complete_request_remote(void *data)
rq->q->softirq_done_fn(rq);
}
static void __blk_mq_complete_request(struct request *rq)
static void __blk_mq_complete_request(struct request *rq) // called from interrupt when the (SATA) command is finished
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
bool shared = false;
@@ -615,6 +615,7 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
**/
void blk_mq_complete_request(struct request *rq)
{
pr_debug(" ->*<-");
if (unlikely(blk_should_fake_timeout(rq->q)))
return;
__blk_mq_complete_request(rq);
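blk_mq_complete_request() is where a driver's interrupt path hands a finished request back to blk-mq; __blk_mq_complete_request() then runs q->softirq_done_fn, either locally or via IPI on the submitting CPU. In this kernel that hook appears to come from the blk_mq_ops ->complete op (scsi_softirq_done for scsi-mq). A sketch of such a handler; the body is illustrative, not the scsi one:

#include <linux/blk-mq.h>

/* illustrative ->complete handler, reached via q->softirq_done_fn(rq) */
static void example_softirq_done(struct request *rq)
{
	/* finish the request; BLK_STS_OK marks full success */
	blk_mq_end_request(rq, BLK_STS_OK);
}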
@@ -1286,7 +1287,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
cpumask_empty(hctx->cpumask) ? "inactive": "active");
dump_stack();
}
pr_debug("__blk_mq_run_hw_queue");
pr_debug("__blk_mq_run_hw_queue, hctx->flags=0x%08lx, may sleep = 0x%08lx", hctx->flags, (hctx->flags & BLK_MQ_F_BLOCKING));
/*
* We can't run the queue inline with ints disabled. Ensure that
* we catch bad users of this early.
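The extended trace prints hctx->flags together with the BLK_MQ_F_BLOCKING bit, which tells blk-mq whether this hardware context's ->queue_rq() may sleep. A sketch of how a driver opts in when building its tag set; the values are illustrative:

#include <linux/blk-mq.h>
#include <linux/numa.h>

/* illustrative tag-set setup: BLK_MQ_F_BLOCKING makes blk-mq call
 * ->queue_rq() from a context that is allowed to sleep */
static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops)
{
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	return blk_mq_alloc_tag_set(set);
}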
@@ -1667,6 +1668,10 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
blk_account_io_start(rq, true);
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *++* bio->bi_flags=0x%04x, bio->bi_phys_segments=0x%08x",bio->bi_flags, bio->bi_phys_segments); // set
}
}
static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1940,7 +1945,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true, true);
}
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *6* cookie=0x%08x",cookie);
}
return cookie;
}
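The cookie printed at *6* is a blk_qc_t that packs the hardware queue number and the request tag, per the request_to_qc_t() whose signature appears just below. A sketch of the encoding using the constants from blk_types.h; the helper name is ours:

#include <linux/blk_types.h>
#include <linux/types.h>

/* sketch of the cookie layout: hw queue index above BLK_QC_T_SHIFT,
 * (driver or internal) tag in the low bits */
static inline blk_qc_t cookie_sketch(unsigned int queue_num, unsigned int tag,
				     bool internal)
{
	blk_qc_t qc = tag | (queue_num << BLK_QC_T_SHIFT);

	return internal ? (qc | BLK_QC_T_INTERNAL) : qc;
}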
......
@@ -594,13 +594,31 @@ static void elphel_qc_prep(struct ata_queued_cmd *qc)
struct elphel_ahci_priv *dpriv = dev_get_dpriv(ap->dev);
set_dscope_tstamp(dpriv, TSTMP_CMD_SYS);
if (qc) {
if ((qc->tf.command==ATA_CMD_WRITE) || (qc->tf.command==ATA_CMD_WRITE)) {
if (qc->tf.command==ATA_CMD_WRITE) { // 28-bit. Separate debug to be able to turn on/off individually
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.device & 0xf)) << 24),
qc->tf.nsect);
} else {
} else if (qc->tf.command==ATA_CMD_READ) { // 28-bit
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.device & 0xf)) << 24),
qc->tf.nsect);
} else if (qc->tf.command==ATA_CMD_WRITE_EXT) { // 48-bit
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.hob_lbal + (qc->tf.hob_lbam << 8)+ (qc->tf.hob_lbah << 16))) << 24),
qc->tf.nsect + (qc->tf.hob_nsect << 8));
} else if (qc->tf.command==ATA_CMD_READ_EXT) { // 48-bit
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
(((long) (qc->tf.hob_lbal + (qc->tf.hob_lbam << 8)+ (qc->tf.hob_lbah << 16))) << 24),
qc->tf.nsect + (qc->tf.hob_nsect << 8));
} else { // other commands
dev_dbg(ap->dev, "cmd = 0x%x, lba = %lu, nsect = %u\n",
qc->tf.command,
((long)(qc->tf.lbal + (qc->tf.lbam << 8)+ (qc->tf.lbah << 16))) +
......
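All four debug branches above decode the LBA from the ATA taskfile; only the source of bits 24 and up differs between 28-bit and 48-bit commands (and the 48-bit sector count gains hob_nsect as its high byte). A standalone sketch of the two decodings, with helper names of our choosing:

#include <linux/libata.h>

/* 28-bit commands: LBA bits 0..23 in lbal/lbam/lbah, bits 24..27 in
 * the low nibble of the device register */
static u64 lba28_sketch(const struct ata_taskfile *tf)
{
	return (u64)tf->lbal | ((u64)tf->lbam << 8) | ((u64)tf->lbah << 16) |
	       ((u64)(tf->device & 0xf) << 24);
}

/* 48-bit commands: hob_lbal/hob_lbam/hob_lbah supply bits 24..47,
 * and the sector count is nsect | (hob_nsect << 8) */
static u64 lba48_sketch(const struct ata_taskfile *tf)
{
	return (u64)tf->lbal | ((u64)tf->lbam << 8) | ((u64)tf->lbah << 16) |
	       ((u64)tf->hob_lbal << 24) | ((u64)tf->hob_lbam << 32) |
	       ((u64)tf->hob_lbah << 40);
}

This also shows why the four dev_dbg() branches could collapse to two once the command is classified as 28- or 48-bit.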
@@ -1842,8 +1842,11 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
static void scsi_done(struct scsi_cmnd *cmd)
{
pr_debug(">1<"); // never?
trace_scsi_dispatch_cmd_done(cmd);
pr_debug(">2<"); // never?
blk_complete_request(cmd->request);
pr_debug(">3<"); // never?
}
/*
@@ -1868,7 +1871,7 @@ static void scsi_request_fn(struct request_queue *q)
struct Scsi_Host *shost;
struct scsi_cmnd *cmd;
struct request *req;
pr_debug("entered"); // never?
/*
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
@@ -1958,7 +1961,7 @@ static void scsi_request_fn(struct request_queue *q)
}
spin_lock_irq(q->queue_lock);
}
pr_debug("return");
return;
host_not_ready:
@@ -1973,12 +1976,14 @@ static void scsi_request_fn(struct request_queue *q)
* cases (host limits or settings) should run the queue at some
* later time.
*/
pr_debug("not_ready");
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
atomic_dec(&sdev->device_busy);
out_delay:
if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
blk_delay_queue(q, SCSI_QUEUE_DELAY);
pr_debug("exit after not_ready/out delay");
}
static inline blk_status_t prep_to_mq(int ret)
@@ -2006,7 +2011,7 @@ static int scsi_mq_prep_fn(struct request *req)
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scatterlist *sg;
pr_debug("SCSI start async");
scsi_init_command(sdev, cmd);
req->special = cmd;
@@ -2043,10 +2048,14 @@ static int scsi_mq_prep_fn(struct request *req)
return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
static void scsi_mq_done(struct scsi_cmnd *cmd) // called from interrupt
{
trace_scsi_dispatch_cmd_done(cmd);
pr_debug(" <1> ");
trace_scsi_dispatch_cmd_done(cmd); // DEFINE_EVENT
pr_debug(" <2> ");
blk_mq_complete_request(cmd->request);
pr_debug(" <3> ");
}
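scsi_mq_done() runs in interrupt context because scsi_queue_rq() stores it in cmd->scsi_done before dispatch (the ->5<- marker below). A sketch of the low-level-driver side that eventually invokes it; the function name and result value are illustrative:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* illustrative LLD completion, called from the IRQ path */
static void example_lld_done(struct scsi_cmnd *cmd)
{
	cmd->result = DID_OK << 16;	/* host byte: success */
	cmd->scsi_done(cmd);		/* -> scsi_mq_done() -> blk_mq_complete_request() */
}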
static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
@@ -2088,8 +2097,9 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
blk_status_t ret;
int reason;
pr_debug("scsi_queue_rq()");
pr_debug("ENTERED, req->rq_flags=0x%08x", req->rq_flags);
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
pr_debug(" ->1<- ret=%d", ret);
if (ret != BLK_STS_OK)
goto out_put_budget;
@@ -2098,31 +2108,32 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_put_budget;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
pr_debug(" ->2<- queues are ready");
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = prep_to_mq(scsi_mq_prep_fn(req));
ret = prep_to_mq(scsi_mq_prep_fn(req)); // down to elphel_qc_prep
pr_debug(" ->3<- ret=%d", ret);
if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
} else {
blk_mq_start_request(req);
pr_debug(" ->4<- ");
}
if (sdev->simple_tags)
cmd->flags |= SCMD_TAGGED;
else
cmd->flags &= ~SCMD_TAGGED;
scsi_init_cmd_errh(cmd);
cmd->scsi_done = scsi_mq_done;
cmd->scsi_done = scsi_mq_done; // set function to run on interrupt
pr_debug(" ->5<- ");
reason = scsi_dispatch_cmd(cmd);
pr_debug(" ->6<- reason=%d",reason);
if (reason) {
scsi_set_blocked(cmd, reason);
ret = BLK_STS_RESOURCE;
goto out_dec_host_busy;
}
pr_debug(" exiting with BLK_STS_OK ");
return BLK_STS_OK;
out_dec_host_busy:
@@ -2150,6 +2161,7 @@ out_put_budget:
scsi_mq_uninit_cmd(cmd);
break;
}
pr_debug(" exiting with ret=%d", ret);
return ret;
}
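The ret values printed at ->1<-, ->3<- and at exit are blk_status_t codes coming out of prep_to_mq(), which translates the legacy BLKPREP_* results (its signature appears in the hunk above). A sketch of that mapping, reconstructed from memory of this kernel version, so treat it as approximate:

#include <linux/blkdev.h>	/* BLKPREP_* */
#include <linux/blk_types.h>	/* blk_status_t */

/* approximate reconstruction of prep_to_mq() */
static blk_status_t prep_to_mq_sketch(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return BLK_STS_OK;
	case BLKPREP_DEFER:
		return BLK_STS_RESOURCE;	/* requeued and retried later */
	default:
		return BLK_STS_IOERR;
	}
}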
@@ -2168,7 +2180,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
struct scatterlist *sg;
pr_debug("ENTEring");
if (unchecked_isa_dma)
cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
@@ -2190,7 +2202,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
pr_debug(" enteRING");
scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
cmd->sense_buffer);
}
@@ -2207,7 +2219,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
struct device *dev = shost->dma_dev;
pr_debug("starting");
/*
* this limit is imposed by hardware restrictions
*/
@@ -2250,7 +2262,7 @@ static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
struct Scsi_Host *shost = q->rq_alloc_data;
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
pr_debug("--starting OLD--");
memset(cmd, 0, sizeof(*cmd));
if (unchecked_isa_dma)
@@ -2278,7 +2290,7 @@ fail:
static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
pr_debug("-- exiting OLD--");
if (cmd->prot_sdb)
kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
@@ -2289,7 +2301,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
pr_debug("-- allocating OLD--");
q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
if (!q)
return NULL;
@@ -2457,6 +2469,7 @@ int __init scsi_init_queue(void)
void scsi_exit_queue(void)
{
pr_debug("-- exiting queue --");
kmem_cache_destroy(scsi_sense_cache);
kmem_cache_destroy(scsi_sense_isadma_cache);
kmem_cache_destroy(scsi_sdb_cache);
@@ -3014,7 +3027,7 @@ static int scsi_request_fn_active(struct scsi_device *sdev)
static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
{
WARN_ON_ONCE(sdev->host->use_blk_mq);
pr_debug("-- waiting --");
while (scsi_request_fn_active(sdev))
msleep(20);
}
......
@@ -176,10 +176,12 @@ static unsigned int dio_bio_write_op(struct kiocb *iocb)
#define DIO_INLINE_BIO_VECS BIO_MAX_PAGES // was 4
static void blkdev_bio_end_io_simple(struct bio *bio)
static void blkdev_bio_end_io_simple(struct bio *bio) // called from interrupt
{
struct task_struct *waiter = bio->bi_private;
if (bio->bi_disk && (bio->bi_disk->major == 8)) { // sda, sda*
pr_debug(" *X* ");
}
WRITE_ONCE(bio->bi_private, NULL);
wake_up_process(waiter);
}
@@ -220,8 +222,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
bio.bi_iter.bi_sector = pos >> 9;
bio.bi_write_hint = iocb->ki_hint;
bio.bi_private = current;
bio.bi_end_io = blkdev_bio_end_io_simple;
bio.bi_ioprio = iocb->ki_ioprio;
bio.bi_end_io = blkdev_bio_end_io_simple; // what to do on IO done?
bio.bi_ioprio = iocb->ki_ioprio; // ?
ret = bio_iov_iter_get_pages(&bio, iter); // normally returns 0
pr_debug("ret = %d, bio.bi_vcnt=%d", ret, bio.bi_vcnt); // -14
@@ -257,17 +259,25 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
qc = submit_bio(&bio);
qc = submit_bio(&bio); // generic_make_request(&bio)
pr_debug("After qc = submit_bio(&bio), bio:");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, &bio, sizeof(struct bio));
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(bio.bi_private))
set_current_state(TASK_UNINTERRUPTIBLE); // ?
if (!READ_ONCE(bio.bi_private)) // set to current (process). Reset after - what?
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!blk_poll(bdev_get_queue(bdev), qc))
io_schedule();
if (!(iocb->ki_flags & IOCB_HIPRI) || // Is it correct? Should it be ((iocb->ki_flags & IOCB_HIPRI) || !blk_poll(bdev_get_queue(bdev), qc))?
!blk_poll(bdev_get_queue(bdev), qc)) { // i.e. blk_poll(bdev->bd_disk->queue, qc); does not have a disk queue?
pr_debug("before io_schedule: iocb->ki_flags=0x%08x", iocb->ki_flags);
if (bio.bi_disk && (bio.bi_disk->major == 8) && (iocb->ki_flags & IOCB_DIRECT) && (iocb->ki_flags & IOCB_NOWAIT)) { // sda, sda*
io_schedule_elphel();
} else {
io_schedule();
}
pr_debug("after io_schedule, bi_private=0x%08x", (int) bio.bi_private);
}
}
pr_debug("before __set_current_state(TASK_RUNNING)"); // first after wait
__set_current_state(TASK_RUNNING);
pr_debug("after __set_current_state(TASK_RUNNING), bi_vcnt=%u, sizeof(struct page)=%d, ",bio.bi_vcnt, sizeof(struct page));
bio_for_each_segment_all(bvec, &bio, i) {
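The loop above is one half of a hand-rolled completion: __blkdev_direct_IO_simple() parks itself on bio.bi_private (set to current before submit_bio()), and blkdev_bio_end_io_simple() from the first hunk of this file clears it from interrupt context and wakes the task. Both sides of the handshake, reduced to a sketch:

#include <linux/bio.h>
#include <linux/sched.h>

/* interrupt side: clear the marker, wake the submitter */
static void endio_wake_sketch(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	wake_up_process(waiter);
}

/* submitter side: sleep until the marker is cleared */
static void wait_for_bio_sketch(struct bio *bio)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

This answers the "// ?" notes above: bi_private doubles as the wait flag, and the reset happens in the end_io callback.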
@@ -364,7 +374,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
int ret = 0;
pr_debug("pos = %lld, nr_pages = %d", pos, nr_pages);
pr_debug("pos = %lld, nr_pages = %d, is_sync=%d", pos, nr_pages, is_sync_kiocb(iocb));
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1)) {
pr_debug("pos = %lld, iov_iter_alignment(iter) = %ld, nr_pages = %d, bdev_logical_block_size(bdev) = %d", pos, iov_iter_alignment(iter), nr_pages, bdev_logical_block_size(bdev));
@@ -1930,11 +1940,11 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
loff_t size = i_size_read(bd_inode);
struct blk_plug plug;
ssize_t ret;
pr_debug("pos=%llu, size=%llu", iocb->ki_pos, size);
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("pos=%llu, size=%llu, ki_flags=0x%08x", iocb->ki_pos, size, iocb->ki_flags);
}
if (bdev_read_only(I_BDEV(bd_inode)))
return -EPERM;
if (!iov_iter_count(from))
return 0;
@@ -1943,12 +1953,23 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
return -EOPNOTSUPP;
iov_iter_truncate(from, size - iocb->ki_pos);
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("Calling blk_start_plug. Disabled if it is continuing existing plug. current->plug = 0x%08x", (int) current-> plug);
}
blk_start_plug(&plug);
// pr_debug("after blk_start_plug()");
ret = __generic_file_write_iter(iocb, from);
if (ret > 0)
if (ret > 0) {
ret = generic_write_sync(iocb, ret);
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("synchronizing written, ret=%d",ret);
}
}
// pr_debug("before blk_finish_plug()");
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("Calling blk_finish_plug. Disable it???");
}
blk_finish_plug(&plug);
// pr_debug("after blk_finish_plug()");
return ret;
......
@@ -471,7 +471,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
int debug_copied = 0;
if (!strncmp(filp->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("pos=%llu, len=%d, buf=0x%p", *ppos, len, buf);
pr_debug("pos=%llu, len=%d, buf=0x%p, f_flags=0x%08x, f_mode=0x%08x", *ppos, len, buf, filp->f_flags, filp->f_mode);
}
init_sync_kiocb(&kiocb, filp);
kiocb.ki_pos = *ppos;
@@ -487,6 +487,9 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
BUG_ON(ret == -EIOCBQUEUED);
if (ret > 0)
*ppos = kiocb.ki_pos;
if (!strncmp(filp->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("new_sync_write() -> %d", ret);
}
return ret;
}
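new_sync_write() is the adapter that drives a file's ->write_iter from the plain write(2) path using an on-stack kiocb, which is why the debug here sees the same pos and flags as the deeper layers. A reduced sketch of the adapter, with the BUG_ON and the debug additions omitted:

#include <linux/fs.h>
#include <linux/uio.h>

/* reduced sketch of the sync-write adapter around ->write_iter */
static ssize_t sync_write_sketch(struct file *filp, const char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = call_write_iter(filp, &kiocb, &iter);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}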
@@ -497,7 +500,8 @@ ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
int debug_copied = 0;
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
debug_copied = copy_from_user(&debug_data, p, 8);
pr_debug("pos=%llu, count=%d, p=0x%p, data=0x%llx, debug_copied=%d", *pos, count, p, debug_data, debug_copied);
pr_debug("pos=%llu, count=%d, p=0x%p, data=0x%llx, debug_copied=%d, file->f_op->write=0x%08x, file->f_op->write_iter=0x%08x",
*pos, count, p, debug_data, debug_copied, (int) file->f_op->write, (int) file->f_op->write_iter);
// pr_debug("pos=%llu, count=%d, p=0x%p, data=0x%llx", *pos, count, p, ((loff_t*) (p))[0]); // panic reading user space from kernel code
}
if (file->f_op->write)
@@ -543,8 +547,10 @@ ssize_t kernel_write(struct file *file, const void *buf, size_t count,
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = vfs_write(file, (__force const char __user *)buf, count, pos);
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("kernel_write() -> %d", res);
}
set_fs(old_fs);
return res;
}
EXPORT_SYMBOL(kernel_write);
@@ -573,7 +579,9 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
inc_syscw(current);
file_end_write(file);
}
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("vfs_write() -> %d", ret);
}
return ret;
}
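The strncmp() gate on the dentry name recurs in every VFS hunk of this commit; if the instrumentation stays for long, a small predicate would keep the call sites readable. A hedged sketch, with a helper name of our choosing:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/string.h>

/* hypothetical helper: true when the file's dentry name starts with "sda2" */
static inline bool debug_traced_file(const struct file *file)
{
	return !strncmp(file->f_path.dentry->d_name.name, "sda2", 4);
}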
......
@@ -3229,6 +3229,11 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
pr_debug("iocb->ki_flags=0x%x", iocb->ki_flags);
}
if (iocb->ki_flags & IOCB_DIRECT) {
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
iocb->ki_flags |= IOCB_NOWAIT;
pr_debug("Testing - adding IOCB_NOWAIT: iocb->ki_flags=0x%x", iocb->ki_flags);
}
loff_t pos, endbyte;
pr_debug("using IOCB_DIRECT, count = %d, type = %d",iov_iter_count(from), from->type);
written = generic_file_direct_write(iocb, from);
@@ -3306,7 +3311,9 @@ ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
if (!strncmp(file->f_path.dentry->d_name.name, "sda2", 4)) {
pr_debug("pos=%llu, ki_flags=0x%08x", iocb->ki_pos, iocb->ki_flags);
}
inode_lock(inode);
ret = generic_write_checks(iocb, from);
if (ret > 0)
......