
Linux/fs/direct-io.c

  1 /*
  2  * fs/direct-io.c
  3  *
  4  * Copyright (C) 2002, Linus Torvalds.
  5  *
  6  * O_DIRECT
  7  *
  8  * 04Jul2002    Andrew Morton
  9  *              Initial version
 10  * 11Sep2002    janetinc@us.ibm.com
 11  *              added readv/writev support.
 12  * 29Oct2002    Andrew Morton
 13  *              rewrote bio_add_page() support.
 14  * 30Oct2002    pbadari@us.ibm.com
 15  *              added support for non-aligned IO.
 16  * 06Nov2002    pbadari@us.ibm.com
 17  *              added asynchronous IO support.
 18  * 21Jul2003    nathans@sgi.com
 19  *              added IO completion notifier.
 20  */
 21 
 22 #include <linux/kernel.h>
 23 #include <linux/module.h>
 24 #include <linux/types.h>
 25 #include <linux/fs.h>
 26 #include <linux/mm.h>
 27 #include <linux/slab.h>
 28 #include <linux/highmem.h>
 29 #include <linux/pagemap.h>
 30 #include <linux/task_io_accounting_ops.h>
 31 #include <linux/bio.h>
 32 #include <linux/wait.h>
 33 #include <linux/err.h>
 34 #include <linux/blkdev.h>
 35 #include <linux/buffer_head.h>
 36 #include <linux/rwsem.h>
 37 #include <linux/uio.h>
 38 #include <linux/atomic.h>
 39 #include <linux/prefetch.h>
 40 
 41 /*
 42  * How many user pages to map in one call to get_user_pages().  This determines
 43  * the size of a structure in the slab cache
 44  */
 45 #define DIO_PAGES       64
 46 
 47 /*
 48  * This code generally works in units of "dio_blocks".  A dio_block is
 49  * somewhere between the hard sector size and the filesystem block size.  It
 50  * is determined on a per-invocation basis.   When talking to the filesystem
 51  * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 52  * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 53  * to dio_block quantities by shifting left by blkfactor.
 54  *
 55  * If blkfactor is zero then the user's request was aligned to the filesystem's
 56  * blocksize.
 57  */
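As a concrete illustration of the scaling described above (an editorial aside, not part of the kernel source): with 4096-byte filesystem blocks and 512-byte dio blocks, blkfactor is 3, so dio_block and fs_block numbers convert by a three-bit shift. A minimal userspace sketch, with all values assumed for illustration:

/* Standalone sketch of the dio_block <-> fs_block scaling; not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned i_blkbits = 12;			/* 4096-byte filesystem blocks */
	unsigned blkbits   = 9;				/* 512-byte dio blocks */
	unsigned blkfactor = i_blkbits - blkbits;	/* 3: 8 dio blocks per fs block */

	unsigned long long block_in_file = 21;		/* an offset in dio_block units */
	unsigned long long fs_block  = block_in_file >> blkfactor;	/* fs block 2 */
	unsigned long long first_dio = fs_block << blkfactor;		/* dio block 16 */

	printf("blkfactor=%u, dio block %llu lies in fs block %llu (starting at dio block %llu)\n",
	       blkfactor, block_in_file, fs_block, first_dio);
	return 0;
}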
 58 
 59 /* dio_state only used in the submission path */
 60 
 61 struct dio_submit {
 62         struct bio *bio;                /* bio under assembly */
 63         unsigned blkbits;               /* doesn't change */
 64         unsigned blkfactor;             /* When we're using an alignment which
 65                                            is finer than the filesystem's soft
 66                                            blocksize, this specifies how much
 67                                            finer.  blkfactor=2 means 1/4-block
 68                                            alignment.  Does not change */
 69         unsigned start_zero_done;       /* flag: sub-blocksize zeroing has
 70                                            been performed at the start of a
 71                                            write */
 72         int pages_in_io;                /* approximate total IO pages */
 73         sector_t block_in_file;         /* Current offset into the underlying
 74                                            file in dio_block units. */
 75         unsigned blocks_available;      /* At block_in_file.  changes */
 76         int reap_counter;               /* rate limit reaping */
 77         sector_t final_block_in_request;/* doesn't change */
 78         int boundary;                   /* prev block is at a boundary */
 79         get_block_t *get_block;         /* block mapping function */
 80         dio_submit_t *submit_io;        /* IO submission function */
 81 
 82         loff_t logical_offset_in_bio;   /* current first logical block in bio */
 83         sector_t final_block_in_bio;    /* current final block in bio + 1 */
 84         sector_t next_block_for_io;     /* next block to be put under IO,
 85                                            in dio_blocks units */
 86 
 87         /*
 88          * Deferred addition of a page to the dio.  These variables are
 89          * private to dio_send_cur_page(), submit_page_section() and
 90          * dio_bio_add_page().
 91          */
 92         struct page *cur_page;          /* The page */
 93         unsigned cur_page_offset;       /* Offset into it, in bytes */
 94         unsigned cur_page_len;          /* Nr of bytes at cur_page_offset */
 95         sector_t cur_page_block;        /* Where it starts */
 96         loff_t cur_page_fs_offset;      /* Offset in file */
 97 
 98         struct iov_iter *iter;
 99         /*
100          * Page queue.  These variables belong to dio_refill_pages() and
101          * dio_get_page().
102          */
103         unsigned head;                  /* next page to process */
104         unsigned tail;                  /* last valid page + 1 */
105         size_t from, to;
106 };
107 
108 /* dio_state communicated between submission path and end_io */
109 struct dio {
110         int flags;                      /* doesn't change */
111         int op;
112         int op_flags;
113         blk_qc_t bio_cookie;
114         struct block_device *bio_bdev;
115         struct inode *inode;
116         loff_t i_size;                  /* i_size when submitted */
117         dio_iodone_t *end_io;           /* IO completion function */
118 
119         void *private;                  /* copy from map_bh.b_private */
120 
121         /* BIO completion state */
122         spinlock_t bio_lock;            /* protects BIO fields below */
123         int page_errors;                /* errno from get_user_pages() */
124         int is_async;                   /* is IO async ? */
125         bool defer_completion;          /* defer AIO completion to workqueue? */
126         bool should_dirty;              /* if pages should be dirtied */
127         int io_error;                   /* IO error in completion path */
128         unsigned long refcount;         /* direct_io_worker() and bios */
129         struct bio *bio_list;           /* singly linked via bi_private */
130         struct task_struct *waiter;     /* waiting task (NULL if none) */
131 
132         /* AIO related stuff */
133         struct kiocb *iocb;             /* kiocb */
134         ssize_t result;                 /* IO result */
135 
136         /*
137          * pages[] (and any fields placed after it) are not zeroed out at
138          * allocation time.  Don't add new fields after pages[] unless you
139          * wish that they not be zeroed.
140          */
141         union {
142                 struct page *pages[DIO_PAGES];  /* page buffer */
143                 struct work_struct complete_work;/* deferred AIO completion */
144         };
145 } ____cacheline_aligned_in_smp;
146 
147 static struct kmem_cache *dio_cache __read_mostly;
148 
149 /*
150  * How many pages are in the queue?
151  */
152 static inline unsigned dio_pages_present(struct dio_submit *sdio)
153 {
154         return sdio->tail - sdio->head;
155 }
156 
157 /*
158  * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
159  */
160 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
161 {
162         ssize_t ret;
163 
164         ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
165                                 &sdio->from);
166 
167         if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
168                 struct page *page = ZERO_PAGE(0);
169                 /*
170                  * A memory fault, but the filesystem has some outstanding
171                  * mapped blocks.  We need to use those blocks up to avoid
172                  * leaking stale data in the file.
173                  */
174                 if (dio->page_errors == 0)
175                         dio->page_errors = ret;
176                 get_page(page);
177                 dio->pages[0] = page;
178                 sdio->head = 0;
179                 sdio->tail = 1;
180                 sdio->from = 0;
181                 sdio->to = PAGE_SIZE;
182                 return 0;
183         }
184 
185         if (ret >= 0) {
186                 iov_iter_advance(sdio->iter, ret);
187                 ret += sdio->from;
188                 sdio->head = 0;
189                 sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
190                 sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
191                 return 0;
192         }
193         return ret;     
194 }
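The head/tail/to arithmetic above is easier to follow with concrete numbers. A standalone userspace sketch (sample values assumed: 4096-byte pages, the data starting 512 bytes into the first pinned page, and 10000 bytes obtained from iov_iter_get_pages()):

/* Reproduces only the page-queue arithmetic of dio_refill_pages(); not kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t from = 512;		/* offset of the data within the first page */
	size_t got  = 10000;		/* bytes returned by iov_iter_get_pages() */
	size_t ret  = got + from;	/* as in "ret += sdio->from" above */

	unsigned tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;	/* pages in the queue: 3 */
	size_t   to   = ((ret - 1) & (PAGE_SIZE - 1)) + 1;	/* valid bytes in last page: 2320 */

	printf("tail=%u pages, last page valid up to byte %zu\n", tail, to);
	return 0;
}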
195 
196 /*
197  * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
198  * buffered inside the dio so that we can call get_user_pages() against a
 199  * decent number of pages, less frequently, to provide nicer use of the
200  * L1 cache.
201  */
202 static inline struct page *dio_get_page(struct dio *dio,
203                                         struct dio_submit *sdio)
204 {
205         if (dio_pages_present(sdio) == 0) {
206                 int ret;
207 
208                 ret = dio_refill_pages(dio, sdio);
209                 if (ret)
210                         return ERR_PTR(ret);
211                 BUG_ON(dio_pages_present(sdio) == 0);
212         }
213         return dio->pages[sdio->head];
214 }
215 
216 /**
217  * dio_complete() - called when all DIO BIO I/O has been completed
 218  * @dio: the dio structure describing the completed operation
219  *
220  * This drops i_dio_count, lets interested parties know that a DIO operation
221  * has completed, and calculates the resulting return code for the operation.
222  *
223  * It lets the filesystem know if it registered an interest earlier via
224  * get_block.  Pass the private field of the map buffer_head so that
225  * filesystems can use it to hold additional state between get_block calls and
226  * dio_complete.
227  */
228 static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
229 {
230         loff_t offset = dio->iocb->ki_pos;
231         ssize_t transferred = 0;
232 
233         /*
234          * AIO submission can race with bio completion to get here while
235          * expecting to have the last io completed by bio completion.
236          * In that case -EIOCBQUEUED is in fact not an error we want
237          * to preserve through this call.
238          */
239         if (ret == -EIOCBQUEUED)
240                 ret = 0;
241 
242         if (dio->result) {
243                 transferred = dio->result;
244 
245                 /* Check for short read case */
246                 if ((dio->op == REQ_OP_READ) &&
247                     ((offset + transferred) > dio->i_size))
248                         transferred = dio->i_size - offset;
249                 /* ignore EFAULT if some IO has been done */
250                 if (unlikely(ret == -EFAULT) && transferred)
251                         ret = 0;
252         }
253 
254         if (ret == 0)
255                 ret = dio->page_errors;
256         if (ret == 0)
257                 ret = dio->io_error;
258         if (ret == 0)
259                 ret = transferred;
260 
261         if (dio->end_io) {
262                 int err;
263 
264                 // XXX: ki_pos??
265                 err = dio->end_io(dio->iocb, offset, ret, dio->private);
266                 if (err)
267                         ret = err;
268         }
269 
270         if (!(dio->flags & DIO_SKIP_DIO_COUNT))
271                 inode_dio_end(dio->inode);
272 
273         if (is_async) {
274                 /*
275                  * generic_write_sync expects ki_pos to have been updated
276                  * already, but the submission path only does this for
277                  * synchronous I/O.
278                  */
279                 dio->iocb->ki_pos += transferred;
280 
281                 if (dio->op == REQ_OP_WRITE)
282                         ret = generic_write_sync(dio->iocb,  transferred);
283                 dio->iocb->ki_complete(dio->iocb, ret, 0);
284         }
285 
286         kmem_cache_free(dio_cache, dio);
287         return ret;
288 }
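The short-read clamp in dio_complete() is simple arithmetic but easy to get backwards. A standalone sketch with assumed numbers (an 8192-byte read submitted at offset 4096 into a 6000-byte file is trimmed to 1904 bytes):

/* Illustration of the short-read clamp in dio_complete(); not kernel code. */
#include <stdio.h>

int main(void)
{
	long long i_size = 6000;	/* file size sampled at submission time */
	long long offset = 4096;	/* where the read started */
	long long transferred = 8192;	/* bytes the bios actually moved */

	if (offset + transferred > i_size)
		transferred = i_size - offset;	/* 1904: never report data past EOF */

	printf("reported result: %lld bytes\n", transferred);
	return 0;
}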
289 
290 static void dio_aio_complete_work(struct work_struct *work)
291 {
292         struct dio *dio = container_of(work, struct dio, complete_work);
293 
294         dio_complete(dio, 0, true);
295 }
296 
297 static int dio_bio_complete(struct dio *dio, struct bio *bio);
298 
299 /*
300  * Asynchronous IO callback. 
301  */
302 static void dio_bio_end_aio(struct bio *bio)
303 {
304         struct dio *dio = bio->bi_private;
305         unsigned long remaining;
306         unsigned long flags;
307 
308         /* cleanup the bio */
309         dio_bio_complete(dio, bio);
310 
311         spin_lock_irqsave(&dio->bio_lock, flags);
312         remaining = --dio->refcount;
313         if (remaining == 1 && dio->waiter)
314                 wake_up_process(dio->waiter);
315         spin_unlock_irqrestore(&dio->bio_lock, flags);
316 
317         if (remaining == 0) {
318                 if (dio->result && dio->defer_completion) {
319                         INIT_WORK(&dio->complete_work, dio_aio_complete_work);
320                         queue_work(dio->inode->i_sb->s_dio_done_wq,
321                                    &dio->complete_work);
322                 } else {
323                         dio_complete(dio, 0, true);
324                 }
325         }
326 }
327 
328 /*
329  * The BIO completion handler simply queues the BIO up for the process-context
330  * handler.
331  *
332  * During I/O bi_private points at the dio.  After I/O, bi_private is used to
333  * implement a singly-linked list of completed BIOs, at dio->bio_list.
334  */
335 static void dio_bio_end_io(struct bio *bio)
336 {
337         struct dio *dio = bio->bi_private;
338         unsigned long flags;
339 
340         spin_lock_irqsave(&dio->bio_lock, flags);
341         bio->bi_private = dio->bio_list;
342         dio->bio_list = bio;
343         if (--dio->refcount == 1 && dio->waiter)
344                 wake_up_process(dio->waiter);
345         spin_unlock_irqrestore(&dio->bio_lock, flags);
346 }
347 
348 /**
349  * dio_end_io - handle the end io action for the given bio
350  * @bio: The direct io bio thats being completed
351  * @error: Error if there was one
352  *
 353  * This is meant to be called by any filesystem that uses its own dio_submit_t
 354  * so that the DIO specific endio actions are dealt with after the filesystem
 355  * has done its completion work.
356  */
357 void dio_end_io(struct bio *bio, int error)
358 {
359         struct dio *dio = bio->bi_private;
360 
361         if (dio->is_async)
362                 dio_bio_end_aio(bio);
363         else
364                 dio_bio_end_io(bio);
365 }
366 EXPORT_SYMBOL_GPL(dio_end_io);
367 
368 static inline void
369 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
370               struct block_device *bdev,
371               sector_t first_sector, int nr_vecs)
372 {
373         struct bio *bio;
374 
375         /*
376          * bio_alloc() is guaranteed to return a bio when called with
377          * __GFP_RECLAIM and we request a valid number of vectors.
378          */
379         bio = bio_alloc(GFP_KERNEL, nr_vecs);
380 
381         bio->bi_bdev = bdev;
382         bio->bi_iter.bi_sector = first_sector;
383         bio_set_op_attrs(bio, dio->op, dio->op_flags);
384         if (dio->is_async)
385                 bio->bi_end_io = dio_bio_end_aio;
386         else
387                 bio->bi_end_io = dio_bio_end_io;
388 
389         sdio->bio = bio;
390         sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
391 }
392 
393 /*
394  * In the AIO read case we speculatively dirty the pages before starting IO.
395  * During IO completion, any of these pages which happen to have been written
396  * back will be redirtied by bio_check_pages_dirty().
397  *
398  * bios hold a dio reference between submit_bio and ->end_io.
399  */
400 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
401 {
402         struct bio *bio = sdio->bio;
403         unsigned long flags;
404 
405         bio->bi_private = dio;
406 
407         spin_lock_irqsave(&dio->bio_lock, flags);
408         dio->refcount++;
409         spin_unlock_irqrestore(&dio->bio_lock, flags);
410 
411         if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
412                 bio_set_pages_dirty(bio);
413 
414         dio->bio_bdev = bio->bi_bdev;
415 
416         if (sdio->submit_io) {
417                 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
418                 dio->bio_cookie = BLK_QC_T_NONE;
419         } else
420                 dio->bio_cookie = submit_bio(bio);
421 
422         sdio->bio = NULL;
423         sdio->boundary = 0;
424         sdio->logical_offset_in_bio = 0;
425 }
426 
427 /*
428  * Release any resources in case of a failure
429  */
430 static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
431 {
432         while (sdio->head < sdio->tail)
433                 put_page(dio->pages[sdio->head++]);
434 }
435 
436 /*
437  * Wait for the next BIO to complete.  Remove it and return it.  NULL is
438  * returned once all BIOs have been completed.  This must only be called once
439  * all bios have been issued so that dio->refcount can only decrease.  This
 440  * requires that the caller hold a reference on the dio.
441  */
442 static struct bio *dio_await_one(struct dio *dio)
443 {
444         unsigned long flags;
445         struct bio *bio = NULL;
446 
447         spin_lock_irqsave(&dio->bio_lock, flags);
448 
449         /*
450          * Wait as long as the list is empty and there are bios in flight.  bio
451          * completion drops the count, maybe adds to the list, and wakes while
452          * holding the bio_lock so we don't need set_current_state()'s barrier
453          * and can call it after testing our condition.
454          */
455         while (dio->refcount > 1 && dio->bio_list == NULL) {
456                 __set_current_state(TASK_UNINTERRUPTIBLE);
457                 dio->waiter = current;
458                 spin_unlock_irqrestore(&dio->bio_lock, flags);
459                 if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
460                     !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
461                         io_schedule();
462                 /* wake up sets us TASK_RUNNING */
463                 spin_lock_irqsave(&dio->bio_lock, flags);
464                 dio->waiter = NULL;
465         }
466         if (dio->bio_list) {
467                 bio = dio->bio_list;
468                 dio->bio_list = bio->bi_private;
469         }
470         spin_unlock_irqrestore(&dio->bio_lock, flags);
471         return bio;
472 }
473 
474 /*
475  * Process one completed BIO.  No locks are held.
476  */
477 static int dio_bio_complete(struct dio *dio, struct bio *bio)
478 {
479         struct bio_vec *bvec;
480         unsigned i;
481         int err;
482 
483         if (bio->bi_error)
484                 dio->io_error = -EIO;
485 
486         if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
487                 err = bio->bi_error;
488                 bio_check_pages_dirty(bio);     /* transfers ownership */
489         } else {
490                 bio_for_each_segment_all(bvec, bio, i) {
491                         struct page *page = bvec->bv_page;
492 
493                         if (dio->op == REQ_OP_READ && !PageCompound(page) &&
494                                         dio->should_dirty)
495                                 set_page_dirty_lock(page);
496                         put_page(page);
497                 }
498                 err = bio->bi_error;
499                 bio_put(bio);
500         }
501         return err;
502 }
503 
504 /*
505  * Wait on and process all in-flight BIOs.  This must only be called once
506  * all bios have been issued so that the refcount can only decrease.
507  * This just waits for all bios to make it through dio_bio_complete.  IO
508  * errors are propagated through dio->io_error and should be propagated via
509  * dio_complete().
510  */
511 static void dio_await_completion(struct dio *dio)
512 {
513         struct bio *bio;
514         do {
515                 bio = dio_await_one(dio);
516                 if (bio)
517                         dio_bio_complete(dio, bio);
518         } while (bio);
519 }
520 
521 /*
522  * A really large O_DIRECT read or write can generate a lot of BIOs.  So
523  * to keep the memory consumption sane we periodically reap any completed BIOs
524  * during the BIO generation phase.
525  *
526  * This also helps to limit the peak amount of pinned userspace memory.
527  */
528 static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
529 {
530         int ret = 0;
531 
532         if (sdio->reap_counter++ >= 64) {
533                 while (dio->bio_list) {
534                         unsigned long flags;
535                         struct bio *bio;
536                         int ret2;
537 
538                         spin_lock_irqsave(&dio->bio_lock, flags);
539                         bio = dio->bio_list;
540                         dio->bio_list = bio->bi_private;
541                         spin_unlock_irqrestore(&dio->bio_lock, flags);
542                         ret2 = dio_bio_complete(dio, bio);
543                         if (ret == 0)
544                                 ret = ret2;
545                 }
546                 sdio->reap_counter = 0;
547         }
548         return ret;
549 }
550 
551 /*
552  * Create workqueue for deferred direct IO completions. We allocate the
 553  * workqueue when it's first needed. This avoids creating the workqueue for
 554  * filesystems that don't need it and also allows us to create the workqueue
 555  * late enough that we can include s_id in the name of the workqueue.
556  */
557 int sb_init_dio_done_wq(struct super_block *sb)
558 {
559         struct workqueue_struct *old;
560         struct workqueue_struct *wq = alloc_workqueue("dio/%s",
561                                                       WQ_MEM_RECLAIM, 0,
562                                                       sb->s_id);
563         if (!wq)
564                 return -ENOMEM;
565         /*
566          * This has to be atomic as more DIOs can race to create the workqueue
567          */
568         old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
569         /* Someone created workqueue before us? Free ours... */
570         if (old)
571                 destroy_workqueue(wq);
572         return 0;
573 }
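The cmpxchg() above implements a lock-free "first caller wins" initialization: every racer allocates a workqueue, exactly one pointer gets installed in sb->s_dio_done_wq, and the losers destroy their copy. A userspace sketch of the same pattern using C11 atomics (malloc/free stand in for alloc_workqueue/destroy_workqueue; all names here are illustrative only):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) done_wq;		/* plays the role of sb->s_dio_done_wq */

static int init_done_wq(void)
{
	void *wq = malloc(64);		/* stand-in for alloc_workqueue() */
	void *expected = NULL;

	if (!wq)
		return -1;
	/* Install ours only if nobody beat us to it; otherwise discard ours. */
	if (!atomic_compare_exchange_strong(&done_wq, &expected, wq))
		free(wq);		/* stand-in for destroy_workqueue() */
	return 0;
}

int main(void)
{
	init_done_wq();
	init_done_wq();			/* the second call keeps the first wq */
	printf("wq installed at %p\n", atomic_load(&done_wq));
	return 0;
}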
574 
575 static int dio_set_defer_completion(struct dio *dio)
576 {
577         struct super_block *sb = dio->inode->i_sb;
578 
579         if (dio->defer_completion)
580                 return 0;
581         dio->defer_completion = true;
582         if (!sb->s_dio_done_wq)
583                 return sb_init_dio_done_wq(sb);
584         return 0;
585 }
586 
587 /*
588  * Call into the fs to map some more disk blocks.  We record the current number
589  * of available blocks at sdio->blocks_available.  These are in units of the
590  * fs blocksize, (1 << inode->i_blkbits).
591  *
592  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
593  * it uses the passed inode-relative block number as the file offset, as usual.
594  *
595  * get_block() is passed the number of i_blkbits-sized blocks which direct_io
596  * has remaining to do.  The fs should not map more than this number of blocks.
597  *
598  * If the fs has mapped a lot of blocks, it should populate bh->b_size to
599  * indicate how much contiguous disk space has been made available at
600  * bh->b_blocknr.
601  *
602  * If *any* of the mapped blocks are new, then the fs must set buffer_new().
603  * This isn't very efficient...
604  *
605  * In the case of filesystem holes: the fs may return an arbitrarily-large
606  * hole by returning an appropriate value in b_size and by clearing
607  * buffer_mapped().  However the direct-io code will only process holes one
608  * block at a time - it will repeatedly call get_block() as it walks the hole.
609  */
610 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
611                            struct buffer_head *map_bh)
612 {
613         int ret;
614         sector_t fs_startblk;   /* Into file, in filesystem-sized blocks */
615         sector_t fs_endblk;     /* Into file, in filesystem-sized blocks */
616         unsigned long fs_count; /* Number of filesystem-sized blocks */
617         int create;
618         unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
619 
620         /*
621          * If there was a memory error and we've overwritten all the
622          * mapped blocks then we can now return that memory error
623          */
624         ret = dio->page_errors;
625         if (ret == 0) {
626                 BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
627                 fs_startblk = sdio->block_in_file >> sdio->blkfactor;
628                 fs_endblk = (sdio->final_block_in_request - 1) >>
629                                         sdio->blkfactor;
630                 fs_count = fs_endblk - fs_startblk + 1;
631 
632                 map_bh->b_state = 0;
633                 map_bh->b_size = fs_count << i_blkbits;
634 
635                 /*
636                  * For writes that could fill holes inside i_size on a
637                  * DIO_SKIP_HOLES filesystem we forbid block creations: only
638                  * overwrites are permitted. We will return early to the caller
639                  * once we see an unmapped buffer head returned, and the caller
640                  * will fall back to buffered I/O.
641                  *
642                  * Otherwise the decision is left to the get_blocks method,
643                  * which may decide to handle it or also return an unmapped
644                  * buffer head.
645                  */
646                 create = dio->op == REQ_OP_WRITE;
647                 if (dio->flags & DIO_SKIP_HOLES) {
648                         if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
649                                                         i_blkbits))
650                                 create = 0;
651                 }
652 
653                 ret = (*sdio->get_block)(dio->inode, fs_startblk,
654                                                 map_bh, create);
655 
656                 /* Store for completion */
657                 dio->private = map_bh->b_private;
658 
659                 if (ret == 0 && buffer_defer_completion(map_bh))
660                         ret = dio_set_defer_completion(dio);
661         }
662         return ret;
663 }
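For reference, a get_block implementation honouring the contract described in the comment above get_more_blocks() might look roughly like the sketch below. It is hypothetical (an imaginary filesystem whose file data sits contiguously on disk, starting at an assumed fixed block, with the usual fs headers included); it never allocates, so it never sets buffer_new(), and it leaves the buffer head unmapped beyond EOF so the hole handling above applies:

static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create)
{
	const sector_t data_start = 1024;	/* assumed start of the data area */
	unsigned blkbits = inode->i_blkbits;
	sector_t wanted = bh_result->b_size >> blkbits;	/* blocks direct-io asked for */
	sector_t nr_blocks = (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits;
	sector_t avail;

	if (iblock >= nr_blocks)
		return 0;			/* stay unmapped: treated as a hole */

	avail = min(wanted, nr_blocks - iblock);
	map_bh(bh_result, inode->i_sb, data_start + iblock);
	bh_result->b_size = avail << blkbits;	/* contiguous bytes mapped here */
	return 0;				/* nothing newly allocated */
}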
664 
665 /*
666  * There is no bio.  Make one now.
667  */
668 static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
669                 sector_t start_sector, struct buffer_head *map_bh)
670 {
671         sector_t sector;
672         int ret, nr_pages;
673 
674         ret = dio_bio_reap(dio, sdio);
675         if (ret)
676                 goto out;
677         sector = start_sector << (sdio->blkbits - 9);
678         nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
679         BUG_ON(nr_pages <= 0);
680         dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
681         sdio->boundary = 0;
682 out:
683         return ret;
684 }
685 
686 /*
687  * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
688  * that was successful then update final_block_in_bio and take a ref against
689  * the just-added page.
690  *
691  * Return zero on success.  Non-zero means the caller needs to start a new BIO.
692  */
693 static inline int dio_bio_add_page(struct dio_submit *sdio)
694 {
695         int ret;
696 
697         ret = bio_add_page(sdio->bio, sdio->cur_page,
698                         sdio->cur_page_len, sdio->cur_page_offset);
699         if (ret == sdio->cur_page_len) {
700                 /*
701                  * Decrement count only, if we are done with this page
702                  */
703                 if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
704                         sdio->pages_in_io--;
705                 get_page(sdio->cur_page);
706                 sdio->final_block_in_bio = sdio->cur_page_block +
707                         (sdio->cur_page_len >> sdio->blkbits);
708                 ret = 0;
709         } else {
710                 ret = 1;
711         }
712         return ret;
713 }
714                 
715 /*
716  * Put cur_page under IO.  The section of cur_page which is described by
717  * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
718  * starts on-disk at cur_page_block.
719  *
720  * We take a ref against the page here (on behalf of its presence in the bio).
721  *
722  * The caller of this function is responsible for removing cur_page from the
723  * dio, and for dropping the refcount which came from that presence.
724  */
725 static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
726                 struct buffer_head *map_bh)
727 {
728         int ret = 0;
729 
730         if (sdio->bio) {
731                 loff_t cur_offset = sdio->cur_page_fs_offset;
732                 loff_t bio_next_offset = sdio->logical_offset_in_bio +
733                         sdio->bio->bi_iter.bi_size;
734 
735                 /*
736                  * See whether this new request is contiguous with the old.
737                  *
738                  * Btrfs cannot handle having logically non-contiguous requests
739                  * submitted.  For example if you have
740                  *
741                  * Logical:  [0-4095][HOLE][8192-12287]
742                  * Physical: [0-4095]      [4096-8191]
743                  *
744                  * We cannot submit those pages together as one BIO.  So if our
745                  * current logical offset in the file does not equal what would
746                  * be the next logical offset in the bio, submit the bio we
747                  * have.
748                  */
749                 if (sdio->final_block_in_bio != sdio->cur_page_block ||
750                     cur_offset != bio_next_offset)
751                         dio_bio_submit(dio, sdio);
752         }
753 
754         if (sdio->bio == NULL) {
755                 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
756                 if (ret)
757                         goto out;
758         }
759 
760         if (dio_bio_add_page(sdio) != 0) {
761                 dio_bio_submit(dio, sdio);
762                 ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
763                 if (ret == 0) {
764                         ret = dio_bio_add_page(sdio);
765                         BUG_ON(ret != 0);
766                 }
767         }
768 out:
769         return ret;
770 }
771 
772 /*
773  * An autonomous function to put a chunk of a page under deferred IO.
774  *
775  * The caller doesn't actually know (or care) whether this piece of page is in
776  * a BIO, or is under IO or whatever.  We just take care of all possible 
777  * situations here.  The separation between the logic of do_direct_IO() and
778  * that of submit_page_section() is important for clarity.  Please don't break.
779  *
780  * The chunk of page starts on-disk at blocknr.
781  *
782  * We perform deferred IO, by recording the last-submitted page inside our
783  * private part of the dio structure.  If possible, we just expand the IO
784  * across that page here.
785  *
786  * If that doesn't work out then we put the old page into the bio and add this
787  * page to the dio instead.
788  */
789 static inline int
790 submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
791                     unsigned offset, unsigned len, sector_t blocknr,
792                     struct buffer_head *map_bh)
793 {
794         int ret = 0;
795 
796         if (dio->op == REQ_OP_WRITE) {
797                 /*
798                  * Read accounting is performed in submit_bio()
799                  */
800                 task_io_account_write(len);
801         }
802 
803         /*
804          * Can we just grow the current page's presence in the dio?
805          */
806         if (sdio->cur_page == page &&
807             sdio->cur_page_offset + sdio->cur_page_len == offset &&
808             sdio->cur_page_block +
809             (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
810                 sdio->cur_page_len += len;
811                 goto out;
812         }
813 
814         /*
815          * If there's a deferred page already there then send it.
816          */
817         if (sdio->cur_page) {
818                 ret = dio_send_cur_page(dio, sdio, map_bh);
819                 put_page(sdio->cur_page);
820                 sdio->cur_page = NULL;
821                 if (ret)
822                         return ret;
823         }
824 
825         get_page(page);         /* It is in dio */
826         sdio->cur_page = page;
827         sdio->cur_page_offset = offset;
828         sdio->cur_page_len = len;
829         sdio->cur_page_block = blocknr;
830         sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
831 out:
832         /*
833          * If sdio->boundary then we want to schedule the IO now to
834          * avoid metadata seeks.
835          */
836         if (sdio->boundary) {
837                 ret = dio_send_cur_page(dio, sdio, map_bh);
838                 dio_bio_submit(dio, sdio);
839                 put_page(sdio->cur_page);
840                 sdio->cur_page = NULL;
841         }
842         return ret;
843 }
844 
845 /*
846  * If we are not writing the entire block and get_block() allocated
847  * the block for us, we need to fill-in the unused portion of the
 848  * block with zeros. This happens only if the user buffer, file offset or
 849  * IO length is not a multiple of the filesystem block size.
850  *
851  * `end' is zero if we're doing the start of the IO, 1 at the end of the
852  * IO.
853  */
854 static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
855                 int end, struct buffer_head *map_bh)
856 {
857         unsigned dio_blocks_per_fs_block;
858         unsigned this_chunk_blocks;     /* In dio_blocks */
859         unsigned this_chunk_bytes;
860         struct page *page;
861 
862         sdio->start_zero_done = 1;
863         if (!sdio->blkfactor || !buffer_new(map_bh))
864                 return;
865 
866         dio_blocks_per_fs_block = 1 << sdio->blkfactor;
867         this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);
868 
869         if (!this_chunk_blocks)
870                 return;
871 
872         /*
873          * We need to zero out part of an fs block.  It is either at the
874          * beginning or the end of the fs block.
875          */
876         if (end) 
877                 this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
878 
879         this_chunk_bytes = this_chunk_blocks << sdio->blkbits;
880 
881         page = ZERO_PAGE(0);
882         if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
883                                 sdio->next_block_for_io, map_bh))
884                 return;
885 
886         sdio->next_block_for_io += this_chunk_blocks;
887 }
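The masking above decides how many dio blocks need zeroing at each end of a misaligned write into a newly allocated fs block. A small standalone sketch with assumed values (blkfactor = 3, i.e. 512-byte dio blocks inside 4096-byte fs blocks, and a write covering dio blocks 21..34):

/* Arithmetic-only illustration of dio_zero_block(); not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned blkfactor = 3;
	unsigned per_fs_block = 1u << blkfactor;	/* 8 dio blocks per fs block */

	unsigned long long start_block = 21;		/* write starts at dio block 21 */
	unsigned long long end_block   = 35;		/* and ends just before block 35 */

	unsigned head = start_block & (per_fs_block - 1);		  /* 5 blocks zeroed first */
	unsigned tail = per_fs_block - (end_block & (per_fs_block - 1)); /* 5 blocks zeroed last */

	printf("zero %u dio blocks before the data and %u after it\n", head, tail);
	return 0;
}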
888 
889 /*
890  * Walk the user pages, and the file, mapping blocks to disk and generating
891  * a sequence of (page,offset,len,block) mappings.  These mappings are injected
892  * into submit_page_section(), which takes care of the next stage of submission
893  *
 894  * Direct IO against a blockdev is different from a file, because we can
895  * happily perform page-sized but 512-byte aligned IOs.  It is important that
896  * blockdev IO be able to have fine alignment and large sizes.
897  *
898  * So what we do is to permit the ->get_block function to populate bh.b_size
899  * with the size of IO which is permitted at this offset and this i_blkbits.
900  *
901  * For best results, the blockdev should be set up with 512-byte i_blkbits and
902  * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
903  * fine alignment but still allows this function to work in PAGE_SIZE units.
904  */
905 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
906                         struct buffer_head *map_bh)
907 {
908         const unsigned blkbits = sdio->blkbits;
909         const unsigned i_blkbits = blkbits + sdio->blkfactor;
910         int ret = 0;
911 
912         while (sdio->block_in_file < sdio->final_block_in_request) {
913                 struct page *page;
914                 size_t from, to;
915 
916                 page = dio_get_page(dio, sdio);
917                 if (IS_ERR(page)) {
918                         ret = PTR_ERR(page);
919                         goto out;
920                 }
921                 from = sdio->head ? 0 : sdio->from;
922                 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
923                 sdio->head++;
924 
925                 while (from < to) {
926                         unsigned this_chunk_bytes;      /* # of bytes mapped */
927                         unsigned this_chunk_blocks;     /* # of blocks */
928                         unsigned u;
929 
930                         if (sdio->blocks_available == 0) {
931                                 /*
932                                  * Need to go and map some more disk
933                                  */
934                                 unsigned long blkmask;
935                                 unsigned long dio_remainder;
936 
937                                 ret = get_more_blocks(dio, sdio, map_bh);
938                                 if (ret) {
939                                         put_page(page);
940                                         goto out;
941                                 }
942                                 if (!buffer_mapped(map_bh))
943                                         goto do_holes;
944 
945                                 sdio->blocks_available =
946                                                 map_bh->b_size >> blkbits;
947                                 sdio->next_block_for_io =
948                                         map_bh->b_blocknr << sdio->blkfactor;
949                                 if (buffer_new(map_bh)) {
950                                         clean_bdev_aliases(
951                                                 map_bh->b_bdev,
952                                                 map_bh->b_blocknr,
953                                                 map_bh->b_size >> i_blkbits);
954                                 }
955 
956                                 if (!sdio->blkfactor)
957                                         goto do_holes;
958 
959                                 blkmask = (1 << sdio->blkfactor) - 1;
960                                 dio_remainder = (sdio->block_in_file & blkmask);
961 
962                                 /*
963                                  * If we are at the start of IO and that IO
964                                  * starts partway into a fs-block,
965                                  * dio_remainder will be non-zero.  If the IO
966                                  * is a read then we can simply advance the IO
967                                  * cursor to the first block which is to be
968                                  * read.  But if the IO is a write and the
969                                  * block was newly allocated we cannot do that;
970                                  * the start of the fs block must be zeroed out
971                                  * on-disk
972                                  */
973                                 if (!buffer_new(map_bh))
974                                         sdio->next_block_for_io += dio_remainder;
975                                 sdio->blocks_available -= dio_remainder;
976                         }
977 do_holes:
978                         /* Handle holes */
979                         if (!buffer_mapped(map_bh)) {
980                                 loff_t i_size_aligned;
981 
982                                 /* AKPM: eargh, -ENOTBLK is a hack */
983                                 if (dio->op == REQ_OP_WRITE) {
984                                         put_page(page);
985                                         return -ENOTBLK;
986                                 }
987 
988                                 /*
989                                  * Be sure to account for a partial block as the
990                                  * last block in the file
991                                  */
992                                 i_size_aligned = ALIGN(i_size_read(dio->inode),
993                                                         1 << blkbits);
994                                 if (sdio->block_in_file >=
995                                                 i_size_aligned >> blkbits) {
996                                         /* We hit eof */
997                                         put_page(page);
998                                         goto out;
999                                 }
1000                                 zero_user(page, from, 1 << blkbits);
1001                                 sdio->block_in_file++;
1002                                 from += 1 << blkbits;
1003                                 dio->result += 1 << blkbits;
1004                                 goto next_block;
1005                         }
1006 
1007                         /*
1008                          * If we're performing IO which has an alignment which
1009                          * is finer than the underlying fs, go check to see if
1010                          * we must zero out the start of this block.
1011                          */
1012                         if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
1013                                 dio_zero_block(dio, sdio, 0, map_bh);
1014 
1015                         /*
1016                          * Work out, in this_chunk_blocks, how much disk we
1017                          * can add to this page
1018                          */
1019                         this_chunk_blocks = sdio->blocks_available;
1020                         u = (to - from) >> blkbits;
1021                         if (this_chunk_blocks > u)
1022                                 this_chunk_blocks = u;
1023                         u = sdio->final_block_in_request - sdio->block_in_file;
1024                         if (this_chunk_blocks > u)
1025                                 this_chunk_blocks = u;
1026                         this_chunk_bytes = this_chunk_blocks << blkbits;
1027                         BUG_ON(this_chunk_bytes == 0);
1028 
1029                         if (this_chunk_blocks == sdio->blocks_available)
1030                                 sdio->boundary = buffer_boundary(map_bh);
1031                         ret = submit_page_section(dio, sdio, page,
1032                                                   from,
1033                                                   this_chunk_bytes,
1034                                                   sdio->next_block_for_io,
1035                                                   map_bh);
1036                         if (ret) {
1037                                 put_page(page);
1038                                 goto out;
1039                         }
1040                         sdio->next_block_for_io += this_chunk_blocks;
1041 
1042                         sdio->block_in_file += this_chunk_blocks;
1043                         from += this_chunk_bytes;
1044                         dio->result += this_chunk_bytes;
1045                         sdio->blocks_available -= this_chunk_blocks;
1046 next_block:
1047                         BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
1048                         if (sdio->block_in_file == sdio->final_block_in_request)
1049                                 break;
1050                 }
1051 
1052                 /* Drop the ref which was taken in get_user_pages() */
1053                 put_page(page);
1054         }
1055 out:
1056         return ret;
1057 }
1058 
1059 static inline int drop_refcount(struct dio *dio)
1060 {
1061         int ret2;
1062         unsigned long flags;
1063 
1064         /*
1065          * Sync will always be dropping the final ref and completing the
1066          * operation.  AIO can if it was a broken operation described above or
1067          * in fact if all the bios race to complete before we get here.  In
1068          * that case dio_complete() translates the EIOCBQUEUED into the proper
1069          * return code that the caller will hand to ->complete().
1070          *
1071          * This is managed by the bio_lock instead of being an atomic_t so that
1072          * completion paths can drop their ref and use the remaining count to
1073          * decide to wake the submission path atomically.
1074          */
1075         spin_lock_irqsave(&dio->bio_lock, flags);
1076         ret2 = --dio->refcount;
1077         spin_unlock_irqrestore(&dio->bio_lock, flags);
1078         return ret2;
1079 }
1080 
1081 /*
1082  * This is a library function for use by filesystem drivers.
1083  *
1084  * The locking rules are governed by the flags parameter:
1085  *  - if the flags value contains DIO_LOCKING we use a fancy locking
1086  *    scheme for dumb filesystems.
1087  *    For writes this function is called under i_mutex and returns with
1088  *    i_mutex held, for reads, i_mutex is not held on entry, but it is
1089  *    taken and dropped again before returning.
1090  *  - if the flags value does NOT contain DIO_LOCKING we don't use any
1091  *    internal locking but rather rely on the filesystem to synchronize
1092  *    direct I/O reads/writes versus each other and truncate.
1093  *
1094  * To help with locking against truncate we incremented the i_dio_count
1095  * counter before starting direct I/O, and decrement it once we are done.
1096  * Truncate can wait for it to reach zero to provide exclusion.  It is
1097  * expected that filesystems provide exclusion between new direct I/O
1098  * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
1099  * but other filesystems need to take care of this on their own.
1100  *
1101  * NOTE: if you pass "sdio" to anything by pointer make sure that function
1102  * is always inlined. Otherwise gcc is unable to split the structure into
1103  * individual fields and will generate much worse code. This is important
1104  * for the whole file.
1105  */
1106 static inline ssize_t
1107 do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1108                       struct block_device *bdev, struct iov_iter *iter,
1109                       get_block_t get_block, dio_iodone_t end_io,
1110                       dio_submit_t submit_io, int flags)
1111 {
1112         unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
1113         unsigned blkbits = i_blkbits;
1114         unsigned blocksize_mask = (1 << blkbits) - 1;
1115         ssize_t retval = -EINVAL;
1116         size_t count = iov_iter_count(iter);
1117         loff_t offset = iocb->ki_pos;
1118         loff_t end = offset + count;
1119         struct dio *dio;
1120         struct dio_submit sdio = { 0, };
1121         struct buffer_head map_bh = { 0, };
1122         struct blk_plug plug;
1123         unsigned long align = offset | iov_iter_alignment(iter);
1124 
1125         /*
1126          * Avoid references to bdev if not absolutely needed to give
1127          * the early prefetch in the caller enough time.
1128          */
1129 
1130         if (align & blocksize_mask) {
1131                 if (bdev)
1132                         blkbits = blksize_bits(bdev_logical_block_size(bdev));
1133                 blocksize_mask = (1 << blkbits) - 1;
1134                 if (align & blocksize_mask)
1135                         goto out;
1136         }
1137 
1138         /* watch out for a 0 len io from a tricksy fs */
1139         if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
1140                 return 0;
1141 
1142         dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
1143         retval = -ENOMEM;
1144         if (!dio)
1145                 goto out;
1146         /*
1147          * Believe it or not, zeroing out the page array caused a .5%
1148          * performance regression in a database benchmark.  So, we take
1149          * care to only zero out what's needed.
1150          */
1151         memset(dio, 0, offsetof(struct dio, pages));
1152 
1153         dio->flags = flags;
1154         if (dio->flags & DIO_LOCKING) {
1155                 if (iov_iter_rw(iter) == READ) {
1156                         struct address_space *mapping =
1157                                         iocb->ki_filp->f_mapping;
1158 
1159                         /* will be released by direct_io_worker */
1160                         inode_lock(inode);
1161 
1162                         retval = filemap_write_and_wait_range(mapping, offset,
1163                                                               end - 1);
1164                         if (retval) {
1165                                 inode_unlock(inode);
1166                                 kmem_cache_free(dio_cache, dio);
1167                                 goto out;
1168                         }
1169                 }
1170         }
1171 
1172         /* Once we sampled i_size check for reads beyond EOF */
1173         dio->i_size = i_size_read(inode);
1174         if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1175                 if (dio->flags & DIO_LOCKING)
1176                         inode_unlock(inode);
1177                 kmem_cache_free(dio_cache, dio);
1178                 retval = 0;
1179                 goto out;
1180         }
1181 
1182         /*
1183          * For file extending writes updating i_size before data writeouts
1184          * complete can expose uninitialized blocks in dumb filesystems.
1185          * In that case we need to wait for I/O completion even if asked
1186          * for an asynchronous write.
1187          */
1188         if (is_sync_kiocb(iocb))
1189                 dio->is_async = false;
1190         else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
1191                  iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
1192                 dio->is_async = false;
1193         else
1194                 dio->is_async = true;
1195 
1196         dio->inode = inode;
1197         if (iov_iter_rw(iter) == WRITE) {
1198                 dio->op = REQ_OP_WRITE;
1199                 dio->op_flags = REQ_SYNC | REQ_IDLE;
1200         } else {
1201                 dio->op = REQ_OP_READ;
1202         }
1203 
1204         /*
1205          * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
1206          * so that we can call ->fsync.
1207          */
1208         if (dio->is_async && iov_iter_rw(iter) == WRITE &&
1209             ((iocb->ki_filp->f_flags & O_DSYNC) ||
1210              IS_SYNC(iocb->ki_filp->f_mapping->host))) {
1211                 retval = dio_set_defer_completion(dio);
1212                 if (retval) {
1213                         /*
1214                          * We grab i_mutex only for reads so we don't have
1215                          * to release it here
1216                          */
1217                         kmem_cache_free(dio_cache, dio);
1218                         goto out;
1219                 }
1220         }
1221 
1222         /*
1223          * Will be decremented at I/O completion time.
1224          */
1225         if (!(dio->flags & DIO_SKIP_DIO_COUNT))
1226                 inode_dio_begin(inode);
1227 
1228         retval = 0;
1229         sdio.blkbits = blkbits;
1230         sdio.blkfactor = i_blkbits - blkbits;
1231         sdio.block_in_file = offset >> blkbits;
1232 
1233         sdio.get_block = get_block;
1234         dio->end_io = end_io;
1235         sdio.submit_io = submit_io;
1236         sdio.final_block_in_bio = -1;
1237         sdio.next_block_for_io = -1;
1238 
1239         dio->iocb = iocb;
1240 
1241         spin_lock_init(&dio->bio_lock);
1242         dio->refcount = 1;
1243 
1244         dio->should_dirty = (iter->type == ITER_IOVEC);
1245         sdio.iter = iter;
1246         sdio.final_block_in_request =
1247                 (offset + iov_iter_count(iter)) >> blkbits;
1248 
1249         /*
1250          * In case of non-aligned buffers, we may need 2 more
1251          * pages since we need to zero out first and last block.
1252          */
1253         if (unlikely(sdio.blkfactor))
1254                 sdio.pages_in_io = 2;
1255 
1256         sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);
1257 
1258         blk_start_plug(&plug);
1259 
1260         retval = do_direct_IO(dio, &sdio, &map_bh);
1261         if (retval)
1262                 dio_cleanup(dio, &sdio);
1263 
1264         if (retval == -ENOTBLK) {
1265                 /*
1266                  * The remaining part of the request will be
1267                  * handled by buffered I/O when we return
1268                  */
1269                 retval = 0;
1270         }
1271         /*
1272          * There may be some unwritten disk at the end of a part-written
1273          * fs-block-sized block.  Go zero that now.
1274          */
1275         dio_zero_block(dio, &sdio, 1, &map_bh);
1276 
1277         if (sdio.cur_page) {
1278                 ssize_t ret2;
1279 
1280                 ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
1281                 if (retval == 0)
1282                         retval = ret2;
1283                 put_page(sdio.cur_page);
1284                 sdio.cur_page = NULL;
1285         }
1286         if (sdio.bio)
1287                 dio_bio_submit(dio, &sdio);
1288 
1289         blk_finish_plug(&plug);
1290 
1291         /*
1292          * It is possible that we return a short IO due to end of file.
1293          * In that case, we need to release all the pages we got hold of.
1294          */
1295         dio_cleanup(dio, &sdio);
1296 
1297         /*
1298          * All block lookups have been performed. For READ requests
1299          * we can let i_mutex go now that it has achieved its purpose
1300          * of protecting us from looking up uninitialized blocks.
1301          */
1302         if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
1303                 inode_unlock(dio->inode);
1304 
1305         /*
1306          * The only time we want to leave bios in flight is when a successful
1307          * partial aio read or full aio write has been set up.  In that case
1308          * bio completion will call aio_complete.  The only time it's safe to
1309          * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
1310          * This had *better* be the only place that raises -EIOCBQUEUED.
1311          */
1312         BUG_ON(retval == -EIOCBQUEUED);
1313         if (dio->is_async && retval == 0 && dio->result &&
1314             (iov_iter_rw(iter) == READ || dio->result == count))
1315                 retval = -EIOCBQUEUED;
1316         else
1317                 dio_await_completion(dio);
1318 
1319         if (drop_refcount(dio) == 0) {
1320                 retval = dio_complete(dio, retval, false);
1321         } else
1322                 BUG_ON(retval != -EIOCBQUEUED);
1323 
1324 out:
1325         return retval;
1326 }
1327 
1328 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1329                              struct block_device *bdev, struct iov_iter *iter,
1330                              get_block_t get_block,
1331                              dio_iodone_t end_io, dio_submit_t submit_io,
1332                              int flags)
1333 {
1334         /*
1335          * The block device state is needed in the end to finally
1336          * submit everything.  Since it's likely to be cache cold
1337          * prefetch it here as first thing to hide some of the
1338          * latency.
1339          *
1340          * Attempt to prefetch the pieces we likely need later.
1341          */
1342         prefetch(&bdev->bd_disk->part_tbl);
1343         prefetch(bdev->bd_queue);
1344         prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
1345 
1346         return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
1347                                      end_io, submit_io, flags);
1348 }
1349 
1350 EXPORT_SYMBOL(__blockdev_direct_IO);
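For context on how a filesystem reaches this code: a simple filesystem's ->direct_IO address_space operation typically just forwards to the blockdev_direct_IO() wrapper, which in this kernel supplies the superblock's block device plus the DIO_LOCKING | DIO_SKIP_HOLES flags and calls __blockdev_direct_IO(). A hedged sketch (the examplefs names are hypothetical, reusing the get_block sketched after get_more_blocks() above):

static ssize_t examplefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* Hand the whole iov_iter to the generic direct-io engine. */
	return blockdev_direct_IO(iocb, inode, iter, examplefs_get_block);
}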
1351 
1352 static __init int dio_init(void)
1353 {
1354         dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
1355         return 0;
1356 }
1357 module_init(dio_init)
1358 
