Linux/block/blk-flush.c

/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011           Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011           Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is written, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of a flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
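/*
 * For example, consider a write carrying both REQ_PREFLUSH and REQ_FUA.
 * On a device with a writeback cache but no FUA support it expands to the
 * full PREFLUSH -> DATA -> POSTFLUSH sequence; on a FUA-capable device the
 * POSTFLUSH is dropped and REQ_FUA is passed down with DATA; on a device
 * without a writeback cache both flags are stripped and the write executes
 * as a plain request.  blk_flush_policy() below encodes this table.
 */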

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/* FLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq);

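/*
 * Given the queue's flush capability flags (@fflags) and @rq, compute
 * which REQ_FSEQ_* steps @rq needs.  DATA is needed whenever the request
 * carries sectors; PREFLUSH and POSTFLUSH matter only if the queue
 * advertises a writeback cache (QUEUE_FLAG_WC), and POSTFLUSH only when
 * the device can't do FUA natively.
 */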
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

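/*
 * Return the next step @rq has to execute: ffz() finds the lowest bit
 * not yet set in rq->flush.seq, i.e. the first incomplete REQ_FSEQ_*
 * step.
 */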
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

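/*
 * Undo the adjustments blk_insert_flush() made for flush sequencing so
 * that @rq can complete like a normal request.
 */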
static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

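/*
 * Dispatch @rq for execution, at the front of the queue if @add_front.
 * Returns %true if the caller should kick the queue afterwards (legacy
 * path); on blk-mq the requeue list machinery runs the hardware queues
 * itself, so %false is returned.
 */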
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
        if (rq->q->mq_ops) {
                blk_mq_add_to_requeue_list(rq, add_front, true);
                return false;
        } else {
                if (add_front)
                        list_add(&rq->queuelist, &rq->q->queue_head);
                else
                        list_add_tail(&rq->queuelist, &rq->q->queue_head);
                return true;
        }
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, int error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        bool queued = false, kicked;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                queued = blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                if (q->mq_ops)
                        blk_mq_end_request(rq, error);
                else
                        __blk_end_request_all(rq, error);
                break;

        default:
                BUG();
        }

        kicked = blk_kick_flush(q, fq);
        return kicked | queued;
}

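/*
 * Completion handler for the flush request itself.  It retires one side
 * of the double buffer by toggling flush_running_idx, then walks every
 * request that was waiting on this flush and advances each to its next
 * sequence step via blk_flush_complete_seq().
 */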
static void flush_end_io(struct request *flush_rq, int error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        bool queued = false;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;

                /*
                 * release the tag's ownership back to the request it was
                 * cloned from
                 */
                spin_lock_irqsave(&fq->mq_flush_lock, flags);
                hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        if (!q->mq_ops)
                elv_completed_request(q, flush_rq);

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                queued |= blk_flush_complete_seq(rq, fq, seq, error);
        }

        /*
         * Kick the queue to avoid a stall in two cases:
         * 1. Moving a request silently to an empty queue_head may stall
         * the queue.
         * 2. When a flush request is running on a non-queueable queue, the
         * queue is held.  Restart the queue after the flush request
         * finishes to avoid the stall.
         * This function is called from the request completion path and
         * calling directly into request_fn may confuse the driver.  Always
         * use kblockd.
         */
        if (queued || fq->flush_queue_delayed) {
                WARN_ON(q->mq_ops);
                blk_run_queue_async(q);
        }
        fq->flush_queue_delayed = 0;
        if (q->mq_ops)
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 *
 * Flush related states of @q have changed, consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return false;

        /* C2 and C3 */
        if (!list_empty(&fq->flush_data_in_flight) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return false;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * Borrow the tag from the first pending request since the two
         * can't be in flight at the same time, and acquire the tag's
         * ownership for the flush request.
         */
        if (q->mq_ops) {
                struct blk_mq_hw_ctx *hctx;

                flush_rq->mq_ctx = first_rq->mq_ctx;
                flush_rq->tag = first_rq->tag;
                fq->orig_rq = first_rq;

                hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
                blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
        }

        flush_rq->cmd_type = REQ_TYPE_FS;
        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;

        return blk_flush_queue_rq(flush_rq, false);
}

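/*
 * Completion handler for the DATA step on the legacy (non-mq) path: let
 * the elevator account the request as completed early so its tag becomes
 * reusable, then advance the flush sequence.
 */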
static void flush_data_end_io(struct request *rq, int error)
{
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

        /*
         * Update q->in_flight[] here to make this tag usable early,
         * because blk_queue_start_tag() uses q->in_flight[BLK_RW_ASYNC]
         * to limit async I/O and reserve tags for sync I/O.
         *
         * More importantly, this avoids the following I/O deadlock:
         *
         * - suppose 40 FUA requests are coming to the flush queue
         *   and the queue depth is 31
         * - 30 rqs are scheduled, then blk_queue_start_tag() can't
         *   allocate a tag for async I/O any more
         * - all 30 rqs are completed before FLUSH_PENDING_TIMEOUT
         *   and flush_data_end_io() is called
         * - without updating q->in_flight[BLK_RW_ASYNC] here, the other
         *   rqs still can't go ahead; meanwhile they are held in the
         *   flush data queue, so the post flush rq makes no progress
         * - only after the post flush rq is handled can all these rqs
         *   be completed
         */

        elv_completed_request(q, rq);

        /* avoid double accounting */
        rq->rq_flags &= ~RQF_STARTED;

        /*
         * After populating an empty queue, kick it to avoid a stall.  Read
         * the comment in flush_end_io().
         */
        if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_run_queue_async(q);
}

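/*
 * blk-mq counterpart of flush_data_end_io().  Flush state is protected
 * by fq->mq_flush_lock here rather than q->queue_lock.
 */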
static void mq_flush_data_end_io(struct request *rq, int error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        hctx = blk_mq_map_queue(q, ctx->cpu);

        /*
         * After populating an empty queue, kick it to avoid a stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
                blk_mq_run_hw_queue(hctx, true);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
 * or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                if (q->mq_ops)
                        blk_mq_end_request(rq, 0);
                else
                        __blk_end_bidi_request(rq, 0, 0, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but a flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * it for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                if (q->mq_ops)
                        blk_mq_insert_request(rq, false, true, false);
                else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        if (q->mq_ops) {
                rq->end_io = mq_flush_data_end_io;

                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return;
        }
        rq->end_io = flush_data_end_io;

        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @error_sector:       error sector
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  The caller can supply room for storing the error offset
 *    in case of a flush error, if they wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * Some block devices may not have their queue correctly set up here
         * (e.g. a loop device without a backing file), and issuing a flush
         * against them would panic.  Ensure there is a request function
         * before issuing the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio->bi_bdev = bdev;
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        ret = submit_bio_wait(bio);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it.  For non-stacked drivers, this should be
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_iter.bi_sector;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
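
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a filesystem forcing the device cache out at the end of fsync could do
 *
 *      sector_t err_sector;
 *      int err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, &err_sector);
 *
 * and propagate a non-zero err to userspace.
 */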
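/*
 * Allocate and initialize the per-queue flush machinery: the two pending
 * lists, the data-in-flight list and the preallocated flush request.  For
 * blk-mq, @cmd_size extra bytes are reserved for the driver's per-request
 * payload and the size is rounded up to a cache line.
 */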
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
        if (!fq)
                goto fail;

        if (q->mq_ops) {
                spin_lock_init(&fq->mq_flush_lock);
                rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        }

        fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

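/*
 * Counterpart of blk_alloc_flush_queue(); frees the preallocated flush
 * request and the flush queue itself.
 */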
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio-based request queues have no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}
