
Linux/block/blk-ioc.c

  1 /*
  2  * Functions related to io context handling
  3  */
  4 #include <linux/kernel.h>
  5 #include <linux/module.h>
  6 #include <linux/init.h>
  7 #include <linux/bio.h>
  8 #include <linux/blkdev.h>
  9 #include <linux/slab.h>
 10 
 11 #include "blk.h"
 12 
 13 /*
 14  * For io context allocations
 15  */
 16 static struct kmem_cache *iocontext_cachep;
 17 
 18 /**
 19  * get_io_context - increment reference count to io_context
 20  * @ioc: io_context to get
 21  *
 22  * Increment reference count to @ioc.
 23  */
 24 void get_io_context(struct io_context *ioc)
 25 {
 26         BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
 27         atomic_long_inc(&ioc->refcount);
 28 }
 29 EXPORT_SYMBOL(get_io_context);
 30 
 31 static void icq_free_icq_rcu(struct rcu_head *head)
 32 {
 33         struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
 34 
 35         kmem_cache_free(icq->__rcu_icq_cache, icq);
 36 }
 37 
 38 /* Exit an icq. Called with both ioc and q locked. */
 39 static void ioc_exit_icq(struct io_cq *icq)
 40 {
 41         struct elevator_type *et = icq->q->elevator->type;
 42 
 43         if (icq->flags & ICQ_EXITED)
 44                 return;
 45 
 46         if (et->ops.elevator_exit_icq_fn)
 47                 et->ops.elevator_exit_icq_fn(icq);
 48 
 49         icq->flags |= ICQ_EXITED;
 50 }
 51 
 52 /* Release an icq.  Called with both ioc and q locked. */
 53 static void ioc_destroy_icq(struct io_cq *icq)
 54 {
 55         struct io_context *ioc = icq->ioc;
 56         struct request_queue *q = icq->q;
 57         struct elevator_type *et = q->elevator->type;
 58 
 59         lockdep_assert_held(&ioc->lock);
 60         lockdep_assert_held(q->queue_lock);
 61 
 62         radix_tree_delete(&ioc->icq_tree, icq->q->id);
 63         hlist_del_init(&icq->ioc_node);
 64         list_del_init(&icq->q_node);
 65 
 66         /*
 67          * Both setting the lookup hint to @icq and clearing it are done
 68          * under queue_lock.  If the hint isn't pointing to @icq now, it
 69          * never will.  Hint assignment itself can race safely.
 70          */
 71         if (rcu_access_pointer(ioc->icq_hint) == icq)
 72                 rcu_assign_pointer(ioc->icq_hint, NULL);
 73 
 74         ioc_exit_icq(icq);
 75 
 76         /*
 77          * @icq->q might have gone away by the time the RCU callback runs,
 78          * making it impossible to determine icq_cache.  Record it in @icq.
 79          */
 80         icq->__rcu_icq_cache = et->icq_cache;
 81         call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
 82 }
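
The comment above captures a common RCU deferred-free idiom: anything the RCU
callback will need (here the icq_cache pointer) is recorded in the object
itself before call_rcu() is invoked, because the surrounding structures may
already be gone by the time the callback runs.  A minimal, hedged sketch of
the same pattern with a hypothetical struct foo (not part of blk-ioc.c):

	struct foo {
		struct kmem_cache *cache;	/* recorded for the RCU callback */
		struct rcu_head rcu;
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		struct foo *f = container_of(head, struct foo, rcu);

		/* only @f itself can be trusted here; use what was recorded */
		kmem_cache_free(f->cache, f);
	}

	static void foo_destroy(struct foo *f, struct kmem_cache *cache)
	{
		f->cache = cache;		/* like icq->__rcu_icq_cache above */
		call_rcu(&f->rcu, foo_free_rcu);
	}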
 83 
 84 /*
 85  * Slow path for ioc release in put_io_context().  Performs double-lock
 86  * dancing to unlink all icq's and then frees ioc.
 87  */
 88 static void ioc_release_fn(struct work_struct *work)
 89 {
 90         struct io_context *ioc = container_of(work, struct io_context,
 91                                               release_work);
 92         unsigned long flags;
 93 
 94         /*
 95          * Exiting an icq may call into put_io_context() through the
 96          * elevator, which will trigger a lockdep warning.  The two ioc's
 97          * are guaranteed to be different, so use a different locking
 98          * subclass.  Use the irqsave variant as there is no spin_lock_irq_nested().
 99          */
100         spin_lock_irqsave_nested(&ioc->lock, flags, 1);
101 
102         while (!hlist_empty(&ioc->icq_list)) {
103                 struct io_cq *icq = hlist_entry(ioc->icq_list.first,
104                                                 struct io_cq, ioc_node);
105                 struct request_queue *q = icq->q;
106 
107                 if (spin_trylock(q->queue_lock)) {
108                         ioc_destroy_icq(icq);
109                         spin_unlock(q->queue_lock);
110                 } else {
111                         spin_unlock_irqrestore(&ioc->lock, flags);
112                         cpu_relax();
113                         spin_lock_irqsave_nested(&ioc->lock, flags, 1);
114                 }
115         }
116 
117         spin_unlock_irqrestore(&ioc->lock, flags);
118 
119         kmem_cache_free(iocontext_cachep, ioc);
120 }
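
The "double-lock dancing" above works around lock ordering: the normal order
is queue_lock first, then ioc->lock, but the release path already holds
ioc->lock, so queue_lock may only ever be trylocked; on failure both locks are
dropped and the walk restarts.  A hedged, generic sketch of that back-off
pattern, using hypothetical locks and helpers (a_lock, b_lock, more_work(),
do_one_item()):

	spin_lock_irqsave_nested(&a_lock, flags, 1);	/* "wrong" order: a before b */
	while (more_work()) {
		if (spin_trylock(&b_lock)) {
			do_one_item();			/* both locks held */
			spin_unlock(&b_lock);
		} else {
			/* back off so the normal-order path can make progress */
			spin_unlock_irqrestore(&a_lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&a_lock, flags, 1);
		}
	}
	spin_unlock_irqrestore(&a_lock, flags);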
121 
122 /**
123  * put_io_context - put a reference of io_context
124  * @ioc: io_context to put
125  *
126  * Decrement reference count of @ioc and release it if the count reaches
127  * zero.
128  */
129 void put_io_context(struct io_context *ioc)
130 {
131         unsigned long flags;
132         bool free_ioc = false;
133 
134         if (ioc == NULL)
135                 return;
136 
137         BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
138 
139         /*
140          * Releasing ioc requires reverse order double locking and we may
141          * already be holding a queue_lock.  Do it asynchronously from wq.
142          */
143         if (atomic_long_dec_and_test(&ioc->refcount)) {
144                 spin_lock_irqsave(&ioc->lock, flags);
145                 if (!hlist_empty(&ioc->icq_list))
146                         queue_work(system_power_efficient_wq,
147                                         &ioc->release_work);
148                 else
149                         free_ioc = true;
150                 spin_unlock_irqrestore(&ioc->lock, flags);
151         }
152 
153         if (free_ioc)
154                 kmem_cache_free(iocontext_cachep, ioc);
155 }
156 EXPORT_SYMBOL(put_io_context);
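
get_io_context() and put_io_context() behave like an ordinary reference-count
pair; the only subtlety is that the final put may defer the actual teardown to
a workqueue when icq's are still linked.  A minimal, hypothetical caller
sketch (the surrounding driver context is an assumption, not part of this
file):

	struct io_context *ioc = current->io_context;

	if (ioc) {
		get_io_context(ioc);	/* %current already holds a reference */
		/* ... hand @ioc to deferred work that issues IO ... */
		put_io_context(ioc);	/* may punt the final free to a workqueue */
	}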
157 
158 /**
159  * put_io_context_active - put active reference on ioc
160  * @ioc: ioc of interest
161  *
 162  * Undo get_io_context_active().  If the active reference count reaches zero
 163  * after the put, @ioc can never issue further IOs and ioscheds are notified.
164  */
165 void put_io_context_active(struct io_context *ioc)
166 {
167         unsigned long flags;
168         struct io_cq *icq;
169 
170         if (!atomic_dec_and_test(&ioc->active_ref)) {
171                 put_io_context(ioc);
172                 return;
173         }
174 
175         /*
176          * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
177          * reverse double locking.  Read comment in ioc_release_fn() for
 178          * reverse double locking.  See the comment in ioc_release_fn() for
 179          * an explanation of the nested locking annotation.
180 retry:
181         spin_lock_irqsave_nested(&ioc->lock, flags, 1);
182         hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
183                 if (icq->flags & ICQ_EXITED)
184                         continue;
185                 if (spin_trylock(icq->q->queue_lock)) {
186                         ioc_exit_icq(icq);
187                         spin_unlock(icq->q->queue_lock);
188                 } else {
189                         spin_unlock_irqrestore(&ioc->lock, flags);
190                         cpu_relax();
191                         goto retry;
192                 }
193         }
194         spin_unlock_irqrestore(&ioc->lock, flags);
195 
196         put_io_context(ioc);
197 }
198 
199 /* Called by the exiting task */
200 void exit_io_context(struct task_struct *task)
201 {
202         struct io_context *ioc;
203 
204         task_lock(task);
205         ioc = task->io_context;
206         task->io_context = NULL;
207         task_unlock(task);
208 
209         atomic_dec(&ioc->nr_tasks);
210         put_io_context_active(ioc);
211 }
212 
213 /**
214  * ioc_clear_queue - break any ioc association with the specified queue
215  * @q: request_queue being cleared
216  *
217  * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
218  */
219 void ioc_clear_queue(struct request_queue *q)
220 {
221         lockdep_assert_held(q->queue_lock);
222 
223         while (!list_empty(&q->icq_list)) {
224                 struct io_cq *icq = list_entry(q->icq_list.next,
225                                                struct io_cq, q_node);
226                 struct io_context *ioc = icq->ioc;
227 
228                 spin_lock(&ioc->lock);
229                 ioc_destroy_icq(icq);
230                 spin_unlock(&ioc->lock);
231         }
232 }
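
Per the kernel-doc and the lockdep assertion above, the caller owns
@q->queue_lock around the call.  A hedged example of what a queue teardown
path might look like (the surrounding teardown code is an assumption):

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);		/* exits and destroys every icq linked to @q */
	spin_unlock_irq(q->queue_lock);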
233 
234 int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
235 {
236         struct io_context *ioc;
237         int ret;
238 
239         ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
240                                     node);
241         if (unlikely(!ioc))
242                 return -ENOMEM;
243 
244         /* initialize */
245         atomic_long_set(&ioc->refcount, 1);
246         atomic_set(&ioc->nr_tasks, 1);
247         atomic_set(&ioc->active_ref, 1);
248         spin_lock_init(&ioc->lock);
249         INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
250         INIT_HLIST_HEAD(&ioc->icq_list);
251         INIT_WORK(&ioc->release_work, ioc_release_fn);
252 
253         /*
254          * Try to install.  ioc shouldn't be installed if someone else
255          * already did so, or if @task, which isn't %current, is exiting.
256          * Note that we need to allow ioc creation on an exiting %current
257          * as the exit path may issue IOs from e.g. exit_files().  The exit
258          * path is responsible for not issuing IO after exit_io_context().
259          */
260         task_lock(task);
261         if (!task->io_context &&
262             (task == current || !(task->flags & PF_EXITING)))
263                 task->io_context = ioc;
264         else
265                 kmem_cache_free(iocontext_cachep, ioc);
266 
267         ret = task->io_context ? 0 : -EBUSY;
268 
269         task_unlock(task);
270 
271         return ret;
272 }
273 
274 /**
275  * get_task_io_context - get io_context of a task
276  * @task: task of interest
277  * @gfp_flags: allocation flags, used if allocation is necessary
278  * @node: allocation node, used if allocation is necessary
279  *
280  * Return io_context of @task.  If it doesn't exist, it is created with
281  * @gfp_flags and @node.  The returned io_context has its reference count
282  * incremented.
283  *
284  * This function always goes through task_lock(); for %current it is better
285  * to use %current->io_context + get_io_context() directly.
286  */
287 struct io_context *get_task_io_context(struct task_struct *task,
288                                        gfp_t gfp_flags, int node)
289 {
290         struct io_context *ioc;
291 
292         might_sleep_if(gfpflags_allow_blocking(gfp_flags));
293 
294         do {
295                 task_lock(task);
296                 ioc = task->io_context;
297                 if (likely(ioc)) {
298                         get_io_context(ioc);
299                         task_unlock(task);
300                         return ioc;
301                 }
302                 task_unlock(task);
303         } while (!create_task_io_context(task, gfp_flags, node));
304 
305         return NULL;
306 }
307 EXPORT_SYMBOL(get_task_io_context);
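
A minimal, hypothetical use of get_task_io_context(), e.g. when one task wants
to inspect another task's io_context; the reference taken by the lookup is
dropped with put_io_context() (NUMA_NO_NODE and the surrounding code are
assumptions):

	struct io_context *ioc;

	ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
	if (ioc) {
		/* ... read or copy fields, taking ioc->lock where needed ... */
		put_io_context(ioc);	/* drop the reference the lookup took */
	}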
308 
309 /**
310  * ioc_lookup_icq - lookup io_cq from ioc
311  * @ioc: the associated io_context
312  * @q: the associated request_queue
313  *
314  * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
315  * with @q->queue_lock held.
316  */
317 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
318 {
319         struct io_cq *icq;
320 
321         lockdep_assert_held(q->queue_lock);
322 
323         /*
324          * icq's are indexed from @ioc using radix tree and hint pointer,
325          * both of which are protected with RCU.  All removals are done
326          * holding both q and ioc locks, and we're holding q lock - if we
327          * find an icq which points to us, it's guaranteed to be valid.
328          */
329         rcu_read_lock();
330         icq = rcu_dereference(ioc->icq_hint);
331         if (icq && icq->q == q)
332                 goto out;
333 
334         icq = radix_tree_lookup(&ioc->icq_tree, q->id);
335         if (icq && icq->q == q)
336                 rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
337         else
338                 icq = NULL;
339 out:
340         rcu_read_unlock();
341         return icq;
342 }
343 EXPORT_SYMBOL(ioc_lookup_icq);
344 
345 /**
346  * ioc_create_icq - create and link io_cq
347  * @ioc: io_context of interest
348  * @q: request_queue of interest
349  * @gfp_mask: allocation mask
350  *
351  * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't exist,
352  * it will be created using @gfp_mask.
353  *
354  * The caller is responsible for ensuring @ioc won't go away and @q is
355  * alive and will stay alive until this function returns.
356  */
357 struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
358                              gfp_t gfp_mask)
359 {
360         struct elevator_type *et = q->elevator->type;
361         struct io_cq *icq;
362 
363         /* allocate stuff */
364         icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
365                                     q->node);
366         if (!icq)
367                 return NULL;
368 
369         if (radix_tree_maybe_preload(gfp_mask) < 0) {
370                 kmem_cache_free(et->icq_cache, icq);
371                 return NULL;
372         }
373 
374         icq->ioc = ioc;
375         icq->q = q;
376         INIT_LIST_HEAD(&icq->q_node);
377         INIT_HLIST_NODE(&icq->ioc_node);
378 
379         /* lock both q and ioc and try to link @icq */
380         spin_lock_irq(q->queue_lock);
381         spin_lock(&ioc->lock);
382 
383         if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
384                 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
385                 list_add(&icq->q_node, &q->icq_list);
386                 if (et->ops.elevator_init_icq_fn)
387                         et->ops.elevator_init_icq_fn(icq);
388         } else {
389                 kmem_cache_free(et->icq_cache, icq);
390                 icq = ioc_lookup_icq(ioc, q);
391                 if (!icq)
392                         printk(KERN_ERR "cfq: icq link failed!\n");
393         }
394 
395         spin_unlock(&ioc->lock);
396         spin_unlock_irq(q->queue_lock);
397         radix_tree_preload_end();
398         return icq;
399 }
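
ioc_lookup_icq() and ioc_create_icq() are typically paired into a
lookup-or-create step.  Note the locking asymmetry: the lookup requires
@q->queue_lock to be held, while the create takes both locks itself, so the
caller must not hold queue_lock across the create.  A hedged sketch of that
pattern (simplified, error handling omitted, not copied from the request
allocation path):

	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);	/* fast path, under queue_lock */
	spin_unlock_irq(q->queue_lock);

	if (!icq)
		icq = ioc_create_icq(ioc, q, GFP_KERNEL);  /* takes the locks itself */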
400 
401 static int __init blk_ioc_init(void)
402 {
403         iocontext_cachep = kmem_cache_create("blkdev_ioc",
404                         sizeof(struct io_context), 0, SLAB_PANIC, NULL);
405         return 0;
406 }
407 subsys_initcall(blk_ioc_init);
408 
