
Linux/kernel/padata.c

  1 /*
  2  * padata.c - generic interface to process data streams in parallel
  3  *
  4  * See Documentation/padata.txt for the API documentation.
  5  *
  6  * Copyright (C) 2008, 2009 secunet Security Networks AG
  7  * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
  8  *
  9  * This program is free software; you can redistribute it and/or modify it
 10  * under the terms and conditions of the GNU General Public License,
 11  * version 2, as published by the Free Software Foundation.
 12  *
 13  * This program is distributed in the hope it will be useful, but WITHOUT
 14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 16  * more details.
 17  *
 18  * You should have received a copy of the GNU General Public License along with
 19  * this program; if not, write to the Free Software Foundation, Inc.,
 20  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 21  */
 22 
 23 #include <linux/export.h>
 24 #include <linux/cpumask.h>
 25 #include <linux/err.h>
 26 #include <linux/cpu.h>
 27 #include <linux/padata.h>
 28 #include <linux/mutex.h>
 29 #include <linux/sched.h>
 30 #include <linux/slab.h>
 31 #include <linux/sysfs.h>
 32 #include <linux/rcupdate.h>
 33 #include <linux/module.h>
 34 
 35 #define MAX_OBJ_NUM 1000
 36 
 37 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 38 {
 39         int cpu, target_cpu;
 40 
 41         target_cpu = cpumask_first(pd->cpumask.pcpu);
 42         for (cpu = 0; cpu < cpu_index; cpu++)
 43                 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
 44 
 45         return target_cpu;
 46 }
 47 
 48 static int padata_cpu_hash(struct parallel_data *pd)
 49 {
 50         unsigned int seq_nr;
 51         int cpu_index;
 52 
 53         /*
 54          * Hash the sequence numbers to the cpus by taking
 55          * seq_nr mod. number of cpus in use.
 56          */
 57 
 58         seq_nr = atomic_inc_return(&pd->seq_nr);
 59         cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
 60 
 61         return padata_index_to_cpu(pd, cpu_index);
 62 }
 63 
 64 static void padata_parallel_worker(struct work_struct *parallel_work)
 65 {
 66         struct padata_parallel_queue *pqueue;
 67         LIST_HEAD(local_list);
 68 
 69         local_bh_disable();
 70         pqueue = container_of(parallel_work,
 71                               struct padata_parallel_queue, work);
 72 
 73         spin_lock(&pqueue->parallel.lock);
 74         list_replace_init(&pqueue->parallel.list, &local_list);
 75         spin_unlock(&pqueue->parallel.lock);
 76 
 77         while (!list_empty(&local_list)) {
 78                 struct padata_priv *padata;
 79 
 80                 padata = list_entry(local_list.next,
 81                                     struct padata_priv, list);
 82 
 83                 list_del_init(&padata->list);
 84 
 85                 padata->parallel(padata);
 86         }
 87 
 88         local_bh_enable();
 89 }
 90 
 91 /**
 92  * padata_do_parallel - padata parallelization function
 93  *
 94  * @pinst: padata instance
 95  * @padata: object to be parallelized
 96  * @cb_cpu: cpu the serialization callback function will run on,
 97  *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 98  *
 99  * The parallelization callback function will run with BHs off.
100  * Note: Every object which is parallelized by padata_do_parallel
101  * must be seen by padata_do_serial.
102  */
103 int padata_do_parallel(struct padata_instance *pinst,
104                        struct padata_priv *padata, int cb_cpu)
105 {
106         int target_cpu, err;
107         struct padata_parallel_queue *queue;
108         struct parallel_data *pd;
109 
110         rcu_read_lock_bh();
111 
112         pd = rcu_dereference_bh(pinst->pd);
113 
114         err = -EINVAL;
115         if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
116                 goto out;
117 
118         if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
119                 goto out;
120 
121         err =  -EBUSY;
122         if ((pinst->flags & PADATA_RESET))
123                 goto out;
124 
125         if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
126                 goto out;
127 
128         err = 0;
129         atomic_inc(&pd->refcnt);
130         padata->pd = pd;
131         padata->cb_cpu = cb_cpu;
132 
133         target_cpu = padata_cpu_hash(pd);
134         queue = per_cpu_ptr(pd->pqueue, target_cpu);
135 
136         spin_lock(&queue->parallel.lock);
137         list_add_tail(&padata->list, &queue->parallel.list);
138         spin_unlock(&queue->parallel.lock);
139 
140         queue_work_on(target_cpu, pinst->wq, &queue->work);
141 
142 out:
143         rcu_read_unlock_bh();
144 
145         return err;
146 }
147 EXPORT_SYMBOL(padata_do_parallel);
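
/*
 * Usage sketch (illustrative only, not part of padata.c): a client embeds
 * struct padata_priv in its own job structure, fills in the ->parallel and
 * ->serial callbacks and submits the job with padata_do_parallel().  The
 * names my_job, my_parallel, my_serial and my_submit are hypothetical.
 */
struct my_job {
        struct padata_priv padata;      /* must stay valid until ->serial has run */
        void *data;
};

static void my_parallel(struct padata_priv *padata)
{
        /* heavy per-object work; runs with BHs off on one of the parallel cpus */

        /* hand the object back so it can be serialized in submission order */
        padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
        struct my_job *job = container_of(padata, struct my_job, padata);

        /* runs in submission order on the cb_cpu passed to padata_do_parallel() */
        kfree(job);
}

static int my_submit(struct padata_instance *pinst, struct my_job *job,
                     int cb_cpu)
{
        job->padata.parallel = my_parallel;
        job->padata.serial = my_serial;

        /*
         * 0 on success; -EINVAL if the instance is not started or cb_cpu is
         * not in the serial cpumask; -EBUSY if a reset is in progress or too
         * many objects are already in flight.
         */
        return padata_do_parallel(pinst, &job->padata, cb_cpu);
}
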
148 
149 /*
150  * padata_get_next - Get the next object that needs serialization.
151  *
152  * Return values are:
153  *
154  * A pointer to the control struct of the next object that needs
155  * serialization, if present in one of the percpu reorder queues.
156  *
157  * NULL, if all percpu reorder queues are empty.
158  *
159  * -EINPROGRESS, if the next object that needs serialization will
160  *  be parallel processed by another cpu and is not yet present in
161  *  the cpu's reorder queue.
162  *
163  * -ENODATA, if this cpu has to do the parallel processing for
164  *  the next object.
165  */
166 static struct padata_priv *padata_get_next(struct parallel_data *pd)
167 {
168         int cpu, num_cpus;
169         unsigned int next_nr, next_index;
170         struct padata_parallel_queue *next_queue;
171         struct padata_priv *padata;
172         struct padata_list *reorder;
173 
174         num_cpus = cpumask_weight(pd->cpumask.pcpu);
175 
176         /*
177          * Calculate the percpu reorder queue and the sequence
178          * number of the next object.
179          */
180         next_nr = pd->processed;
181         next_index = next_nr % num_cpus;
182         cpu = padata_index_to_cpu(pd, next_index);
183         next_queue = per_cpu_ptr(pd->pqueue, cpu);
184 
185         padata = NULL;
186 
187         reorder = &next_queue->reorder;
188 
189         if (!list_empty(&reorder->list)) {
190                 padata = list_entry(reorder->list.next,
191                                     struct padata_priv, list);
192 
193                 spin_lock(&reorder->lock);
194                 list_del_init(&padata->list);
195                 atomic_dec(&pd->reorder_objects);
196                 spin_unlock(&reorder->lock);
197 
198                 pd->processed++;
199 
200                 goto out;
201         }
202 
203         if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
204                 padata = ERR_PTR(-ENODATA);
205                 goto out;
206         }
207 
208         padata = ERR_PTR(-EINPROGRESS);
209 out:
210         return padata;
211 }
212 
213 static void padata_reorder(struct parallel_data *pd)
214 {
215         int cb_cpu;
216         struct padata_priv *padata;
217         struct padata_serial_queue *squeue;
218         struct padata_instance *pinst = pd->pinst;
219 
220         /*
221          * We need to ensure that only one cpu can work on dequeueing of
222  * the reorder queue at a time. Calculating in which percpu reorder
223          * queue the next object will arrive takes some time. A spinlock
224          * would be highly contended. Also it is not clear in which order
225  * the objects arrive at the reorder queues. So a cpu could wait to
226          * get the lock just to notice that there is nothing to do at the
227          * moment. Therefore we use a trylock and let the holder of the lock
228          * care for all the objects enqueued during the holdtime of the lock.
229          */
230         if (!spin_trylock_bh(&pd->lock))
231                 return;
232 
233         while (1) {
234                 padata = padata_get_next(pd);
235 
236                 /*
237                  * All reorder queues are empty, or the next object that needs
238                  * serialization is parallel processed by another cpu and is
239  * still on its way to the cpu's reorder queue, nothing to
240                  * do for now.
241                  */
242                 if (!padata || PTR_ERR(padata) == -EINPROGRESS)
243                         break;
244 
245                 /*
246                  * This cpu has to do the parallel processing of the next
247                  * object. It's waiting in the cpu's parallelization queue,
248                  * so exit immediately.
249                  */
250                 if (PTR_ERR(padata) == -ENODATA) {
251                         del_timer(&pd->timer);
252                         spin_unlock_bh(&pd->lock);
253                         return;
254                 }
255 
256                 cb_cpu = padata->cb_cpu;
257                 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
258 
259                 spin_lock(&squeue->serial.lock);
260                 list_add_tail(&padata->list, &squeue->serial.list);
261                 spin_unlock(&squeue->serial.lock);
262 
263                 queue_work_on(cb_cpu, pinst->wq, &squeue->work);
264         }
265 
266         spin_unlock_bh(&pd->lock);
267 
268         /*
269          * The next object that needs serialization might have arrived to
270          * the reorder queues in the meantime, we will be called again
271          * from the timer function if no one else cares for it.
272          */
273         if (atomic_read(&pd->reorder_objects)
274                         && !(pinst->flags & PADATA_RESET))
275                 mod_timer(&pd->timer, jiffies + HZ);
276         else
277                 del_timer(&pd->timer);
278 
279         return;
280 }
281 
282 static void padata_reorder_timer(unsigned long arg)
283 {
284         struct parallel_data *pd = (struct parallel_data *)arg;
285 
286         padata_reorder(pd);
287 }
288 
289 static void padata_serial_worker(struct work_struct *serial_work)
290 {
291         struct padata_serial_queue *squeue;
292         struct parallel_data *pd;
293         LIST_HEAD(local_list);
294 
295         local_bh_disable();
296         squeue = container_of(serial_work, struct padata_serial_queue, work);
297         pd = squeue->pd;
298 
299         spin_lock(&squeue->serial.lock);
300         list_replace_init(&squeue->serial.list, &local_list);
301         spin_unlock(&squeue->serial.lock);
302 
303         while (!list_empty(&local_list)) {
304                 struct padata_priv *padata;
305 
306                 padata = list_entry(local_list.next,
307                                     struct padata_priv, list);
308 
309                 list_del_init(&padata->list);
310 
311                 padata->serial(padata);
312                 atomic_dec(&pd->refcnt);
313         }
314         local_bh_enable();
315 }
316 
317 /**
318  * padata_do_serial - padata serialization function
319  *
320  * @padata: object to be serialized.
321  *
322  * padata_do_serial must be called for every parallelized object.
323  * The serialization callback function will run with BHs off.
324  */
325 void padata_do_serial(struct padata_priv *padata)
326 {
327         int cpu;
328         struct padata_parallel_queue *pqueue;
329         struct parallel_data *pd;
330 
331         pd = padata->pd;
332 
333         cpu = get_cpu();
334         pqueue = per_cpu_ptr(pd->pqueue, cpu);
335 
336         spin_lock(&pqueue->reorder.lock);
337         atomic_inc(&pd->reorder_objects);
338         list_add_tail(&padata->list, &pqueue->reorder.list);
339         spin_unlock(&pqueue->reorder.lock);
340 
341         put_cpu();
342 
343         padata_reorder(pd);
344 }
345 EXPORT_SYMBOL(padata_do_serial);
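
/*
 * Illustrative sketch (not part of padata.c): padata_do_serial() need not be
 * called from inside the parallel callback.  A client whose parallel work
 * finishes asynchronously (for example in a hardware completion handler) may
 * call it later, as long as every object submitted with padata_do_parallel()
 * is eventually passed to padata_do_serial().  my_completion_handler and
 * struct my_job (from the sketch above) are hypothetical.
 */
static void my_completion_handler(struct my_job *job)
{
        /* re-enter padata so the object is serialized in submission order */
        padata_do_serial(&job->padata);
}
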
346 
347 static int padata_setup_cpumasks(struct parallel_data *pd,
348                                  const struct cpumask *pcpumask,
349                                  const struct cpumask *cbcpumask)
350 {
351         if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
352                 return -ENOMEM;
353 
354         cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
355         if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
356                 free_cpumask_var(pd->cpumask.pcpu);
357                 return -ENOMEM;
358         }
359 
360         cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
361         return 0;
362 }
363 
364 static void __padata_list_init(struct padata_list *pd_list)
365 {
366         INIT_LIST_HEAD(&pd_list->list);
367         spin_lock_init(&pd_list->lock);
368 }
369 
370 /* Initialize all percpu queues used by serial workers */
371 static void padata_init_squeues(struct parallel_data *pd)
372 {
373         int cpu;
374         struct padata_serial_queue *squeue;
375 
376         for_each_cpu(cpu, pd->cpumask.cbcpu) {
377                 squeue = per_cpu_ptr(pd->squeue, cpu);
378                 squeue->pd = pd;
379                 __padata_list_init(&squeue->serial);
380                 INIT_WORK(&squeue->work, padata_serial_worker);
381         }
382 }
383 
384 /* Initialize all percpu queues used by parallel workers */
385 static void padata_init_pqueues(struct parallel_data *pd)
386 {
387         int cpu_index, cpu;
388         struct padata_parallel_queue *pqueue;
389 
390         cpu_index = 0;
391         for_each_cpu(cpu, pd->cpumask.pcpu) {
392                 pqueue = per_cpu_ptr(pd->pqueue, cpu);
393                 pqueue->pd = pd;
394                 pqueue->cpu_index = cpu_index;
395                 cpu_index++;
396 
397                 __padata_list_init(&pqueue->reorder);
398                 __padata_list_init(&pqueue->parallel);
399                 INIT_WORK(&pqueue->work, padata_parallel_worker);
400                 atomic_set(&pqueue->num_obj, 0);
401         }
402 }
403 
404 /* Allocate and initialize the internal cpumask-dependent resources. */
405 static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
406                                              const struct cpumask *pcpumask,
407                                              const struct cpumask *cbcpumask)
408 {
409         struct parallel_data *pd;
410 
411         pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
412         if (!pd)
413                 goto err;
414 
415         pd->pqueue = alloc_percpu(struct padata_parallel_queue);
416         if (!pd->pqueue)
417                 goto err_free_pd;
418 
419         pd->squeue = alloc_percpu(struct padata_serial_queue);
420         if (!pd->squeue)
421                 goto err_free_pqueue;
422         if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
423                 goto err_free_squeue;
424 
425         padata_init_pqueues(pd);
426         padata_init_squeues(pd);
427         setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
428         atomic_set(&pd->seq_nr, -1);
429         atomic_set(&pd->reorder_objects, 0);
430         atomic_set(&pd->refcnt, 0);
431         pd->pinst = pinst;
432         spin_lock_init(&pd->lock);
433 
434         return pd;
435 
436 err_free_squeue:
437         free_percpu(pd->squeue);
438 err_free_pqueue:
439         free_percpu(pd->pqueue);
440 err_free_pd:
441         kfree(pd);
442 err:
443         return NULL;
444 }
445 
446 static void padata_free_pd(struct parallel_data *pd)
447 {
448         free_cpumask_var(pd->cpumask.pcpu);
449         free_cpumask_var(pd->cpumask.cbcpu);
450         free_percpu(pd->pqueue);
451         free_percpu(pd->squeue);
452         kfree(pd);
453 }
454 
455 /* Flush all objects out of the padata queues. */
456 static void padata_flush_queues(struct parallel_data *pd)
457 {
458         int cpu;
459         struct padata_parallel_queue *pqueue;
460         struct padata_serial_queue *squeue;
461 
462         for_each_cpu(cpu, pd->cpumask.pcpu) {
463                 pqueue = per_cpu_ptr(pd->pqueue, cpu);
464                 flush_work(&pqueue->work);
465         }
466 
467         del_timer_sync(&pd->timer);
468 
469         if (atomic_read(&pd->reorder_objects))
470                 padata_reorder(pd);
471 
472         for_each_cpu(cpu, pd->cpumask.cbcpu) {
473                 squeue = per_cpu_ptr(pd->squeue, cpu);
474                 flush_work(&squeue->work);
475         }
476 
477         BUG_ON(atomic_read(&pd->refcnt) != 0);
478 }
479 
480 static void __padata_start(struct padata_instance *pinst)
481 {
482         pinst->flags |= PADATA_INIT;
483 }
484 
485 static void __padata_stop(struct padata_instance *pinst)
486 {
487         if (!(pinst->flags & PADATA_INIT))
488                 return;
489 
490         pinst->flags &= ~PADATA_INIT;
491 
492         synchronize_rcu();
493 
494         get_online_cpus();
495         padata_flush_queues(pinst->pd);
496         put_online_cpus();
497 }
498 
499 /* Replace the internal control structure with a new one. */
500 static void padata_replace(struct padata_instance *pinst,
501                            struct parallel_data *pd_new)
502 {
503         struct parallel_data *pd_old = pinst->pd;
504         int notification_mask = 0;
505 
506         pinst->flags |= PADATA_RESET;
507 
508         rcu_assign_pointer(pinst->pd, pd_new);
509 
510         synchronize_rcu();
511 
512         if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
513                 notification_mask |= PADATA_CPU_PARALLEL;
514         if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
515                 notification_mask |= PADATA_CPU_SERIAL;
516 
517         padata_flush_queues(pd_old);
518         padata_free_pd(pd_old);
519 
520         if (notification_mask)
521                 blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
522                                              notification_mask,
523                                              &pd_new->cpumask);
524 
525         pinst->flags &= ~PADATA_RESET;
526 }
527 
528 /**
529  * padata_register_cpumask_notifier - Registers a notifier that will be called
530  *                             if either pcpu or cbcpu or both cpumasks change.
531  *
532  * @pinst: A pointer to padata instance
533  * @nblock: A pointer to notifier block.
534  */
535 int padata_register_cpumask_notifier(struct padata_instance *pinst,
536                                      struct notifier_block *nblock)
537 {
538         return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
539                                                 nblock);
540 }
541 EXPORT_SYMBOL(padata_register_cpumask_notifier);
542 
543 /**
544  * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
545  *        registered earlier using padata_register_cpumask_notifier
546  *
547  * @pinst: A pointer to padata instance.
548  * @nblock: A pointer to notifier block.
549  */
550 int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
551                                        struct notifier_block *nblock)
552 {
553         return blocking_notifier_chain_unregister(
554                 &pinst->cpumask_change_notifier,
555                 nblock);
556 }
557 EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
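
/*
 * Illustrative sketch (not part of padata.c): reacting to cpumask changes.
 * As padata_replace() above shows, the chain is called with a mask of
 * PADATA_CPU_PARALLEL and/or PADATA_CPU_SERIAL in 'val' and a pointer to the
 * new instance's struct padata_cpumask in 'data'.  my_cpumask_change and
 * my_cpumask_nb are hypothetical names.
 */
static int my_cpumask_change(struct notifier_block *nb, unsigned long val,
                             void *data)
{
        struct padata_cpumask *new_masks = data;

        if (val & PADATA_CPU_PARALLEL)
                pr_info("parallel cpumask is now %*pb\n",
                        cpumask_pr_args(new_masks->pcpu));
        if (val & PADATA_CPU_SERIAL)
                pr_info("serial cpumask is now %*pb\n",
                        cpumask_pr_args(new_masks->cbcpu));

        return NOTIFY_OK;
}

static struct notifier_block my_cpumask_nb = {
        .notifier_call = my_cpumask_change,
};

/* registered with padata_register_cpumask_notifier(pinst, &my_cpumask_nb) */
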
558 
559 
560 /* If cpumask contains no active cpu, we mark the instance as invalid. */
561 static bool padata_validate_cpumask(struct padata_instance *pinst,
562                                     const struct cpumask *cpumask)
563 {
564         if (!cpumask_intersects(cpumask, cpu_online_mask)) {
565                 pinst->flags |= PADATA_INVALID;
566                 return false;
567         }
568 
569         pinst->flags &= ~PADATA_INVALID;
570         return true;
571 }
572 
573 static int __padata_set_cpumasks(struct padata_instance *pinst,
574                                  cpumask_var_t pcpumask,
575                                  cpumask_var_t cbcpumask)
576 {
577         int valid;
578         struct parallel_data *pd;
579 
580         valid = padata_validate_cpumask(pinst, pcpumask);
581         if (!valid) {
582                 __padata_stop(pinst);
583                 goto out_replace;
584         }
585 
586         valid = padata_validate_cpumask(pinst, cbcpumask);
587         if (!valid)
588                 __padata_stop(pinst);
589 
590 out_replace:
591         pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
592         if (!pd)
593                 return -ENOMEM;
594 
595         cpumask_copy(pinst->cpumask.pcpu, pcpumask);
596         cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
597 
598         padata_replace(pinst, pd);
599 
600         if (valid)
601                 __padata_start(pinst);
602 
603         return 0;
604 }
605 
606 /**
607  * padata_set_cpumask: Set the cpumask specified by @cpumask_type to the
608  *                     value of @cpumask.
609  *
610  * @pinst: padata instance
611  * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
612  *                to the serial and parallel cpumasks respectively.
613  * @cpumask: the cpumask to use
614  */
615 int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
616                        cpumask_var_t cpumask)
617 {
618         struct cpumask *serial_mask, *parallel_mask;
619         int err = -EINVAL;
620 
621         mutex_lock(&pinst->lock);
622         get_online_cpus();
623 
624         switch (cpumask_type) {
625         case PADATA_CPU_PARALLEL:
626                 serial_mask = pinst->cpumask.cbcpu;
627                 parallel_mask = cpumask;
628                 break;
629         case PADATA_CPU_SERIAL:
630                 parallel_mask = pinst->cpumask.pcpu;
631                 serial_mask = cpumask;
632                 break;
633         default:
634                  goto out;
635         }
636 
637         err =  __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
638 
639 out:
640         put_online_cpus();
641         mutex_unlock(&pinst->lock);
642 
643         return err;
644 }
645 EXPORT_SYMBOL(padata_set_cpumask);
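
/*
 * Illustrative sketch (not part of padata.c): restricting the parallel
 * workers to a caller-chosen set of cpus.  padata_set_cpumask() copies the
 * mask, so the temporary cpumask can be freed afterwards.
 * my_restrict_parallel is a hypothetical name.
 */
static int my_restrict_parallel(struct padata_instance *pinst,
                                const struct cpumask *wanted)
{
        cpumask_var_t mask;
        int err;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(mask, wanted);
        err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
        free_cpumask_var(mask);

        return err;
}
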
646 
647 /**
648  * padata_start - start the parallel processing
649  *
650  * @pinst: padata instance to start
651  */
652 int padata_start(struct padata_instance *pinst)
653 {
654         int err = 0;
655 
656         mutex_lock(&pinst->lock);
657 
658         if (pinst->flags & PADATA_INVALID)
659                 err = -EINVAL;
660 
661          __padata_start(pinst);
662 
663         mutex_unlock(&pinst->lock);
664 
665         return err;
666 }
667 EXPORT_SYMBOL(padata_start);
668 
669 /**
670  * padata_stop - stop the parallel processing
671  *
672  * @pinst: padata instance to stop
673  */
674 void padata_stop(struct padata_instance *pinst)
675 {
676         mutex_lock(&pinst->lock);
677         __padata_stop(pinst);
678         mutex_unlock(&pinst->lock);
679 }
680 EXPORT_SYMBOL(padata_stop);
681 
682 #ifdef CONFIG_HOTPLUG_CPU
683 
684 static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
685 {
686         struct parallel_data *pd;
687 
688         if (cpumask_test_cpu(cpu, cpu_online_mask)) {
689                 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
690                                      pinst->cpumask.cbcpu);
691                 if (!pd)
692                         return -ENOMEM;
693 
694                 padata_replace(pinst, pd);
695 
696                 if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
697                     padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
698                         __padata_start(pinst);
699         }
700 
701         return 0;
702 }
703 
704 static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
705 {
706         struct parallel_data *pd = NULL;
707 
708         if (cpumask_test_cpu(cpu, cpu_online_mask)) {
709 
710                 if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
711                     !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
712                         __padata_stop(pinst);
713 
714                 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
715                                      pinst->cpumask.cbcpu);
716                 if (!pd)
717                         return -ENOMEM;
718 
719                 padata_replace(pinst, pd);
720 
721                 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
722                 cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
723         }
724 
725         return 0;
726 }
727 
728 /**
729  * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
730  *                     padata cpumasks.
731  *
732  * @pinst: padata instance
733  * @cpu: cpu to remove
734  * @mask: bitmask specifying from which cpumask @cpu should be removed
735  *        The @mask may be any combination of the following flags:
736  *          PADATA_CPU_SERIAL   - serial cpumask
737  *          PADATA_CPU_PARALLEL - parallel cpumask
738  */
739 int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
740 {
741         int err;
742 
743         if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
744                 return -EINVAL;
745 
746         mutex_lock(&pinst->lock);
747 
748         get_online_cpus();
749         if (mask & PADATA_CPU_SERIAL)
750                 cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
751         if (mask & PADATA_CPU_PARALLEL)
752                 cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);
753 
754         err = __padata_remove_cpu(pinst, cpu);
755         put_online_cpus();
756 
757         mutex_unlock(&pinst->lock);
758 
759         return err;
760 }
761 EXPORT_SYMBOL(padata_remove_cpu);
762 
763 static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
764 {
765         return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
766                 cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
767 }
768 
769 static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
770 {
771         struct padata_instance *pinst;
772         int ret;
773 
774         pinst = hlist_entry_safe(node, struct padata_instance, node);
775         if (!pinst_has_cpu(pinst, cpu))
776                 return 0;
777 
778         mutex_lock(&pinst->lock);
779         ret = __padata_add_cpu(pinst, cpu);
780         mutex_unlock(&pinst->lock);
781         return ret;
782 }
783 
784 static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
785 {
786         struct padata_instance *pinst;
787         int ret;
788 
789         pinst = hlist_entry_safe(node, struct padata_instance, node);
790         if (!pinst_has_cpu(pinst, cpu))
791                 return 0;
792 
793         mutex_lock(&pinst->lock);
794         ret = __padata_remove_cpu(pinst, cpu);
795         mutex_unlock(&pinst->lock);
796         return ret;
797 }
798 
799 static enum cpuhp_state hp_online;
800 #endif
801 
802 static void __padata_free(struct padata_instance *pinst)
803 {
804 #ifdef CONFIG_HOTPLUG_CPU
805         cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
806 #endif
807 
808         padata_stop(pinst);
809         padata_free_pd(pinst->pd);
810         free_cpumask_var(pinst->cpumask.pcpu);
811         free_cpumask_var(pinst->cpumask.cbcpu);
812         kfree(pinst);
813 }
814 
815 #define kobj2pinst(_kobj)                                       \
816         container_of(_kobj, struct padata_instance, kobj)
817 #define attr2pentry(_attr)                                      \
818         container_of(_attr, struct padata_sysfs_entry, attr)
819 
820 static void padata_sysfs_release(struct kobject *kobj)
821 {
822         struct padata_instance *pinst = kobj2pinst(kobj);
823         __padata_free(pinst);
824 }
825 
826 struct padata_sysfs_entry {
827         struct attribute attr;
828         ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
829         ssize_t (*store)(struct padata_instance *, struct attribute *,
830                          const char *, size_t);
831 };
832 
833 static ssize_t show_cpumask(struct padata_instance *pinst,
834                             struct attribute *attr,  char *buf)
835 {
836         struct cpumask *cpumask;
837         ssize_t len;
838 
839         mutex_lock(&pinst->lock);
840         if (!strcmp(attr->name, "serial_cpumask"))
841                 cpumask = pinst->cpumask.cbcpu;
842         else
843                 cpumask = pinst->cpumask.pcpu;
844 
845         len = snprintf(buf, PAGE_SIZE, "%*pb\n",
846                        nr_cpu_ids, cpumask_bits(cpumask));
847         mutex_unlock(&pinst->lock);
848         return len < PAGE_SIZE ? len : -EINVAL;
849 }
850 
851 static ssize_t store_cpumask(struct padata_instance *pinst,
852                              struct attribute *attr,
853                              const char *buf, size_t count)
854 {
855         cpumask_var_t new_cpumask;
856         ssize_t ret;
857         int mask_type;
858 
859         if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
860                 return -ENOMEM;
861 
862         ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
863                            nr_cpumask_bits);
864         if (ret < 0)
865                 goto out;
866 
867         mask_type = !strcmp(attr->name, "serial_cpumask") ?
868                 PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
869         ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
870         if (!ret)
871                 ret = count;
872 
873 out:
874         free_cpumask_var(new_cpumask);
875         return ret;
876 }
877 
878 #define PADATA_ATTR_RW(_name, _show_name, _store_name)          \
879         static struct padata_sysfs_entry _name##_attr =         \
880                 __ATTR(_name, 0644, _show_name, _store_name)
881 #define PADATA_ATTR_RO(_name, _show_name)               \
882         static struct padata_sysfs_entry _name##_attr = \
883                 __ATTR(_name, 0400, _show_name, NULL)
884 
885 PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
886 PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
887 
888 /*
889  * Padata sysfs provides the following objects:
890  * serial_cpumask   [RW] - cpumask for serial workers
891  * parallel_cpumask [RW] - cpumask for parallel workers
892  */
893 static struct attribute *padata_default_attrs[] = {
894         &serial_cpumask_attr.attr,
895         &parallel_cpumask_attr.attr,
896         NULL,
897 };
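
/*
 * Note (illustrative, not part of padata.c): padata_alloc() only initializes
 * pinst->kobj; the attributes above appear in sysfs once the client adds the
 * kobject itself with kobject_add().  pcrypt, for example, exposes its
 * instances under /sys/kernel/pcrypt/, so the parallel cpumask of its
 * "pencrypt" instance can be changed with
 *
 *   echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */
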
898 
899 static ssize_t padata_sysfs_show(struct kobject *kobj,
900                                  struct attribute *attr, char *buf)
901 {
902         struct padata_instance *pinst;
903         struct padata_sysfs_entry *pentry;
904         ssize_t ret = -EIO;
905 
906         pinst = kobj2pinst(kobj);
907         pentry = attr2pentry(attr);
908         if (pentry->show)
909                 ret = pentry->show(pinst, attr, buf);
910 
911         return ret;
912 }
913 
914 static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
915                                   const char *buf, size_t count)
916 {
917         struct padata_instance *pinst;
918         struct padata_sysfs_entry *pentry;
919         ssize_t ret = -EIO;
920 
921         pinst = kobj2pinst(kobj);
922         pentry = attr2pentry(attr);
923         if (pentry->store)
924                 ret = pentry->store(pinst, attr, buf, count);
925 
926         return ret;
927 }
928 
929 static const struct sysfs_ops padata_sysfs_ops = {
930         .show = padata_sysfs_show,
931         .store = padata_sysfs_store,
932 };
933 
934 static struct kobj_type padata_attr_type = {
935         .sysfs_ops = &padata_sysfs_ops,
936         .default_attrs = padata_default_attrs,
937         .release = padata_sysfs_release,
938 };
939 
940 /**
941  * padata_alloc_possible - Allocate and initialize padata instance.
942  *                         Use the cpu_possible_mask for serial and
943  *                         parallel workers.
944  *
945  * @wq: workqueue to use for the allocated padata instance
946  */
947 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
948 {
949         return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
950 }
951 EXPORT_SYMBOL(padata_alloc_possible);
952 
953 /**
954  * padata_alloc - allocate and initialize a padata instance and specify
955  *                cpumasks for serial and parallel workers.
956  *
957  * @wq: workqueue to use for the allocated padata instance
958  * @pcpumask: cpumask that will be used for padata parallelization
959  * @cbcpumask: cpumask that will be used for padata serialization
960  */
961 struct padata_instance *padata_alloc(struct workqueue_struct *wq,
962                                      const struct cpumask *pcpumask,
963                                      const struct cpumask *cbcpumask)
964 {
965         struct padata_instance *pinst;
966         struct parallel_data *pd = NULL;
967 
968         pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
969         if (!pinst)
970                 goto err;
971 
972         get_online_cpus();
973         if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
974                 goto err_free_inst;
975         if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
976                 free_cpumask_var(pinst->cpumask.pcpu);
977                 goto err_free_inst;
978         }
979         if (!padata_validate_cpumask(pinst, pcpumask) ||
980             !padata_validate_cpumask(pinst, cbcpumask))
981                 goto err_free_masks;
982 
983         pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
984         if (!pd)
985                 goto err_free_masks;
986 
987         rcu_assign_pointer(pinst->pd, pd);
988 
989         pinst->wq = wq;
990 
991         cpumask_copy(pinst->cpumask.pcpu, pcpumask);
992         cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
993 
994         pinst->flags = 0;
995 
996         put_online_cpus();
997 
998         BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
999         kobject_init(&pinst->kobj, &padata_attr_type);
1000         mutex_init(&pinst->lock);
1001 
1002 #ifdef CONFIG_HOTPLUG_CPU
1003         cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
1004 #endif
1005         return pinst;
1006 
1007 err_free_masks:
1008         free_cpumask_var(pinst->cpumask.pcpu);
1009         free_cpumask_var(pinst->cpumask.cbcpu);
1010 err_free_inst:
1011         kfree(pinst);
1012         put_online_cpus();
1013 err:
1014         return NULL;
1015 }
1016 
1017 /**
1018  * padata_free - free a padata instance
1019  *
1020  * @pinst: padata instance to free
1021  */
1022 void padata_free(struct padata_instance *pinst)
1023 {
1024         kobject_put(&pinst->kobj);
1025 }
1026 EXPORT_SYMBOL(padata_free);
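
/*
 * Illustrative sketch (not part of padata.c): the full lifecycle of a padata
 * instance, from allocation to teardown.  The workqueue name "my_padata" and
 * the functions my_padata_init/my_padata_exit are hypothetical.
 */
static struct workqueue_struct *my_wq;
static struct padata_instance *my_pinst;

static int my_padata_init(void)
{
        int err;

        my_wq = alloc_workqueue("my_padata",
                                WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
        if (!my_wq)
                return -ENOMEM;

        my_pinst = padata_alloc_possible(my_wq);
        if (!my_pinst) {
                destroy_workqueue(my_wq);
                return -ENOMEM;
        }

        err = padata_start(my_pinst);
        if (err) {
                padata_free(my_pinst);
                destroy_workqueue(my_wq);
                return err;
        }

        /* objects can now be submitted with padata_do_parallel() */
        return 0;
}

static void my_padata_exit(void)
{
        padata_stop(my_pinst);
        padata_free(my_pinst);          /* drops the kobject and frees pinst */
        destroy_workqueue(my_wq);
}
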
1027 
1028 #ifdef CONFIG_HOTPLUG_CPU
1029 
1030 static __init int padata_driver_init(void)
1031 {
1032         int ret;
1033 
1034         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1035                                       padata_cpu_online,
1036                                       padata_cpu_prep_down);
1037         if (ret < 0)
1038                 return ret;
1039         hp_online = ret;
1040         return 0;
1041 }
1042 module_init(padata_driver_init);
1043 
1044 static __exit void padata_driver_exit(void)
1045 {
1046         cpuhp_remove_multi_state(hp_online);
1047 }
1048 module_exit(padata_driver_exit);
1049 #endif
1050 
