Version:  2.0.40 2.2.26 2.4.37 2.6.39 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15

Linux/kernel/sched/fair.c

  1 /*
  2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
  3  *
  4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  5  *
  6  *  Interactivity improvements by Mike Galbraith
  7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
  8  *
  9  *  Various enhancements by Dmitry Adamushko.
 10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 11  *
 12  *  Group scheduling enhancements by Srivatsa Vaddagiri
 13  *  Copyright IBM Corporation, 2007
 14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 15  *
 16  *  Scaled math optimizations by Thomas Gleixner
 17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 18  *
 19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 21  */
 22 
 23 #include <linux/latencytop.h>
 24 #include <linux/sched.h>
 25 #include <linux/cpumask.h>
 26 #include <linux/slab.h>
 27 #include <linux/profile.h>
 28 #include <linux/interrupt.h>
 29 #include <linux/mempolicy.h>
 30 #include <linux/migrate.h>
 31 #include <linux/task_work.h>
 32 
 33 #include <trace/events/sched.h>
 34 
 35 #include "sched.h"
 36 
 37 /*
 38  * Targeted preemption latency for CPU-bound tasks:
 39  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 40  *
 41  * NOTE: this latency value is not the same as the concept of
 42  * 'timeslice length' - timeslices in CFS are of variable length
 43  * and have no persistent notion like in traditional, time-slice
 44  * based scheduling concepts.
 45  *
 46  * (to see the precise effective timeslice length of your workload,
 47  *  run vmstat and monitor the context-switches (cs) field)
 48  */
 49 unsigned int sysctl_sched_latency = 6000000ULL;
 50 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 51 
 52 /*
 53  * The initial- and re-scaling of tunables is configurable
 54  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 55  *
 56  * Options are:
 57  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 58  * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
 59  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 60  */
 61 enum sched_tunable_scaling sysctl_sched_tunable_scaling
 62         = SCHED_TUNABLESCALING_LOG;
 63 
 64 /*
 65  * Minimal preemption granularity for CPU-bound tasks:
 66  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 67  */
 68 unsigned int sysctl_sched_min_granularity = 750000ULL;
 69 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 70 
 71 /*
 72  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 73  */
 74 static unsigned int sched_nr_latency = 8;
 75 
 76 /*
 77  * After fork, child runs first. If set to 0 (default) then
 78  * parent will (try to) run first.
 79  */
 80 unsigned int sysctl_sched_child_runs_first __read_mostly;
 81 
 82 /*
 83  * SCHED_OTHER wake-up granularity.
 84  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 85  *
 86  * This option delays the preemption effects of decoupled workloads
 87  * and reduces their over-scheduling. Synchronous workloads will still
 88  * have immediate wakeup/sleep latencies.
 89  */
 90 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 91 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 92 
 93 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 94 
 95 /*
 96  * The exponential sliding  window over which load is averaged for shares
 97  * distribution.
 98  * (default: 10msec)
 99  */
100 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101 
102 #ifdef CONFIG_CFS_BANDWIDTH
103 /*
104  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105  * each time a cfs_rq requests quota.
106  *
107  * Note: in the case that the slice exceeds the runtime remaining (either due
108  * to consumption or the quota being specified to be smaller than the slice)
109  * we will always only issue the remaining available time.
110  *
111  * default: 5 msec, units: microseconds
112   */
113 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114 #endif
115 
116 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117 {
118         lw->weight += inc;
119         lw->inv_weight = 0;
120 }
121 
122 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123 {
124         lw->weight -= dec;
125         lw->inv_weight = 0;
126 }
127 
128 static inline void update_load_set(struct load_weight *lw, unsigned long w)
129 {
130         lw->weight = w;
131         lw->inv_weight = 0;
132 }
133 
134 /*
135  * Increase the granularity value when there are more CPUs,
136  * because with more CPUs the 'effective latency' as visible
137  * to users decreases. But the relationship is not linear,
138  * so pick a second-best guess by going with the log2 of the
139  * number of CPUs.
140  *
141  * This idea comes from the SD scheduler of Con Kolivas:
142  */
143 static int get_update_sysctl_factor(void)
144 {
145         unsigned int cpus = min_t(int, num_online_cpus(), 8);
146         unsigned int factor;
147 
148         switch (sysctl_sched_tunable_scaling) {
149         case SCHED_TUNABLESCALING_NONE:
150                 factor = 1;
151                 break;
152         case SCHED_TUNABLESCALING_LINEAR:
153                 factor = cpus;
154                 break;
155         case SCHED_TUNABLESCALING_LOG:
156         default:
157                 factor = 1 + ilog2(cpus);
158                 break;
159         }
160 
161         return factor;
162 }
163 
164 static void update_sysctl(void)
165 {
166         unsigned int factor = get_update_sysctl_factor();
167 
168 #define SET_SYSCTL(name) \
169         (sysctl_##name = (factor) * normalized_sysctl_##name)
170         SET_SYSCTL(sched_min_granularity);
171         SET_SYSCTL(sched_latency);
172         SET_SYSCTL(sched_wakeup_granularity);
173 #undef SET_SYSCTL
174 }
175 
176 void sched_init_granularity(void)
177 {
178         update_sysctl();
179 }
180 
181 #define WMULT_CONST     (~0U)
182 #define WMULT_SHIFT     32
183 
184 static void __update_inv_weight(struct load_weight *lw)
185 {
186         unsigned long w;
187 
188         if (likely(lw->inv_weight))
189                 return;
190 
191         w = scale_load_down(lw->weight);
192 
193         if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
194                 lw->inv_weight = 1;
195         else if (unlikely(!w))
196                 lw->inv_weight = WMULT_CONST;
197         else
198                 lw->inv_weight = WMULT_CONST / w;
199 }
200 
201 /*
202  * delta_exec * weight / lw.weight
203  *   OR
204  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
205  *
206  * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
207  * we're guaranteed shift stays positive because inv_weight is guaranteed to
208  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
209  *
210  * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
211  * weight/lw.weight <= 1, and therefore our shift will also be positive.
212  */
213 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
214 {
215         u64 fact = scale_load_down(weight);
216         int shift = WMULT_SHIFT;
217 
218         __update_inv_weight(lw);
219 
220         if (unlikely(fact >> 32)) {
221                 while (fact >> 32) {
222                         fact >>= 1;
223                         shift--;
224                 }
225         }
226 
227         /* hint to use a 32x32->64 mul */
228         fact = (u64)(u32)fact * lw->inv_weight;
229 
230         while (fact >> 32) {
231                 fact >>= 1;
232                 shift--;
233         }
234 
235         return mul_u64_u32_shr(delta_exec, fact, shift);
236 }
237 
238 
239 const struct sched_class fair_sched_class;
240 
241 /**************************************************************
242  * CFS operations on generic schedulable entities:
243  */
244 
245 #ifdef CONFIG_FAIR_GROUP_SCHED
246 
247 /* cpu runqueue to which this cfs_rq is attached */
248 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
249 {
250         return cfs_rq->rq;
251 }
252 
253 /* An entity is a task if it doesn't "own" a runqueue */
254 #define entity_is_task(se)      (!se->my_q)
255 
256 static inline struct task_struct *task_of(struct sched_entity *se)
257 {
258 #ifdef CONFIG_SCHED_DEBUG
259         WARN_ON_ONCE(!entity_is_task(se));
260 #endif
261         return container_of(se, struct task_struct, se);
262 }
263 
264 /* Walk up scheduling entities hierarchy */
265 #define for_each_sched_entity(se) \
266                 for (; se; se = se->parent)
267 
268 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
269 {
270         return p->se.cfs_rq;
271 }
272 
273 /* runqueue on which this entity is (to be) queued */
274 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
275 {
276         return se->cfs_rq;
277 }
278 
279 /* runqueue "owned" by this group */
280 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
281 {
282         return grp->my_q;
283 }
284 
285 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
286                                        int force_update);
287 
288 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
289 {
290         if (!cfs_rq->on_list) {
291                 /*
292                  * Ensure we either appear before our parent (if already
293                  * enqueued) or force our parent to appear after us when it is
294                  * enqueued.  The fact that we always enqueue bottom-up
295                  * reduces this to two cases.
296                  */
297                 if (cfs_rq->tg->parent &&
298                     cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
299                         list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
300                                 &rq_of(cfs_rq)->leaf_cfs_rq_list);
301                 } else {
302                         list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
303                                 &rq_of(cfs_rq)->leaf_cfs_rq_list);
304                 }
305 
306                 cfs_rq->on_list = 1;
307                 /* We should have no load, but we need to update last_decay. */
308                 update_cfs_rq_blocked_load(cfs_rq, 0);
309         }
310 }
311 
312 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
313 {
314         if (cfs_rq->on_list) {
315                 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
316                 cfs_rq->on_list = 0;
317         }
318 }
319 
320 /* Iterate thr' all leaf cfs_rq's on a runqueue */
321 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
322         list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
323 
324 /* Do the two (enqueued) entities belong to the same group ? */
325 static inline struct cfs_rq *
326 is_same_group(struct sched_entity *se, struct sched_entity *pse)
327 {
328         if (se->cfs_rq == pse->cfs_rq)
329                 return se->cfs_rq;
330 
331         return NULL;
332 }
333 
334 static inline struct sched_entity *parent_entity(struct sched_entity *se)
335 {
336         return se->parent;
337 }
338 
339 static void
340 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
341 {
342         int se_depth, pse_depth;
343 
344         /*
345          * preemption test can be made between sibling entities who are in the
346          * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
347          * both tasks until we find their ancestors who are siblings of common
348          * parent.
349          */
350 
351         /* First walk up until both entities are at same depth */
352         se_depth = (*se)->depth;
353         pse_depth = (*pse)->depth;
354 
355         while (se_depth > pse_depth) {
356                 se_depth--;
357                 *se = parent_entity(*se);
358         }
359 
360         while (pse_depth > se_depth) {
361                 pse_depth--;
362                 *pse = parent_entity(*pse);
363         }
364 
365         while (!is_same_group(*se, *pse)) {
366                 *se = parent_entity(*se);
367                 *pse = parent_entity(*pse);
368         }
369 }
370 
371 #else   /* !CONFIG_FAIR_GROUP_SCHED */
372 
373 static inline struct task_struct *task_of(struct sched_entity *se)
374 {
375         return container_of(se, struct task_struct, se);
376 }
377 
378 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
379 {
380         return container_of(cfs_rq, struct rq, cfs);
381 }
382 
383 #define entity_is_task(se)      1
384 
385 #define for_each_sched_entity(se) \
386                 for (; se; se = NULL)
387 
388 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
389 {
390         return &task_rq(p)->cfs;
391 }
392 
393 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
394 {
395         struct task_struct *p = task_of(se);
396         struct rq *rq = task_rq(p);
397 
398         return &rq->cfs;
399 }
400 
401 /* runqueue "owned" by this group */
402 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
403 {
404         return NULL;
405 }
406 
407 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
408 {
409 }
410 
411 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
412 {
413 }
414 
415 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
416                 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
417 
418 static inline struct sched_entity *parent_entity(struct sched_entity *se)
419 {
420         return NULL;
421 }
422 
423 static inline void
424 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
425 {
426 }
427 
428 #endif  /* CONFIG_FAIR_GROUP_SCHED */
429 
430 static __always_inline
431 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
432 
433 /**************************************************************
434  * Scheduling class tree data structure manipulation methods:
435  */
436 
437 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
438 {
439         s64 delta = (s64)(vruntime - max_vruntime);
440         if (delta > 0)
441                 max_vruntime = vruntime;
442 
443         return max_vruntime;
444 }
445 
446 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
447 {
448         s64 delta = (s64)(vruntime - min_vruntime);
449         if (delta < 0)
450                 min_vruntime = vruntime;
451 
452         return min_vruntime;
453 }
454 
455 static inline int entity_before(struct sched_entity *a,
456                                 struct sched_entity *b)
457 {
458         return (s64)(a->vruntime - b->vruntime) < 0;
459 }
460 
461 static void update_min_vruntime(struct cfs_rq *cfs_rq)
462 {
463         u64 vruntime = cfs_rq->min_vruntime;
464 
465         if (cfs_rq->curr)
466                 vruntime = cfs_rq->curr->vruntime;
467 
468         if (cfs_rq->rb_leftmost) {
469                 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
470                                                    struct sched_entity,
471                                                    run_node);
472 
473                 if (!cfs_rq->curr)
474                         vruntime = se->vruntime;
475                 else
476                         vruntime = min_vruntime(vruntime, se->vruntime);
477         }
478 
479         /* ensure we never gain time by being placed backwards. */
480         cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
481 #ifndef CONFIG_64BIT
482         smp_wmb();
483         cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
484 #endif
485 }
486 
487 /*
488  * Enqueue an entity into the rb-tree:
489  */
490 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
491 {
492         struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
493         struct rb_node *parent = NULL;
494         struct sched_entity *entry;
495         int leftmost = 1;
496 
497         /*
498          * Find the right place in the rbtree:
499          */
500         while (*link) {
501                 parent = *link;
502                 entry = rb_entry(parent, struct sched_entity, run_node);
503                 /*
504                  * We dont care about collisions. Nodes with
505                  * the same key stay together.
506                  */
507                 if (entity_before(se, entry)) {
508                         link = &parent->rb_left;
509                 } else {
510                         link = &parent->rb_right;
511                         leftmost = 0;
512                 }
513         }
514 
515         /*
516          * Maintain a cache of leftmost tree entries (it is frequently
517          * used):
518          */
519         if (leftmost)
520                 cfs_rq->rb_leftmost = &se->run_node;
521 
522         rb_link_node(&se->run_node, parent, link);
523         rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
524 }
525 
526 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
527 {
528         if (cfs_rq->rb_leftmost == &se->run_node) {
529                 struct rb_node *next_node;
530 
531                 next_node = rb_next(&se->run_node);
532                 cfs_rq->rb_leftmost = next_node;
533         }
534 
535         rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
536 }
537 
538 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
539 {
540         struct rb_node *left = cfs_rq->rb_leftmost;
541 
542         if (!left)
543                 return NULL;
544 
545         return rb_entry(left, struct sched_entity, run_node);
546 }
547 
548 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
549 {
550         struct rb_node *next = rb_next(&se->run_node);
551 
552         if (!next)
553                 return NULL;
554 
555         return rb_entry(next, struct sched_entity, run_node);
556 }
557 
558 #ifdef CONFIG_SCHED_DEBUG
559 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
560 {
561         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
562 
563         if (!last)
564                 return NULL;
565 
566         return rb_entry(last, struct sched_entity, run_node);
567 }
568 
569 /**************************************************************
570  * Scheduling class statistics methods:
571  */
572 
573 int sched_proc_update_handler(struct ctl_table *table, int write,
574                 void __user *buffer, size_t *lenp,
575                 loff_t *ppos)
576 {
577         int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
578         int factor = get_update_sysctl_factor();
579 
580         if (ret || !write)
581                 return ret;
582 
583         sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
584                                         sysctl_sched_min_granularity);
585 
586 #define WRT_SYSCTL(name) \
587         (normalized_sysctl_##name = sysctl_##name / (factor))
588         WRT_SYSCTL(sched_min_granularity);
589         WRT_SYSCTL(sched_latency);
590         WRT_SYSCTL(sched_wakeup_granularity);
591 #undef WRT_SYSCTL
592 
593         return 0;
594 }
595 #endif
596 
597 /*
598  * delta /= w
599  */
600 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
601 {
602         if (unlikely(se->load.weight != NICE_0_LOAD))
603                 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
604 
605         return delta;
606 }
607 
608 /*
609  * The idea is to set a period in which each task runs once.
610  *
611  * When there are too many tasks (sched_nr_latency) we have to stretch
612  * this period because otherwise the slices get too small.
613  *
614  * p = (nr <= nl) ? l : l*nr/nl
615  */
616 static u64 __sched_period(unsigned long nr_running)
617 {
618         u64 period = sysctl_sched_latency;
619         unsigned long nr_latency = sched_nr_latency;
620 
621         if (unlikely(nr_running > nr_latency)) {
622                 period = sysctl_sched_min_granularity;
623                 period *= nr_running;
624         }
625 
626         return period;
627 }
628 
629 /*
630  * We calculate the wall-time slice from the period by taking a part
631  * proportional to the weight.
632  *
633  * s = p*P[w/rw]
634  */
635 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
636 {
637         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
638 
639         for_each_sched_entity(se) {
640                 struct load_weight *load;
641                 struct load_weight lw;
642 
643                 cfs_rq = cfs_rq_of(se);
644                 load = &cfs_rq->load;
645 
646                 if (unlikely(!se->on_rq)) {
647                         lw = cfs_rq->load;
648 
649                         update_load_add(&lw, se->load.weight);
650                         load = &lw;
651                 }
652                 slice = __calc_delta(slice, se->load.weight, load);
653         }
654         return slice;
655 }
656 
657 /*
658  * We calculate the vruntime slice of a to-be-inserted task.
659  *
660  * vs = s/w
661  */
662 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
663 {
664         return calc_delta_fair(sched_slice(cfs_rq, se), se);
665 }
666 
667 #ifdef CONFIG_SMP
668 static unsigned long task_h_load(struct task_struct *p);
669 
670 static inline void __update_task_entity_contrib(struct sched_entity *se);
671 
672 /* Give new task start runnable values to heavy its load in infant time */
673 void init_task_runnable_average(struct task_struct *p)
674 {
675         u32 slice;
676 
677         p->se.avg.decay_count = 0;
678         slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
679         p->se.avg.runnable_avg_sum = slice;
680         p->se.avg.runnable_avg_period = slice;
681         __update_task_entity_contrib(&p->se);
682 }
683 #else
684 void init_task_runnable_average(struct task_struct *p)
685 {
686 }
687 #endif
688 
689 /*
690  * Update the current task's runtime statistics.
691  */
692 static void update_curr(struct cfs_rq *cfs_rq)
693 {
694         struct sched_entity *curr = cfs_rq->curr;
695         u64 now = rq_clock_task(rq_of(cfs_rq));
696         u64 delta_exec;
697 
698         if (unlikely(!curr))
699                 return;
700 
701         delta_exec = now - curr->exec_start;
702         if (unlikely((s64)delta_exec <= 0))
703                 return;
704 
705         curr->exec_start = now;
706 
707         schedstat_set(curr->statistics.exec_max,
708                       max(delta_exec, curr->statistics.exec_max));
709 
710         curr->sum_exec_runtime += delta_exec;
711         schedstat_add(cfs_rq, exec_clock, delta_exec);
712 
713         curr->vruntime += calc_delta_fair(delta_exec, curr);
714         update_min_vruntime(cfs_rq);
715 
716         if (entity_is_task(curr)) {
717                 struct task_struct *curtask = task_of(curr);
718 
719                 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
720                 cpuacct_charge(curtask, delta_exec);
721                 account_group_exec_runtime(curtask, delta_exec);
722         }
723 
724         account_cfs_rq_runtime(cfs_rq, delta_exec);
725 }
726 
727 static inline void
728 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
729 {
730         schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
731 }
732 
733 /*
734  * Task is being enqueued - update stats:
735  */
736 static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
737 {
738         /*
739          * Are we enqueueing a waiting task? (for current tasks
740          * a dequeue/enqueue event is a NOP)
741          */
742         if (se != cfs_rq->curr)
743                 update_stats_wait_start(cfs_rq, se);
744 }
745 
746 static void
747 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
748 {
749         schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
750                         rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
751         schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
752         schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
753                         rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
754 #ifdef CONFIG_SCHEDSTATS
755         if (entity_is_task(se)) {
756                 trace_sched_stat_wait(task_of(se),
757                         rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
758         }
759 #endif
760         schedstat_set(se->statistics.wait_start, 0);
761 }
762 
763 static inline void
764 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
765 {
766         /*
767          * Mark the end of the wait period if dequeueing a
768          * waiting task:
769          */
770         if (se != cfs_rq->curr)
771                 update_stats_wait_end(cfs_rq, se);
772 }
773 
774 /*
775  * We are picking a new current task - update its stats:
776  */
777 static inline void
778 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
779 {
780         /*
781          * We are starting a new run period:
782          */
783         se->exec_start = rq_clock_task(rq_of(cfs_rq));
784 }
785 
786 /**************************************************
787  * Scheduling class queueing methods:
788  */
789 
790 #ifdef CONFIG_NUMA_BALANCING
791 /*
792  * Approximate time to scan a full NUMA task in ms. The task scan period is
793  * calculated based on the tasks virtual memory size and
794  * numa_balancing_scan_size.
795  */
796 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
797 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
798 
799 /* Portion of address space to scan in MB */
800 unsigned int sysctl_numa_balancing_scan_size = 256;
801 
802 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
803 unsigned int sysctl_numa_balancing_scan_delay = 1000;
804 
805 static unsigned int task_nr_scan_windows(struct task_struct *p)
806 {
807         unsigned long rss = 0;
808         unsigned long nr_scan_pages;
809 
810         /*
811          * Calculations based on RSS as non-present and empty pages are skipped
812          * by the PTE scanner and NUMA hinting faults should be trapped based
813          * on resident pages
814          */
815         nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
816         rss = get_mm_rss(p->mm);
817         if (!rss)
818                 rss = nr_scan_pages;
819 
820         rss = round_up(rss, nr_scan_pages);
821         return rss / nr_scan_pages;
822 }
823 
824 /* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
825 #define MAX_SCAN_WINDOW 2560
826 
827 static unsigned int task_scan_min(struct task_struct *p)
828 {
829         unsigned int scan, floor;
830         unsigned int windows = 1;
831 
832         if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
833                 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
834         floor = 1000 / windows;
835 
836         scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
837         return max_t(unsigned int, floor, scan);
838 }
839 
840 static unsigned int task_scan_max(struct task_struct *p)
841 {
842         unsigned int smin = task_scan_min(p);
843         unsigned int smax;
844 
845         /* Watch for min being lower than max due to floor calculations */
846         smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
847         return max(smin, smax);
848 }
849 
850 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
851 {
852         rq->nr_numa_running += (p->numa_preferred_nid != -1);
853         rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
854 }
855 
856 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
857 {
858         rq->nr_numa_running -= (p->numa_preferred_nid != -1);
859         rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
860 }
861 
862 struct numa_group {
863         atomic_t refcount;
864 
865         spinlock_t lock; /* nr_tasks, tasks */
866         int nr_tasks;
867         pid_t gid;
868         struct list_head task_list;
869 
870         struct rcu_head rcu;
871         nodemask_t active_nodes;
872         unsigned long total_faults;
873         /*
874          * Faults_cpu is used to decide whether memory should move
875          * towards the CPU. As a consequence, these stats are weighted
876          * more by CPU use than by memory faults.
877          */
878         unsigned long *faults_cpu;
879         unsigned long faults[0];
880 };
881 
882 /* Shared or private faults. */
883 #define NR_NUMA_HINT_FAULT_TYPES 2
884 
885 /* Memory and CPU locality */
886 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
887 
888 /* Averaged statistics, and temporary buffers. */
889 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
890 
891 pid_t task_numa_group_id(struct task_struct *p)
892 {
893         return p->numa_group ? p->numa_group->gid : 0;
894 }
895 
896 static inline int task_faults_idx(int nid, int priv)
897 {
898         return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
899 }
900 
901 static inline unsigned long task_faults(struct task_struct *p, int nid)
902 {
903         if (!p->numa_faults_memory)
904                 return 0;
905 
906         return p->numa_faults_memory[task_faults_idx(nid, 0)] +
907                 p->numa_faults_memory[task_faults_idx(nid, 1)];
908 }
909 
910 static inline unsigned long group_faults(struct task_struct *p, int nid)
911 {
912         if (!p->numa_group)
913                 return 0;
914 
915         return p->numa_group->faults[task_faults_idx(nid, 0)] +
916                 p->numa_group->faults[task_faults_idx(nid, 1)];
917 }
918 
919 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
920 {
921         return group->faults_cpu[task_faults_idx(nid, 0)] +
922                 group->faults_cpu[task_faults_idx(nid, 1)];
923 }
924 
925 /*
926  * These return the fraction of accesses done by a particular task, or
927  * task group, on a particular numa node.  The group weight is given a
928  * larger multiplier, in order to group tasks together that are almost
929  * evenly spread out between numa nodes.
930  */
931 static inline unsigned long task_weight(struct task_struct *p, int nid)
932 {
933         unsigned long total_faults;
934 
935         if (!p->numa_faults_memory)
936                 return 0;
937 
938         total_faults = p->total_numa_faults;
939 
940         if (!total_faults)
941                 return 0;
942 
943         return 1000 * task_faults(p, nid) / total_faults;
944 }
945 
946 static inline unsigned long group_weight(struct task_struct *p, int nid)
947 {
948         if (!p->numa_group || !p->numa_group->total_faults)
949                 return 0;
950 
951         return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
952 }
953 
954 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
955                                 int src_nid, int dst_cpu)
956 {
957         struct numa_group *ng = p->numa_group;
958         int dst_nid = cpu_to_node(dst_cpu);
959         int last_cpupid, this_cpupid;
960 
961         this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
962 
963         /*
964          * Multi-stage node selection is used in conjunction with a periodic
965          * migration fault to build a temporal task<->page relation. By using
966          * a two-stage filter we remove short/unlikely relations.
967          *
968          * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
969          * a task's usage of a particular page (n_p) per total usage of this
970          * page (n_t) (in a given time-span) to a probability.
971          *
972          * Our periodic faults will sample this probability and getting the
973          * same result twice in a row, given these samples are fully
974          * independent, is then given by P(n)^2, provided our sample period
975          * is sufficiently short compared to the usage pattern.
976          *
977          * This quadric squishes small probabilities, making it less likely we
978          * act on an unlikely task<->page relation.
979          */
980         last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
981         if (!cpupid_pid_unset(last_cpupid) &&
982                                 cpupid_to_nid(last_cpupid) != dst_nid)
983                 return false;
984 
985         /* Always allow migrate on private faults */
986         if (cpupid_match_pid(p, last_cpupid))
987                 return true;
988 
989         /* A shared fault, but p->numa_group has not been set up yet. */
990         if (!ng)
991                 return true;
992 
993         /*
994          * Do not migrate if the destination is not a node that
995          * is actively used by this numa group.
996          */
997         if (!node_isset(dst_nid, ng->active_nodes))
998                 return false;
999 
1000         /*
1001          * Source is a node that is not actively used by this
1002          * numa group, while the destination is. Migrate.
1003          */
1004         if (!node_isset(src_nid, ng->active_nodes))
1005                 return true;
1006 
1007         /*
1008          * Both source and destination are nodes in active
1009          * use by this numa group. Maximize memory bandwidth
1010          * by migrating from more heavily used groups, to less
1011          * heavily used ones, spreading the load around.
1012          * Use a 1/4 hysteresis to avoid spurious page movement.
1013          */
1014         return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
1015 }
1016 
1017 static unsigned long weighted_cpuload(const int cpu);
1018 static unsigned long source_load(int cpu, int type);
1019 static unsigned long target_load(int cpu, int type);
1020 static unsigned long power_of(int cpu);
1021 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1022 
1023 /* Cached statistics for all CPUs within a node */
1024 struct numa_stats {
1025         unsigned long nr_running;
1026         unsigned long load;
1027 
1028         /* Total compute capacity of CPUs on a node */
1029         unsigned long power;
1030 
1031         /* Approximate capacity in terms of runnable tasks on a node */
1032         unsigned long capacity;
1033         int has_capacity;
1034 };
1035 
1036 /*
1037  * XXX borrowed from update_sg_lb_stats
1038  */
1039 static void update_numa_stats(struct numa_stats *ns, int nid)
1040 {
1041         int cpu, cpus = 0;
1042 
1043         memset(ns, 0, sizeof(*ns));
1044         for_each_cpu(cpu, cpumask_of_node(nid)) {
1045                 struct rq *rq = cpu_rq(cpu);
1046 
1047                 ns->nr_running += rq->nr_running;
1048                 ns->load += weighted_cpuload(cpu);
1049                 ns->power += power_of(cpu);
1050 
1051                 cpus++;
1052         }
1053 
1054         /*
1055          * If we raced with hotplug and there are no CPUs left in our mask
1056          * the @ns structure is NULL'ed and task_numa_compare() will
1057          * not find this node attractive.
1058          *
1059          * We'll either bail at !has_capacity, or we'll detect a huge imbalance
1060          * and bail there.
1061          */
1062         if (!cpus)
1063                 return;
1064 
1065         ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
1066         ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
1067         ns->has_capacity = (ns->nr_running < ns->capacity);
1068 }
1069 
1070 struct task_numa_env {
1071         struct task_struct *p;
1072 
1073         int src_cpu, src_nid;
1074         int dst_cpu, dst_nid;
1075 
1076         struct numa_stats src_stats, dst_stats;
1077 
1078         int imbalance_pct;
1079 
1080         struct task_struct *best_task;
1081         long best_imp;
1082         int best_cpu;
1083 };
1084 
1085 static void task_numa_assign(struct task_numa_env *env,
1086                              struct task_struct *p, long imp)
1087 {
1088         if (env->best_task)
1089                 put_task_struct(env->best_task);
1090         if (p)
1091                 get_task_struct(p);
1092 
1093         env->best_task = p;
1094         env->best_imp = imp;
1095         env->best_cpu = env->dst_cpu;
1096 }
1097 
1098 /*
1099  * This checks if the overall compute and NUMA accesses of the system would
1100  * be improved if the source tasks was migrated to the target dst_cpu taking
1101  * into account that it might be best if task running on the dst_cpu should
1102  * be exchanged with the source task
1103  */
1104 static void task_numa_compare(struct task_numa_env *env,
1105                               long taskimp, long groupimp)
1106 {
1107         struct rq *src_rq = cpu_rq(env->src_cpu);
1108         struct rq *dst_rq = cpu_rq(env->dst_cpu);
1109         struct task_struct *cur;
1110         long dst_load, src_load;
1111         long load;
1112         long imp = (groupimp > 0) ? groupimp : taskimp;
1113 
1114         rcu_read_lock();
1115         cur = ACCESS_ONCE(dst_rq->curr);
1116         if (cur->pid == 0) /* idle */
1117                 cur = NULL;
1118 
1119         /*
1120          * "imp" is the fault differential for the source task between the
1121          * source and destination node. Calculate the total differential for
1122          * the source task and potential destination task. The more negative
1123          * the value is, the more rmeote accesses that would be expected to
1124          * be incurred if the tasks were swapped.
1125          */
1126         if (cur) {
1127                 /* Skip this swap candidate if cannot move to the source cpu */
1128                 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1129                         goto unlock;
1130 
1131                 /*
1132                  * If dst and source tasks are in the same NUMA group, or not
1133                  * in any group then look only at task weights.
1134                  */
1135                 if (cur->numa_group == env->p->numa_group) {
1136                         imp = taskimp + task_weight(cur, env->src_nid) -
1137                               task_weight(cur, env->dst_nid);
1138                         /*
1139                          * Add some hysteresis to prevent swapping the
1140                          * tasks within a group over tiny differences.
1141                          */
1142                         if (cur->numa_group)
1143                                 imp -= imp/16;
1144                 } else {
1145                         /*
1146                          * Compare the group weights. If a task is all by
1147                          * itself (not part of a group), use the task weight
1148                          * instead.
1149                          */
1150                         if (env->p->numa_group)
1151                                 imp = groupimp;
1152                         else
1153                                 imp = taskimp;
1154 
1155                         if (cur->numa_group)
1156                                 imp += group_weight(cur, env->src_nid) -
1157                                        group_weight(cur, env->dst_nid);
1158                         else
1159                                 imp += task_weight(cur, env->src_nid) -
1160                                        task_weight(cur, env->dst_nid);
1161                 }
1162         }
1163 
1164         if (imp < env->best_imp)
1165                 goto unlock;
1166 
1167         if (!cur) {
1168                 /* Is there capacity at our destination? */
1169                 if (env->src_stats.has_capacity &&
1170                     !env->dst_stats.has_capacity)
1171                         goto unlock;
1172 
1173                 goto balance;
1174         }
1175 
1176         /* Balance doesn't matter much if we're running a task per cpu */
1177         if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
1178                 goto assign;
1179 
1180         /*
1181          * In the overloaded case, try and keep the load balanced.
1182          */
1183 balance:
1184         dst_load = env->dst_stats.load;
1185         src_load = env->src_stats.load;
1186 
1187         /* XXX missing power terms */
1188         load = task_h_load(env->p);
1189         dst_load += load;
1190         src_load -= load;
1191 
1192         if (cur) {
1193                 load = task_h_load(cur);
1194                 dst_load -= load;
1195                 src_load += load;
1196         }
1197 
1198         /* make src_load the smaller */
1199         if (dst_load < src_load)
1200                 swap(dst_load, src_load);
1201 
1202         if (src_load * env->imbalance_pct < dst_load * 100)
1203                 goto unlock;
1204 
1205 assign:
1206         task_numa_assign(env, cur, imp);
1207 unlock:
1208         rcu_read_unlock();
1209 }
1210 
1211 static void task_numa_find_cpu(struct task_numa_env *env,
1212                                 long taskimp, long groupimp)
1213 {
1214         int cpu;
1215 
1216         for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1217                 /* Skip this CPU if the source task cannot migrate */
1218                 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1219                         continue;
1220 
1221                 env->dst_cpu = cpu;
1222                 task_numa_compare(env, taskimp, groupimp);
1223         }
1224 }
1225 
1226 static int task_numa_migrate(struct task_struct *p)
1227 {
1228         struct task_numa_env env = {
1229                 .p = p,
1230 
1231                 .src_cpu = task_cpu(p),
1232                 .src_nid = task_node(p),
1233 
1234                 .imbalance_pct = 112,
1235 
1236                 .best_task = NULL,
1237                 .best_imp = 0,
1238                 .best_cpu = -1
1239         };
1240         struct sched_domain *sd;
1241         unsigned long taskweight, groupweight;
1242         int nid, ret;
1243         long taskimp, groupimp;
1244 
1245         /*
1246          * Pick the lowest SD_NUMA domain, as that would have the smallest
1247          * imbalance and would be the first to start moving tasks about.
1248          *
1249          * And we want to avoid any moving of tasks about, as that would create
1250          * random movement of tasks -- counter the numa conditions we're trying
1251          * to satisfy here.
1252          */
1253         rcu_read_lock();
1254         sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1255         if (sd)
1256                 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1257         rcu_read_unlock();
1258 
1259         /*
1260          * Cpusets can break the scheduler domain tree into smaller
1261          * balance domains, some of which do not cross NUMA boundaries.
1262          * Tasks that are "trapped" in such domains cannot be migrated
1263          * elsewhere, so there is no point in (re)trying.
1264          */
1265         if (unlikely(!sd)) {
1266                 p->numa_preferred_nid = task_node(p);
1267                 return -EINVAL;
1268         }
1269 
1270         taskweight = task_weight(p, env.src_nid);
1271         groupweight = group_weight(p, env.src_nid);
1272         update_numa_stats(&env.src_stats, env.src_nid);
1273         env.dst_nid = p->numa_preferred_nid;
1274         taskimp = task_weight(p, env.dst_nid) - taskweight;
1275         groupimp = group_weight(p, env.dst_nid) - groupweight;
1276         update_numa_stats(&env.dst_stats, env.dst_nid);
1277 
1278         /* If the preferred nid has capacity, try to use it. */
1279         if (env.dst_stats.has_capacity)
1280                 task_numa_find_cpu(&env, taskimp, groupimp);
1281 
1282         /* No space available on the preferred nid. Look elsewhere. */
1283         if (env.best_cpu == -1) {
1284                 for_each_online_node(nid) {
1285                         if (nid == env.src_nid || nid == p->numa_preferred_nid)
1286                                 continue;
1287 
1288                         /* Only consider nodes where both task and groups benefit */
1289                         taskimp = task_weight(p, nid) - taskweight;
1290                         groupimp = group_weight(p, nid) - groupweight;
1291                         if (taskimp < 0 && groupimp < 0)
1292                                 continue;
1293 
1294                         env.dst_nid = nid;
1295                         update_numa_stats(&env.dst_stats, env.dst_nid);
1296                         task_numa_find_cpu(&env, taskimp, groupimp);
1297                 }
1298         }
1299 
1300         /* No better CPU than the current one was found. */
1301         if (env.best_cpu == -1)
1302                 return -EAGAIN;
1303 
1304         sched_setnuma(p, env.dst_nid);
1305 
1306         /*
1307          * Reset the scan period if the task is being rescheduled on an
1308          * alternative node to recheck if the tasks is now properly placed.
1309          */
1310         p->numa_scan_period = task_scan_min(p);
1311 
1312         if (env.best_task == NULL) {
1313                 ret = migrate_task_to(p, env.best_cpu);
1314                 if (ret != 0)
1315                         trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1316                 return ret;
1317         }
1318 
1319         ret = migrate_swap(p, env.best_task);
1320         if (ret != 0)
1321                 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1322         put_task_struct(env.best_task);
1323         return ret;
1324 }
1325 
1326 /* Attempt to migrate a task to a CPU on the preferred node. */
1327 static void numa_migrate_preferred(struct task_struct *p)
1328 {
1329         /* This task has no NUMA fault statistics yet */
1330         if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
1331                 return;
1332 
1333         /* Periodically retry migrating the task to the preferred node */
1334         p->numa_migrate_retry = jiffies + HZ;
1335 
1336         /* Success if task is already running on preferred CPU */
1337         if (task_node(p) == p->numa_preferred_nid)
1338                 return;
1339 
1340         /* Otherwise, try migrate to a CPU on the preferred node */
1341         task_numa_migrate(p);
1342 }
1343 
1344 /*
1345  * Find the nodes on which the workload is actively running. We do this by
1346  * tracking the nodes from which NUMA hinting faults are triggered. This can
1347  * be different from the set of nodes where the workload's memory is currently
1348  * located.
1349  *
1350  * The bitmask is used to make smarter decisions on when to do NUMA page
1351  * migrations, To prevent flip-flopping, and excessive page migrations, nodes
1352  * are added when they cause over 6/16 of the maximum number of faults, but
1353  * only removed when they drop below 3/16.
1354  */
1355 static void update_numa_active_node_mask(struct numa_group *numa_group)
1356 {
1357         unsigned long faults, max_faults = 0;
1358         int nid;
1359 
1360         for_each_online_node(nid) {
1361                 faults = group_faults_cpu(numa_group, nid);
1362                 if (faults > max_faults)
1363                         max_faults = faults;
1364         }
1365 
1366         for_each_online_node(nid) {
1367                 faults = group_faults_cpu(numa_group, nid);
1368                 if (!node_isset(nid, numa_group->active_nodes)) {
1369                         if (faults > max_faults * 6 / 16)
1370                                 node_set(nid, numa_group->active_nodes);
1371                 } else if (faults < max_faults * 3 / 16)
1372                         node_clear(nid, numa_group->active_nodes);
1373         }
1374 }
1375 
1376 /*
1377  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1378  * increments. The more local the fault statistics are, the higher the scan
1379  * period will be for the next scan window. If local/remote ratio is below
1380  * NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) the
1381  * scan period will decrease
1382  */
1383 #define NUMA_PERIOD_SLOTS 10
1384 #define NUMA_PERIOD_THRESHOLD 3
1385 
1386 /*
1387  * Increase the scan period (slow down scanning) if the majority of
1388  * our memory is already on our local node, or if the majority of
1389  * the page accesses are shared with other processes.
1390  * Otherwise, decrease the scan period.
1391  */
1392 static void update_task_scan_period(struct task_struct *p,
1393                         unsigned long shared, unsigned long private)
1394 {
1395         unsigned int period_slot;
1396         int ratio;
1397         int diff;
1398 
1399         unsigned long remote = p->numa_faults_locality[0];
1400         unsigned long local = p->numa_faults_locality[1];
1401 
1402         /*
1403          * If there were no record hinting faults then either the task is
1404          * completely idle or all activity is areas that are not of interest
1405          * to automatic numa balancing. Scan slower
1406          */
1407         if (local + shared == 0) {
1408                 p->numa_scan_period = min(p->numa_scan_period_max,
1409                         p->numa_scan_period << 1);
1410 
1411                 p->mm->numa_next_scan = jiffies +
1412                         msecs_to_jiffies(p->numa_scan_period);
1413 
1414                 return;
1415         }
1416 
1417         /*
1418          * Prepare to scale scan period relative to the current period.
1419          *       == NUMA_PERIOD_THRESHOLD scan period stays the same
1420          *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1421          *       >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1422          */
1423         period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1424         ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1425         if (ratio >= NUMA_PERIOD_THRESHOLD) {
1426                 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1427                 if (!slot)
1428                         slot = 1;
1429                 diff = slot * period_slot;
1430         } else {
1431                 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1432 
1433                 /*
1434                  * Scale scan rate increases based on sharing. There is an
1435                  * inverse relationship between the degree of sharing and
1436                  * the adjustment made to the scanning period. Broadly
1437                  * speaking the intent is that there is little point
1438                  * scanning faster if shared accesses dominate as it may
1439                  * simply bounce migrations uselessly
1440                  */
1441                 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
1442                 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1443         }
1444 
1445         p->numa_scan_period = clamp(p->numa_scan_period + diff,
1446                         task_scan_min(p), task_scan_max(p));
1447         memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1448 }
1449 
1450 /*
1451  * Get the fraction of time the task has been running since the last
1452  * NUMA placement cycle. The scheduler keeps similar statistics, but
1453  * decays those on a 32ms period, which is orders of magnitude off
1454  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1455  * stats only if the task is so new there are no NUMA statistics yet.
1456  */
1457 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1458 {
1459         u64 runtime, delta, now;
1460         /* Use the start of this time slice to avoid calculations. */
1461         now = p->se.exec_start;
1462         runtime = p->se.sum_exec_runtime;
1463 
1464         if (p->last_task_numa_placement) {
1465                 delta = runtime - p->last_sum_exec_runtime;
1466                 *period = now - p->last_task_numa_placement;
1467         } else {
1468                 delta = p->se.avg.runnable_avg_sum;
1469                 *period = p->se.avg.runnable_avg_period;
1470         }
1471 
1472         p->last_sum_exec_runtime = runtime;
1473         p->last_task_numa_placement = now;
1474 
1475         return delta;
1476 }
1477 
1478 static void task_numa_placement(struct task_struct *p)
1479 {
1480         int seq, nid, max_nid = -1, max_group_nid = -1;
1481         unsigned long max_faults = 0, max_group_faults = 0;
1482         unsigned long fault_types[2] = { 0, 0 };
1483         unsigned long total_faults;
1484         u64 runtime, period;
1485         spinlock_t *group_lock = NULL;
1486 
1487         seq = ACCESS_ONCE(p->mm->numa_scan_seq);
1488         if (p->numa_scan_seq == seq)
1489                 return;
1490         p->numa_scan_seq = seq;
1491         p->numa_scan_period_max = task_scan_max(p);
1492 
1493         total_faults = p->numa_faults_locality[0] +
1494                        p->numa_faults_locality[1];
1495         runtime = numa_get_avg_runtime(p, &period);
1496 
1497         /* If the task is part of a group prevent parallel updates to group stats */
1498         if (p->numa_group) {
1499                 group_lock = &p->numa_group->lock;
1500                 spin_lock_irq(group_lock);
1501         }
1502 
1503         /* Find the node with the highest number of faults */
1504         for_each_online_node(nid) {
1505                 unsigned long faults = 0, group_faults = 0;
1506                 int priv, i;
1507 
1508                 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
1509                         long diff, f_diff, f_weight;
1510 
1511                         i = task_faults_idx(nid, priv);
1512 
1513                         /* Decay existing window, copy faults since last scan */
1514                         diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
1515                         fault_types[priv] += p->numa_faults_buffer_memory[i];
1516                         p->numa_faults_buffer_memory[i] = 0;
1517 
1518                         /*
1519                          * Normalize the faults_from, so all tasks in a group
1520                          * count according to CPU use, instead of by the raw
1521                          * number of faults. Tasks with little runtime have
1522                          * little over-all impact on throughput, and thus their
1523                          * faults are less important.
1524                          */
1525                         f_weight = div64_u64(runtime << 16, period + 1);
1526                         f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
1527                                    (total_faults + 1);
1528                         f_diff = f_weight - p->numa_faults_cpu[i] / 2;
1529                         p->numa_faults_buffer_cpu[i] = 0;
1530 
1531                         p->numa_faults_memory[i] += diff;
1532                         p->numa_faults_cpu[i] += f_diff;
1533                         faults += p->numa_faults_memory[i];
1534                         p->total_numa_faults += diff;
1535                         if (p->numa_group) {
1536                                 /* safe because we can only change our own group */
1537                                 p->numa_group->faults[i] += diff;
1538                                 p->numa_group->faults_cpu[i] += f_diff;
1539                                 p->numa_group->total_faults += diff;
1540                                 group_faults += p->numa_group->faults[i];
1541                         }
1542                 }
1543 
1544                 if (faults > max_faults) {
1545                         max_faults = faults;
1546                         max_nid = nid;
1547                 }
1548 
1549                 if (group_faults > max_group_faults) {
1550                         max_group_faults = group_faults;
1551                         max_group_nid = nid;
1552                 }
1553         }
1554 
1555         update_task_scan_period(p, fault_types[0], fault_types[1]);
1556 
1557         if (p->numa_group) {
1558                 update_numa_active_node_mask(p->numa_group);
1559                 /*
1560                  * If the preferred task and group nids are different,
1561                  * iterate over the nodes again to find the best place.
1562                  */
1563                 if (max_nid != max_group_nid) {
1564                         unsigned long weight, max_weight = 0;
1565 
1566                         for_each_online_node(nid) {
1567                                 weight = task_weight(p, nid) + group_weight(p, nid);
1568                                 if (weight > max_weight) {
1569                                         max_weight = weight;
1570                                         max_nid = nid;
1571                                 }
1572                         }
1573                 }
1574 
1575                 spin_unlock_irq(group_lock);
1576         }
1577 
1578         /* Preferred node as the node with the most faults */
1579         if (max_faults && max_nid != p->numa_preferred_nid) {
1580                 /* Update the preferred nid and migrate task if possible */
1581                 sched_setnuma(p, max_nid);
1582                 numa_migrate_preferred(p);
1583         }
1584 }
1585 
1586 static inline int get_numa_group(struct numa_group *grp)
1587 {
1588         return atomic_inc_not_zero(&grp->refcount);
1589 }
1590 
1591 static inline void put_numa_group(struct numa_group *grp)
1592 {
1593         if (atomic_dec_and_test(&grp->refcount))
1594                 kfree_rcu(grp, rcu);
1595 }
1596 
1597 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1598                         int *priv)
1599 {
1600         struct numa_group *grp, *my_grp;
1601         struct task_struct *tsk;
1602         bool join = false;
1603         int cpu = cpupid_to_cpu(cpupid);
1604         int i;
1605 
1606         if (unlikely(!p->numa_group)) {
1607                 unsigned int size = sizeof(struct numa_group) +
1608                                     4*nr_node_ids*sizeof(unsigned long);
1609 
1610                 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1611                 if (!grp)
1612                         return;
1613 
1614                 atomic_set(&grp->refcount, 1);
1615                 spin_lock_init(&grp->lock);
1616                 INIT_LIST_HEAD(&grp->task_list);
1617                 grp->gid = p->pid;
1618                 /* Second half of the array tracks nids where faults happen */
1619                 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1620                                                 nr_node_ids;
1621 
1622                 node_set(task_node(current), grp->active_nodes);
1623 
1624                 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1625                         grp->faults[i] = p->numa_faults_memory[i];
1626 
1627                 grp->total_faults = p->total_numa_faults;
1628 
1629                 list_add(&p->numa_entry, &grp->task_list);
1630                 grp->nr_tasks++;
1631                 rcu_assign_pointer(p->numa_group, grp);
1632         }
1633 
1634         rcu_read_lock();
1635         tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1636 
1637         if (!cpupid_match_pid(tsk, cpupid))
1638                 goto no_join;
1639 
1640         grp = rcu_dereference(tsk->numa_group);
1641         if (!grp)
1642                 goto no_join;
1643 
1644         my_grp = p->numa_group;
1645         if (grp == my_grp)
1646                 goto no_join;
1647 
1648         /*
1649          * Only join the other group if its bigger; if we're the bigger group,
1650          * the other task will join us.
1651          */
1652         if (my_grp->nr_tasks > grp->nr_tasks)
1653                 goto no_join;
1654 
1655         /*
1656          * Tie-break on the grp address.
1657          */
1658         if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
1659                 goto no_join;
1660 
1661         /* Always join threads in the same process. */
1662         if (tsk->mm == current->mm)
1663                 join = true;
1664 
1665         /* Simple filter to avoid false positives due to PID collisions */
1666         if (flags & TNF_SHARED)
1667                 join = true;
1668 
1669         /* Update priv based on whether false sharing was detected */
1670         *priv = !join;
1671 
1672         if (join && !get_numa_group(grp))
1673                 goto no_join;
1674 
1675         rcu_read_unlock();
1676 
1677         if (!join)
1678                 return;
1679 
1680         BUG_ON(irqs_disabled());
1681         double_lock_irq(&my_grp->lock, &grp->lock);
1682 
1683         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
1684                 my_grp->faults[i] -= p->numa_faults_memory[i];
1685                 grp->faults[i] += p->numa_faults_memory[i];
1686         }
1687         my_grp->total_faults -= p->total_numa_faults;
1688         grp->total_faults += p->total_numa_faults;
1689 
1690         list_move(&p->numa_entry, &grp->task_list);
1691         my_grp->nr_tasks--;
1692         grp->nr_tasks++;
1693 
1694         spin_unlock(&my_grp->lock);
1695         spin_unlock_irq(&grp->lock);
1696 
1697         rcu_assign_pointer(p->numa_group, grp);
1698 
1699         put_numa_group(my_grp);
1700         return;
1701 
1702 no_join:
1703         rcu_read_unlock();
1704         return;
1705 }
1706 
1707 void task_numa_free(struct task_struct *p)
1708 {
1709         struct numa_group *grp = p->numa_group;
1710         void *numa_faults = p->numa_faults_memory;
1711         unsigned long flags;
1712         int i;
1713 
1714         if (grp) {
1715                 spin_lock_irqsave(&grp->lock, flags);
1716                 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1717                         grp->faults[i] -= p->numa_faults_memory[i];
1718                 grp->total_faults -= p->total_numa_faults;
1719 
1720                 list_del(&p->numa_entry);
1721                 grp->nr_tasks--;
1722                 spin_unlock_irqrestore(&grp->lock, flags);
1723                 rcu_assign_pointer(p->numa_group, NULL);
1724                 put_numa_group(grp);
1725         }
1726 
1727         p->numa_faults_memory = NULL;
1728         p->numa_faults_buffer_memory = NULL;
1729         p->numa_faults_cpu= NULL;
1730         p->numa_faults_buffer_cpu = NULL;
1731         kfree(numa_faults);
1732 }
1733 
1734 /*
1735  * Got a PROT_NONE fault for a page on @node.
1736  */
1737 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
1738 {
1739         struct task_struct *p = current;
1740         bool migrated = flags & TNF_MIGRATED;
1741         int cpu_node = task_node(current);
1742         int priv;
1743 
1744         if (!numabalancing_enabled)
1745                 return;
1746 
1747         /* for example, ksmd faulting in a user's mm */
1748         if (!p->mm)
1749                 return;
1750 
1751         /* Do not worry about placement if exiting */
1752         if (p->state == TASK_DEAD)
1753                 return;
1754 
1755         /* Allocate buffer to track faults on a per-node basis */
1756         if (unlikely(!p->numa_faults_memory)) {
1757                 int size = sizeof(*p->numa_faults_memory) *
1758                            NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
1759 
1760                 p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
1761                 if (!p->numa_faults_memory)
1762                         return;
1763 
1764                 BUG_ON(p->numa_faults_buffer_memory);
1765                 /*
1766                  * The averaged statistics, shared & private, memory & cpu,
1767                  * occupy the first half of the array. The second half of the
1768                  * array is for current counters, which are averaged into the
1769                  * first set by task_numa_placement.
1770                  */
1771                 p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
1772                 p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
1773                 p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
1774                 p->total_numa_faults = 0;
1775                 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1776         }
1777 
1778         /*
1779          * First accesses are treated as private, otherwise consider accesses
1780          * to be private if the accessing pid has not changed
1781          */
1782         if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1783                 priv = 1;
1784         } else {
1785                 priv = cpupid_match_pid(p, last_cpupid);
1786                 if (!priv && !(flags & TNF_NO_GROUP))
1787                         task_numa_group(p, last_cpupid, flags, &priv);
1788         }
1789 
1790         task_numa_placement(p);
1791 
1792         /*
1793          * Retry task to preferred node migration periodically, in case it
1794          * case it previously failed, or the scheduler moved us.
1795          */
1796         if (time_after(jiffies, p->numa_migrate_retry))
1797                 numa_migrate_preferred(p);
1798 
1799         if (migrated)
1800                 p->numa_pages_migrated += pages;
1801 
1802         p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
1803         p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
1804         p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
1805 }
1806 
1807 static void reset_ptenuma_scan(struct task_struct *p)
1808 {
1809         ACCESS_ONCE(p->mm->numa_scan_seq)++;
1810         p->mm->numa_scan_offset = 0;
1811 }
1812 
1813 /*
1814  * The expensive part of numa migration is done from task_work context.
1815  * Triggered from task_tick_numa().
1816  */
1817 void task_numa_work(struct callback_head *work)
1818 {
1819         unsigned long migrate, next_scan, now = jiffies;
1820         struct task_struct *p = current;
1821         struct mm_struct *mm = p->mm;
1822         struct vm_area_struct *vma;
1823         unsigned long start, end;
1824         unsigned long nr_pte_updates = 0;
1825         long pages;
1826 
1827         WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1828 
1829         work->next = work; /* protect against double add */
1830         /*
1831          * Who cares about NUMA placement when they're dying.
1832          *
1833          * NOTE: make sure not to dereference p->mm before this check,
1834          * exit_task_work() happens _after_ exit_mm() so we could be called
1835          * without p->mm even though we still had it when we enqueued this
1836          * work.
1837          */
1838         if (p->flags & PF_EXITING)
1839                 return;
1840 
1841         if (!mm->numa_next_scan) {
1842                 mm->numa_next_scan = now +
1843                         msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
1844         }
1845 
1846         /*
1847          * Enforce maximal scan/migration frequency..
1848          */
1849         migrate = mm->numa_next_scan;
1850         if (time_before(now, migrate))
1851                 return;
1852 
1853         if (p->numa_scan_period == 0) {
1854                 p->numa_scan_period_max = task_scan_max(p);
1855                 p->numa_scan_period = task_scan_min(p);
1856         }
1857 
1858         next_scan = now + msecs_to_jiffies(p->numa_scan_period);
1859         if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1860                 return;
1861 
1862         /*
1863          * Delay this task enough that another task of this mm will likely win
1864          * the next time around.
1865          */
1866         p->node_stamp += 2 * TICK_NSEC;
1867 
1868         start = mm->numa_scan_offset;
1869         pages = sysctl_numa_balancing_scan_size;
1870         pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1871         if (!pages)
1872                 return;
1873 
1874         down_read(&mm->mmap_sem);
1875         vma = find_vma(mm, start);
1876         if (!vma) {
1877                 reset_ptenuma_scan(p);
1878                 start = 0;
1879                 vma = mm->mmap;
1880         }
1881         for (; vma; vma = vma->vm_next) {
1882                 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
1883                         continue;
1884 
1885                 /*
1886                  * Shared library pages mapped by multiple processes are not
1887                  * migrated as it is expected they are cache replicated. Avoid
1888                  * hinting faults in read-only file-backed mappings or the vdso
1889                  * as migrating the pages will be of marginal benefit.
1890                  */
1891                 if (!vma->vm_mm ||
1892                     (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1893                         continue;
1894 
1895                 /*
1896                  * Skip inaccessible VMAs to avoid any confusion between
1897                  * PROT_NONE and NUMA hinting ptes
1898                  */
1899                 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1900                         continue;
1901 
1902                 do {
1903                         start = max(start, vma->vm_start);
1904                         end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1905                         end = min(end, vma->vm_end);
1906                         nr_pte_updates += change_prot_numa(vma, start, end);
1907 
1908                         /*
1909                          * Scan sysctl_numa_balancing_scan_size but ensure that
1910                          * at least one PTE is updated so that unused virtual
1911                          * address space is quickly skipped.
1912                          */
1913                         if (nr_pte_updates)
1914                                 pages -= (end - start) >> PAGE_SHIFT;
1915 
1916                         start = end;
1917                         if (pages <= 0)
1918                                 goto out;
1919 
1920                         cond_resched();
1921                 } while (end != vma->vm_end);
1922         }
1923 
1924 out:
1925         /*
1926          * It is possible to reach the end of the VMA list but the last few
1927          * VMAs are not guaranteed to the vma_migratable. If they are not, we
1928          * would find the !migratable VMA on the next scan but not reset the
1929          * scanner to the start so check it now.
1930          */
1931         if (vma)
1932                 mm->numa_scan_offset = start;
1933         else
1934                 reset_ptenuma_scan(p);
1935         up_read(&mm->mmap_sem);
1936 }
1937 
1938 /*
1939  * Drive the periodic memory faults..
1940  */
1941 void task_tick_numa(struct rq *rq, struct task_struct *curr)
1942 {
1943         struct callback_head *work = &curr->numa_work;
1944         u64 period, now;
1945 
1946         /*
1947          * We don't care about NUMA placement if we don't have memory.
1948          */
1949         if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1950                 return;
1951 
1952         /*
1953          * Using runtime rather than walltime has the dual advantage that
1954          * we (mostly) drive the selection from busy threads and that the
1955          * task needs to have done some actual work before we bother with
1956          * NUMA placement.
1957          */
1958         now = curr->se.sum_exec_runtime;
1959         period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1960 
1961         if (now - curr->node_stamp > period) {
1962                 if (!curr->node_stamp)
1963                         curr->numa_scan_period = task_scan_min(curr);
1964                 curr->node_stamp += period;
1965 
1966                 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1967                         init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1968                         task_work_add(curr, work, true);
1969                 }
1970         }
1971 }
1972 #else
1973 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1974 {
1975 }
1976 
1977 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1978 {
1979 }
1980 
1981 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1982 {
1983 }
1984 #endif /* CONFIG_NUMA_BALANCING */
1985 
1986 static void
1987 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1988 {
1989         update_load_add(&cfs_rq->load, se->load.weight);
1990         if (!parent_entity(se))
1991                 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
1992 #ifdef CONFIG_SMP
1993         if (entity_is_task(se)) {
1994                 struct rq *rq = rq_of(cfs_rq);
1995 
1996                 account_numa_enqueue(rq, task_of(se));
1997                 list_add(&se->group_node, &rq->cfs_tasks);
1998         }
1999 #endif
2000         cfs_rq->nr_running++;
2001 }
2002 
2003 static void
2004 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2005 {
2006         update_load_sub(&cfs_rq->load, se->load.weight);
2007         if (!parent_entity(se))
2008                 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2009         if (entity_is_task(se)) {
2010                 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2011                 list_del_init(&se->group_node);
2012         }
2013         cfs_rq->nr_running--;
2014 }
2015 
2016 #ifdef CONFIG_FAIR_GROUP_SCHED
2017 # ifdef CONFIG_SMP
2018 static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2019 {
2020         long tg_weight;
2021 
2022         /*
2023          * Use this CPU's actual weight instead of the last load_contribution
2024          * to gain a more accurate current total weight. See
2025          * update_cfs_rq_load_contribution().
2026          */
2027         tg_weight = atomic_long_read(&tg->load_avg);
2028         tg_weight -= cfs_rq->tg_load_contrib;
2029         tg_weight += cfs_rq->load.weight;
2030 
2031         return tg_weight;
2032 }
2033 
2034 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2035 {
2036         long tg_weight, load, shares;
2037 
2038         tg_weight = calc_tg_weight(tg, cfs_rq);
2039         load = cfs_rq->load.weight;
2040 
2041         shares = (tg->shares * load);
2042         if (tg_weight)
2043                 shares /= tg_weight;
2044 
2045         if (shares < MIN_SHARES)
2046                 shares = MIN_SHARES;
2047         if (shares > tg->shares)
2048                 shares = tg->shares;
2049 
2050         return shares;
2051 }
2052 # else /* CONFIG_SMP */
2053 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2054 {
2055         return tg->shares;
2056 }
2057 # endif /* CONFIG_SMP */
2058 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2059                             unsigned long weight)
2060 {
2061         if (se->on_rq) {
2062                 /* commit outstanding execution time */
2063                 if (cfs_rq->curr == se)
2064                         update_curr(cfs_rq);
2065                 account_entity_dequeue(cfs_rq, se);
2066         }
2067 
2068         update_load_set(&se->load, weight);
2069 
2070         if (se->on_rq)
2071                 account_entity_enqueue(cfs_rq, se);
2072 }
2073 
2074 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2075 
2076 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2077 {
2078         struct task_group *tg;
2079         struct sched_entity *se;
2080         long shares;
2081 
2082         tg = cfs_rq->tg;
2083         se = tg->se[cpu_of(rq_of(cfs_rq))];
2084         if (!se || throttled_hierarchy(cfs_rq))
2085                 return;
2086 #ifndef CONFIG_SMP
2087         if (likely(se->load.weight == tg->shares))
2088                 return;
2089 #endif
2090         shares = calc_cfs_shares(cfs_rq, tg);
2091 
2092         reweight_entity(cfs_rq_of(se), se, shares);
2093 }
2094 #else /* CONFIG_FAIR_GROUP_SCHED */
2095 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2096 {
2097 }
2098 #endif /* CONFIG_FAIR_GROUP_SCHED */
2099 
2100 #ifdef CONFIG_SMP
2101 /*
2102  * We choose a half-life close to 1 scheduling period.
2103  * Note: The tables below are dependent on this value.
2104  */
2105 #define LOAD_AVG_PERIOD 32
2106 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
2107 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
2108 
2109 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2110 static const u32 runnable_avg_yN_inv[] = {
2111         0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2112         0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2113         0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2114         0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2115         0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2116         0x85aac367, 0x82cd8698,
2117 };
2118 
2119 /*
2120  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
2121  * over-estimates when re-combining.
2122  */
2123 static const u32 runnable_avg_yN_sum[] = {
2124             0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2125          9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2126         17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2127 };
2128 
2129 /*
2130  * Approximate:
2131  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
2132  */
2133 static __always_inline u64 decay_load(u64 val, u64 n)
2134 {
2135         unsigned int local_n;
2136 
2137         if (!n)
2138                 return val;
2139         else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2140                 return 0;
2141 
2142         /* after bounds checking we can collapse to 32-bit */
2143         local_n = n;
2144 
2145         /*
2146          * As y^PERIOD = 1/2, we can combine
2147          *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
2148          * With a look-up table which covers k^n (n<PERIOD)
2149          *
2150          * To achieve constant time decay_load.
2151          */
2152         if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2153                 val >>= local_n / LOAD_AVG_PERIOD;
2154                 local_n %= LOAD_AVG_PERIOD;
2155         }
2156 
2157         val *= runnable_avg_yN_inv[local_n];
2158         /* We don't use SRR here since we always want to round down. */
2159         return val >> 32;
2160 }
2161 
2162 /*
2163  * For updates fully spanning n periods, the contribution to runnable
2164  * average will be: \Sum 1024*y^n
2165  *
2166  * We can compute this reasonably efficiently by combining:
2167  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
2168  */
2169 static u32 __compute_runnable_contrib(u64 n)
2170 {
2171         u32 contrib = 0;
2172 
2173         if (likely(n <= LOAD_AVG_PERIOD))
2174                 return runnable_avg_yN_sum[n];
2175         else if (unlikely(n >= LOAD_AVG_MAX_N))
2176                 return LOAD_AVG_MAX;
2177 
2178         /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
2179         do {
2180                 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2181                 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2182 
2183                 n -= LOAD_AVG_PERIOD;
2184         } while (n > LOAD_AVG_PERIOD);
2185 
2186         contrib = decay_load(contrib, n);
2187         return contrib + runnable_avg_yN_sum[n];
2188 }
2189 
2190 /*
2191  * We can represent the historical contribution to runnable average as the
2192  * coefficients of a geometric series.  To do this we sub-divide our runnable
2193  * history into segments of approximately 1ms (1024us); label the segment that
2194  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2195  *
2196  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2197  *      p0            p1           p2
2198  *     (now)       (~1ms ago)  (~2ms ago)
2199  *
2200  * Let u_i denote the fraction of p_i that the entity was runnable.
2201  *
2202  * We then designate the fractions u_i as our co-efficients, yielding the
2203  * following representation of historical load:
2204  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2205  *
2206  * We choose y based on the with of a reasonably scheduling period, fixing:
2207  *   y^32 = 0.5
2208  *
2209  * This means that the contribution to load ~32ms ago (u_32) will be weighted
2210  * approximately half as much as the contribution to load within the last ms
2211  * (u_0).
2212  *
2213  * When a period "rolls over" and we have new u_0`, multiplying the previous
2214  * sum again by y is sufficient to update:
2215  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2216  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2217  */
2218 static __always_inline int __update_entity_runnable_avg(u64 now,
2219                                                         struct sched_avg *sa,
2220                                                         int runnable)
2221 {
2222         u64 delta, periods;
2223         u32 runnable_contrib;
2224         int delta_w, decayed = 0;
2225 
2226         delta = now - sa->last_runnable_update;
2227         /*
2228          * This should only happen when time goes backwards, which it
2229          * unfortunately does during sched clock init when we swap over to TSC.
2230          */
2231         if ((s64)delta < 0) {
2232                 sa->last_runnable_update = now;
2233                 return 0;
2234         }
2235 
2236         /*
2237          * Use 1024ns as the unit of measurement since it's a reasonable
2238          * approximation of 1us and fast to compute.
2239          */
2240         delta >>= 10;
2241         if (!delta)
2242                 return 0;
2243         sa->last_runnable_update = now;
2244 
2245         /* delta_w is the amount already accumulated against our next period */
2246         delta_w = sa->runnable_avg_period % 1024;
2247         if (delta + delta_w >= 1024) {
2248                 /* period roll-over */
2249                 decayed = 1;
2250 
2251                 /*
2252                  * Now that we know we're crossing a period boundary, figure
2253                  * out how much from delta we need to complete the current
2254                  * period and accrue it.
2255                  */
2256                 delta_w = 1024 - delta_w;
2257                 if (runnable)
2258                         sa->runnable_avg_sum += delta_w;
2259                 sa->runnable_avg_period += delta_w;
2260 
2261                 delta -= delta_w;
2262 
2263                 /* Figure out how many additional periods this update spans */
2264                 periods = delta / 1024;
2265                 delta %= 1024;
2266 
2267                 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2268                                                   periods + 1);
2269                 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2270                                                      periods + 1);
2271 
2272                 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2273                 runnable_contrib = __compute_runnable_contrib(periods);
2274                 if (runnable)
2275                         sa->runnable_avg_sum += runnable_contrib;
2276                 sa->runnable_avg_period += runnable_contrib;
2277         }
2278 
2279         /* Remainder of delta accrued against u_0` */
2280         if (runnable)
2281                 sa->runnable_avg_sum += delta;
2282         sa->runnable_avg_period += delta;
2283 
2284         return decayed;
2285 }
2286 
2287 /* Synchronize an entity's decay with its parenting cfs_rq.*/
2288 static inline u64 __synchronize_entity_decay(struct sched_entity *se)
2289 {
2290         struct cfs_rq *cfs_rq = cfs_rq_of(se);
2291         u64 decays = atomic64_read(&cfs_rq->decay_counter);
2292 
2293         decays -= se->avg.decay_count;
2294         if (!decays)
2295                 return 0;
2296 
2297         se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2298         se->avg.decay_count = 0;
2299 
2300         return decays;
2301 }
2302 
2303 #ifdef CONFIG_FAIR_GROUP_SCHED
2304 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2305                                                  int force_update)
2306 {
2307         struct task_group *tg = cfs_rq->tg;
2308         long tg_contrib;
2309 
2310         tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2311         tg_contrib -= cfs_rq->tg_load_contrib;
2312 
2313         if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2314                 atomic_long_add(tg_contrib, &tg->load_avg);
2315                 cfs_rq->tg_load_contrib += tg_contrib;
2316         }
2317 }
2318 
2319 /*
2320  * Aggregate cfs_rq runnable averages into an equivalent task_group
2321  * representation for computing load contributions.
2322  */
2323 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2324                                                   struct cfs_rq *cfs_rq)
2325 {
2326         struct task_group *tg = cfs_rq->tg;
2327         long contrib;
2328 
2329         /* The fraction of a cpu used by this cfs_rq */
2330         contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
2331                           sa->runnable_avg_period + 1);
2332         contrib -= cfs_rq->tg_runnable_contrib;
2333 
2334         if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2335                 atomic_add(contrib, &tg->runnable_avg);
2336                 cfs_rq->tg_runnable_contrib += contrib;
2337         }
2338 }
2339 
2340 static inline void __update_group_entity_contrib(struct sched_entity *se)
2341 {
2342         struct cfs_rq *cfs_rq = group_cfs_rq(se);
2343         struct task_group *tg = cfs_rq->tg;
2344         int runnable_avg;
2345 
2346         u64 contrib;
2347 
2348         contrib = cfs_rq->tg_load_contrib * tg->shares;
2349         se->avg.load_avg_contrib = div_u64(contrib,
2350                                      atomic_long_read(&tg->load_avg) + 1);
2351 
2352         /*
2353          * For group entities we need to compute a correction term in the case
2354          * that they are consuming <1 cpu so that we would contribute the same
2355          * load as a task of equal weight.
2356          *
2357          * Explicitly co-ordinating this measurement would be expensive, but
2358          * fortunately the sum of each cpus contribution forms a usable
2359          * lower-bound on the true value.
2360          *
2361          * Consider the aggregate of 2 contributions.  Either they are disjoint
2362          * (and the sum represents true value) or they are disjoint and we are
2363          * understating by the aggregate of their overlap.
2364          *
2365          * Extending this to N cpus, for a given overlap, the maximum amount we
2366          * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
2367          * cpus that overlap for this interval and w_i is the interval width.
2368          *
2369          * On a small machine; the first term is well-bounded which bounds the
2370          * total error since w_i is a subset of the period.  Whereas on a
2371          * larger machine, while this first term can be larger, if w_i is the
2372          * of consequential size guaranteed to see n_i*w_i quickly converge to
2373          * our upper bound of 1-cpu.
2374          */
2375         runnable_avg = atomic_read(&tg->runnable_avg);
2376         if (runnable_avg < NICE_0_LOAD) {
2377                 se->avg.load_avg_contrib *= runnable_avg;
2378                 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2379         }
2380 }
2381 
2382 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2383 {
2384         __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2385         __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2386 }
2387 #else /* CONFIG_FAIR_GROUP_SCHED */
2388 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2389                                                  int force_update) {}
2390 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2391                                                   struct cfs_rq *cfs_rq) {}
2392 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
2393 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2394 #endif /* CONFIG_FAIR_GROUP_SCHED */
2395 
2396 static inline void __update_task_entity_contrib(struct sched_entity *se)
2397 {
2398         u32 contrib;
2399 
2400         /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2401         contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2402         contrib /= (se->avg.runnable_avg_period + 1);
2403         se->avg.load_avg_contrib = scale_load(contrib);
2404 }
2405 
2406 /* Compute the current contribution to load_avg by se, return any delta */
2407 static long __update_entity_load_avg_contrib(struct sched_entity *se)
2408 {
2409         long old_contrib = se->avg.load_avg_contrib;
2410 
2411         if (entity_is_task(se)) {
2412                 __update_task_entity_contrib(se);
2413         } else {
2414                 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
2415                 __update_group_entity_contrib(se);
2416         }
2417 
2418         return se->avg.load_avg_contrib - old_contrib;
2419 }
2420 
2421 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2422                                                  long load_contrib)
2423 {
2424         if (likely(load_contrib < cfs_rq->blocked_load_avg))
2425                 cfs_rq->blocked_load_avg -= load_contrib;
2426         else
2427                 cfs_rq->blocked_load_avg = 0;
2428 }
2429 
2430 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2431 
2432 /* Update a sched_entity's runnable average */
2433 static inline void update_entity_load_avg(struct sched_entity *se,
2434                                           int update_cfs_rq)
2435 {
2436         struct cfs_rq *cfs_rq = cfs_rq_of(se);
2437         long contrib_delta;
2438         u64 now;
2439 
2440         /*
2441          * For a group entity we need to use their owned cfs_rq_clock_task() in
2442          * case they are the parent of a throttled hierarchy.
2443          */
2444         if (entity_is_task(se))
2445                 now = cfs_rq_clock_task(cfs_rq);
2446         else
2447                 now = cfs_rq_clock_task(group_cfs_rq(se));
2448 
2449         if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
2450                 return;
2451 
2452         contrib_delta = __update_entity_load_avg_contrib(se);
2453 
2454         if (!update_cfs_rq)
2455                 return;
2456 
2457         if (se->on_rq)
2458                 cfs_rq->runnable_load_avg += contrib_delta;
2459         else
2460                 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2461 }
2462 
2463 /*
2464  * Decay the load contributed by all blocked children and account this so that
2465  * their contribution may appropriately discounted when they wake up.
2466  */
2467 static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
2468 {
2469         u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
2470         u64 decays;
2471 
2472         decays = now - cfs_rq->last_decay;
2473         if (!decays && !force_update)
2474                 return;
2475 
2476         if (atomic_long_read(&cfs_rq->removed_load)) {
2477                 unsigned long removed_load;
2478                 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
2479                 subtract_blocked_load_contrib(cfs_rq, removed_load);
2480         }
2481 
2482         if (decays) {
2483                 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2484                                                       decays);
2485                 atomic64_add(decays, &cfs_rq->decay_counter);
2486                 cfs_rq->last_decay = now;
2487         }
2488 
2489         __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
2490 }
2491 
2492 /* Add the load generated by se into cfs_rq's child load-average */
2493 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2494                                                   struct sched_entity *se,
2495                                                   int wakeup)
2496 {
2497         /*
2498          * We track migrations using entity decay_count <= 0, on a wake-up
2499          * migration we use a negative decay count to track the remote decays
2500          * accumulated while sleeping.
2501          *
2502          * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2503          * are seen by enqueue_entity_load_avg() as a migration with an already
2504          * constructed load_avg_contrib.
2505          */
2506         if (unlikely(se->avg.decay_count <= 0)) {
2507                 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
2508                 if (se->avg.decay_count) {
2509                         /*
2510                          * In a wake-up migration we have to approximate the
2511                          * time sleeping.  This is because we can't synchronize
2512                          * clock_task between the two cpus, and it is not
2513                          * guaranteed to be read-safe.  Instead, we can
2514                          * approximate this using our carried decays, which are
2515                          * explicitly atomically readable.
2516                          */
2517                         se->avg.last_runnable_update -= (-se->avg.decay_count)
2518                                                         << 20;
2519                         update_entity_load_avg(se, 0);
2520                         /* Indicate that we're now synchronized and on-rq */
2521                         se->avg.decay_count = 0;
2522                 }
2523                 wakeup = 0;
2524         } else {
2525                 __synchronize_entity_decay(se);
2526         }
2527 
2528         /* migrated tasks did not contribute to our blocked load */
2529         if (wakeup) {
2530                 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
2531                 update_entity_load_avg(se, 0);
2532         }
2533 
2534         cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
2535         /* we force update consideration on load-balancer moves */
2536         update_cfs_rq_blocked_load(cfs_rq, !wakeup);
2537 }
2538 
2539 /*
2540  * Remove se's load from this cfs_rq child load-average, if the entity is
2541  * transitioning to a blocked state we track its projected decay using
2542  * blocked_load_avg.
2543  */
2544 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2545                                                   struct sched_entity *se,
2546                                                   int sleep)
2547 {
2548         update_entity_load_avg(se, 1);
2549         /* we force update consideration on load-balancer moves */
2550         update_cfs_rq_blocked_load(cfs_rq, !sleep);
2551 
2552         cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
2553         if (sleep) {
2554                 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2555                 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2556         } /* migrations, e.g. sleep=0 leave decay_count == 0 */
2557 }
2558 
2559 /*
2560  * Update the rq's load with the elapsed running time before entering
2561  * idle. if the last scheduled task is not a CFS task, idle_enter will
2562  * be the only way to update the runnable statistic.
2563  */
2564 void idle_enter_fair(struct rq *this_rq)
2565 {
2566         update_rq_runnable_avg(this_rq, 1);
2567 }
2568 
2569 /*
2570  * Update the rq's load with the elapsed idle time before a task is
2571  * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
2572  * be the only way to update the runnable statistic.
2573  */
2574 void idle_exit_fair(struct rq *this_rq)
2575 {
2576         update_rq_runnable_avg(this_rq, 0);
2577 }
2578 
2579 static int idle_balance(struct rq *this_rq);
2580 
2581 #else /* CONFIG_SMP */
2582 
2583 static inline void update_entity_load_avg(struct sched_entity *se,
2584                                           int update_cfs_rq) {}
2585 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
2586 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
2587                                            struct sched_entity *se,
2588                                            int wakeup) {}
2589 static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
2590                                            struct sched_entity *se,
2591                                            int sleep) {}
2592 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2593                                               int force_update) {}
2594 
2595 static inline int idle_balance(struct rq *rq)
2596 {
2597         return 0;
2598 }
2599 
2600 #endif /* CONFIG_SMP */
2601 
2602 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
2603 {
2604 #ifdef CONFIG_SCHEDSTATS
2605         struct task_struct *tsk = NULL;
2606 
2607         if (entity_is_task(se))
2608                 tsk = task_of(se);
2609 
2610         if (se->statistics.sleep_start) {
2611                 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
2612 
2613                 if ((s64)delta < 0)
2614                         delta = 0;
2615 
2616                 if (unlikely(delta > se->statistics.sleep_max))
2617                         se->statistics.sleep_max = delta;
2618 
2619                 se->statistics.sleep_start = 0;
2620                 se->statistics.sum_sleep_runtime += delta;
2621 
2622                 if (tsk) {
2623                         account_scheduler_latency(tsk, delta >> 10, 1);
2624                         trace_sched_stat_sleep(tsk, delta);
2625                 }
2626         }
2627         if (se->statistics.block_start) {
2628                 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
2629 
2630                 if ((s64)delta < 0)
2631                         delta = 0;
2632 
2633                 if (unlikely(delta > se->statistics.block_max))
2634                         se->statistics.block_max = delta;
2635 
2636                 se->statistics.block_start = 0;
2637                 se->statistics.sum_sleep_runtime += delta;
2638 
2639                 if (tsk) {
2640                         if (tsk->in_iowait) {
2641                                 se->statistics.iowait_sum += delta;
2642                                 se->statistics.iowait_count++;
2643                                 trace_sched_stat_iowait(tsk, delta);
2644                         }
2645 
2646                         trace_sched_stat_blocked(tsk, delta);
2647 
2648                         /*
2649                          * Blocking time is in units of nanosecs, so shift by
2650                          * 20 to get a milliseconds-range estimation of the
2651                          * amount of time that the task spent sleeping:
2652                          */
2653                         if (unlikely(prof_on == SLEEP_PROFILING)) {
2654                                 profile_hits(SLEEP_PROFILING,
2655                                                 (void *)get_wchan(tsk),
2656                                                 delta >> 20);
2657                         }
2658                         account_scheduler_latency(tsk, delta >> 10, 0);
2659                 }
2660         }
2661 #endif
2662 }
2663 
2664 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2665 {
2666 #ifdef CONFIG_SCHED_DEBUG
2667         s64 d = se->vruntime - cfs_rq->min_vruntime;
2668 
2669         if (d < 0)
2670                 d = -d;
2671 
2672         if (d > 3*sysctl_sched_latency)
2673                 schedstat_inc(cfs_rq, nr_spread_over);
2674 #endif
2675 }
2676 
2677 static void
2678 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2679 {
2680         u64 vruntime = cfs_rq->min_vruntime;
2681 
2682         /*
2683          * The 'current' period is already promised to the current tasks,
2684          * however the extra weight of the new task will slow them down a
2685          * little, place the new task so that it fits in the slot that
2686          * stays open at the end.
2687          */
2688         if (initial && sched_feat(START_DEBIT))
2689                 vruntime += sched_vslice(cfs_rq, se);
2690 
2691         /* sleeps up to a single latency don't count. */
2692         if (!initial) {
2693                 unsigned long thresh = sysctl_sched_latency;
2694 
2695                 /*
2696                  * Halve their sleep time's effect, to allow
2697                  * for a gentler effect of sleepers:
2698                  */
2699                 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2700                         thresh >>= 1;
2701 
2702                 vruntime -= thresh;
2703         }
2704 
2705         /* ensure we never gain time by being placed backwards. */
2706         se->vruntime = max_vruntime(se->vruntime, vruntime);
2707 }
2708 
2709 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2710 
2711 static void
2712 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2713 {
2714         /*
2715          * Update the normalized vruntime before updating min_vruntime
2716          * through calling update_curr().
2717          */
2718         if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
2719                 se->vruntime += cfs_rq->min_vruntime;
2720 
2721         /*
2722          * Update run-time statistics of the 'current'.
2723          */
2724         update_curr(cfs_rq);
2725         enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
2726         account_entity_enqueue(cfs_rq, se);
2727         update_cfs_shares(cfs_rq);
2728 
2729         if (flags & ENQUEUE_WAKEUP) {
2730                 place_entity(cfs_rq, se, 0);
2731                 enqueue_sleeper(cfs_rq, se);
2732         }
2733 
2734         update_stats_enqueue(cfs_rq, se);
2735         check_spread(cfs_rq, se);
2736         if (se != cfs_rq->curr)
2737                 __enqueue_entity(cfs_rq, se);
2738         se->on_rq = 1;
2739 
2740         if (cfs_rq->nr_running == 1) {
2741                 list_add_leaf_cfs_rq(cfs_rq);
2742                 check_enqueue_throttle(cfs_rq);
2743         }
2744 }
2745 
2746 static void __clear_buddies_last(struct sched_entity *se)
2747 {
2748         for_each_sched_entity(se) {
2749                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2750                 if (cfs_rq->last != se)
2751                         break;
2752 
2753                 cfs_rq->last = NULL;
2754         }
2755 }
2756 
2757 static void __clear_buddies_next(struct sched_entity *se)
2758 {
2759         for_each_sched_entity(se) {
2760                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2761                 if (cfs_rq->next != se)
2762                         break;
2763 
2764                 cfs_rq->next = NULL;
2765         }
2766 }
2767 
2768 static void __clear_buddies_skip(struct sched_entity *se)
2769 {
2770         for_each_sched_entity(se) {
2771                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2772                 if (cfs_rq->skip != se)
2773                         break;
2774 
2775                 cfs_rq->skip = NULL;
2776         }
2777 }
2778 
2779 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2780 {
2781         if (cfs_rq->last == se)
2782                 __clear_buddies_last(se);
2783 
2784         if (cfs_rq->next == se)
2785                 __clear_buddies_next(se);
2786 
2787         if (cfs_rq->skip == se)
2788                 __clear_buddies_skip(se);
2789 }
2790 
2791 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2792 
2793 static void
2794 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
2795 {
2796         /*
2797          * Update run-time statistics of the 'current'.
2798          */
2799         update_curr(cfs_rq);
2800         dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
2801 
2802         update_stats_dequeue(cfs_rq, se);
2803         if (flags & DEQUEUE_SLEEP) {
2804 #ifdef CONFIG_SCHEDSTATS
2805                 if (entity_is_task(se)) {
2806                         struct task_struct *tsk = task_of(se);
2807 
2808                         if (tsk->state & TASK_INTERRUPTIBLE)
2809                                 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
2810                         if (tsk->state & TASK_UNINTERRUPTIBLE)
2811                                 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
2812                 }
2813 #endif
2814         }
2815 
2816         clear_buddies(cfs_rq, se);
2817 
2818         if (se != cfs_rq->curr)
2819                 __dequeue_entity(cfs_rq, se);
2820         se->on_rq = 0;
2821         account_entity_dequeue(cfs_rq, se);
2822 
2823         /*
2824          * Normalize the entity after updating the min_vruntime because the
2825          * update can refer to the ->curr item and we need to reflect this
2826          * movement in our normalized position.
2827          */
2828         if (!(flags & DEQUEUE_SLEEP))
2829                 se->vruntime -= cfs_rq->min_vruntime;
2830 
2831         /* return excess runtime on last dequeue */
2832         return_cfs_rq_runtime(cfs_rq);
2833 
2834         update_min_vruntime(cfs_rq);
2835         update_cfs_shares(cfs_rq);
2836 }
2837 
2838 /*
2839  * Preempt the current task with a newly woken task if needed:
2840  */
2841 static void
2842 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
2843 {
2844         unsigned long ideal_runtime, delta_exec;
2845         struct sched_entity *se;
2846         s64 delta;
2847 
2848         ideal_runtime = sched_slice(cfs_rq, curr);
2849         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
2850         if (delta_exec > ideal_runtime) {
2851                 resched_task(rq_of(cfs_rq)->curr);
2852                 /*
2853                  * The current task ran long enough, ensure it doesn't get
2854                  * re-elected due to buddy favours.
2855                  */
2856                 clear_buddies(cfs_rq, curr);
2857                 return;
2858         }
2859 
2860         /*
2861          * Ensure that a task that missed wakeup preemption by a
2862          * narrow margin doesn't have to wait for a full slice.
2863          * This also mitigates buddy induced latencies under load.
2864          */
2865         if (delta_exec < sysctl_sched_min_granularity)
2866                 return;
2867 
2868         se = __pick_first_entity(cfs_rq);
2869         delta = curr->vruntime - se->vruntime;
2870 
2871         if (delta < 0)
2872                 return;
2873 
2874         if (delta > ideal_runtime)
2875                 resched_task(rq_of(cfs_rq)->curr);
2876 }
2877 
2878 static void
2879 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
2880 {
2881         /* 'current' is not kept within the tree. */
2882         if (se->on_rq) {
2883                 /*
2884                  * Any task has to be enqueued before it get to execute on
2885                  * a CPU. So account for the time it spent waiting on the
2886                  * runqueue.
2887                  */
2888                 update_stats_wait_end(cfs_rq, se);
2889                 __dequeue_entity(cfs_rq, se);
2890         }
2891 
2892         update_stats_curr_start(cfs_rq, se);
2893         cfs_rq->curr = se;
2894 #ifdef CONFIG_SCHEDSTATS
2895         /*
2896          * Track our maximum slice length, if the CPU's load is at
2897          * least twice that of our own weight (i.e. dont track it
2898          * when there are only lesser-weight tasks around):
2899          */
2900         if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
2901                 se->statistics.slice_max = max(se->statistics.slice_max,
2902                         se->sum_exec_runtime - se->prev_sum_exec_runtime);
2903         }
2904 #endif
2905         se->prev_sum_exec_runtime = se->sum_exec_runtime;
2906 }
2907 
2908 static int
2909 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2910 
2911 /*
2912  * Pick the next process, keeping these things in mind, in this order:
2913  * 1) keep things fair between processes/task groups
2914  * 2) pick the "next" process, since someone really wants that to run
2915  * 3) pick the "last" process, for cache locality
2916  * 4) do not run the "skip" process, if something else is available
2917  */
2918 static struct sched_entity *
2919 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
2920 {
2921         struct sched_entity *left = __pick_first_entity(cfs_rq);
2922         struct sched_entity *se;
2923 
2924         /*
2925          * If curr is set we have to see if its left of the leftmost entity
2926          * still in the tree, provided there was anything in the tree at all.
2927          */
2928         if (!left || (curr && entity_before(curr, left)))
2929                 left = curr;
2930 
2931         se = left; /* ideally we run the leftmost entity */
2932 
2933         /*
2934          * Avoid running the skip buddy, if running something else can
2935          * be done without getting too unfair.
2936          */
2937         if (cfs_rq->skip == se) {
2938                 struct sched_entity *second;
2939 
2940                 if (se == curr) {
2941                         second = __pick_first_entity(cfs_rq);
2942                 } else {
2943                         second = __pick_next_entity(se);
2944                         if (!second || (curr && entity_before(curr, second)))
2945                                 second = curr;
2946                 }
2947 
2948                 if (second && wakeup_preempt_entity(second, left) < 1)
2949                         se = second;
2950         }
2951 
2952         /*
2953          * Prefer last buddy, try to return the CPU to a preempted task.
2954          */
2955         if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2956                 se = cfs_rq->last;
2957 
2958         /*
2959          * Someone really wants this to run. If it's not unfair, run it.
2960          */
2961         if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2962                 se = cfs_rq->next;
2963 
2964         clear_buddies(cfs_rq, se);
2965 
2966         return se;
2967 }
2968 
2969 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2970 
2971 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
2972 {
2973         /*
2974          * If still on the runqueue then deactivate_task()
2975          * was not called and update_curr() has to be done:
2976          */
2977         if (prev->on_rq)
2978                 update_curr(cfs_rq);
2979 
2980         /* throttle cfs_rqs exceeding runtime */
2981         check_cfs_rq_runtime(cfs_rq);
2982 
2983         check_spread(cfs_rq, prev);
2984         if (prev->on_rq) {
2985                 update_stats_wait_start(cfs_rq, prev);
2986                 /* Put 'current' back into the tree. */
2987                 __enqueue_entity(cfs_rq, prev);
2988                 /* in !on_rq case, update occurred at dequeue */
2989                 update_entity_load_avg(prev, 1);
2990         }
2991         cfs_rq->curr = NULL;
2992 }
2993 
2994 static void
2995 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
2996 {
2997         /*
2998          * Update run-time statistics of the 'current'.
2999          */
3000         update_curr(cfs_rq);
3001 
3002         /*
3003          * Ensure that runnable average is periodically updated.
3004          */
3005         update_entity_load_avg(curr, 1);
3006         update_cfs_rq_blocked_load(cfs_rq, 1);
3007         update_cfs_shares(cfs_rq);
3008 
3009 #ifdef CONFIG_SCHED_HRTICK
3010         /*
3011          * queued ticks are scheduled to match the slice, so don't bother
3012          * validating it and just reschedule.
3013          */
3014         if (queued) {
3015                 resched_task(rq_of(cfs_rq)->curr);
3016                 return;
3017         }
3018         /*
3019          * don't let the period tick interfere with the hrtick preemption
3020          */
3021         if (!sched_feat(DOUBLE_TICK) &&
3022                         hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3023                 return;
3024 #endif
3025 
3026         if (cfs_rq->nr_running > 1)
3027                 check_preempt_tick(cfs_rq, curr);
3028 }
3029 
3030 
3031 /**************************************************
3032  * CFS bandwidth control machinery
3033  */
3034 
3035 #ifdef CONFIG_CFS_BANDWIDTH
3036 
3037 #ifdef HAVE_JUMP_LABEL
3038 static struct static_key __cfs_bandwidth_used;
3039 
3040 static inline bool cfs_bandwidth_used(void)
3041 {
3042         return static_key_false(&__cfs_bandwidth_used);
3043 }
3044 
3045 void cfs_bandwidth_usage_inc(void)
3046 {
3047         static_key_slow_inc(&__cfs_bandwidth_used);
3048 }
3049 
3050 void cfs_bandwidth_usage_dec(void)
3051 {
3052         static_key_slow_dec(&__cfs_bandwidth_used);
3053 }
3054 #else /* HAVE_JUMP_LABEL */
3055 static bool cfs_bandwidth_used(void)
3056 {
3057         return true;
3058 }
3059 
3060 void cfs_bandwidth_usage_inc(void) {}
3061 void cfs_bandwidth_usage_dec(void) {}
3062 #endif /* HAVE_JUMP_LABEL */
3063 
3064 /*
3065  * default period for cfs group bandwidth.
3066  * default: 0.1s, units: nanoseconds
3067  */
3068 static inline u64 default_cfs_period(void)
3069 {
3070         return 100000000ULL;
3071 }
3072 
3073 static inline u64 sched_cfs_bandwidth_slice(void)
3074 {
3075         return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3076 }
3077 
3078 /*
3079  * Replenish runtime according to assigned quota and update expiration time.
3080  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3081  * additional synchronization around rq->lock.
3082  *
3083  * requires cfs_b->lock
3084  */
3085 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3086 {
3087         u64 now;
3088 
3089         if (cfs_b->quota == RUNTIME_INF)
3090                 return;
3091 
3092         now = sched_clock_cpu(smp_processor_id());
3093         cfs_b->runtime = cfs_b->quota;
3094         cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3095 }
3096 
3097 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3098 {
3099         return &tg->cfs_bandwidth;
3100 }
3101 
3102 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3103 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3104 {
3105         if (unlikely(cfs_rq->throttle_count))
3106                 return cfs_rq->throttled_clock_task;
3107 
3108         return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3109 }
3110 
3111 /* returns 0 on failure to allocate runtime */
3112 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3113 {
3114         struct task_group *tg = cfs_rq->tg;
3115         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3116         u64 amount = 0, min_amount, expires;
3117 
3118         /* note: this is a positive sum as runtime_remaining <= 0 */
3119         min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3120 
3121         raw_spin_lock(&cfs_b->lock);
3122         if (cfs_b->quota == RUNTIME_INF)
3123                 amount = min_amount;
3124         else {
3125                 /*
3126                  * If the bandwidth pool has become inactive, then at least one
3127                  * period must have elapsed since the last consumption.
3128                  * Refresh the global state and ensure bandwidth timer becomes
3129                  * active.
3130                  */
3131                 if (!cfs_b->timer_active) {
3132                         __refill_cfs_bandwidth_runtime(cfs_b);
3133                         __start_cfs_bandwidth(cfs_b, false);
3134                 }
3135 
3136                 if (cfs_b->runtime > 0) {
3137                         amount = min(cfs_b->runtime, min_amount);
3138                         cfs_b->runtime -= amount;
3139                         cfs_b->idle = 0;
3140                 }
3141         }
3142         expires = cfs_b->runtime_expires;
3143         raw_spin_unlock(&cfs_b->lock);
3144 
3145         cfs_rq->runtime_remaining += amount;
3146         /*
3147          * we may have advanced our local expiration to account for allowed
3148          * spread between our sched_clock and the one on which runtime was
3149          * issued.
3150          */
3151         if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3152                 cfs_rq->runtime_expires = expires;
3153 
3154         return cfs_rq->runtime_remaining > 0;
3155 }
3156 
3157 /*
3158  * Note: This depends on the synchronization provided by sched_clock and the
3159  * fact that rq->clock snapshots this value.
3160  */
3161 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3162 {
3163         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3164 
3165         /* if the deadline is ahead of our clock, nothing to do */
3166         if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3167                 return;
3168 
3169         if (cfs_rq->runtime_remaining < 0)
3170                 return;
3171 
3172         /*
3173          * If the local deadline has passed we have to consider the
3174          * possibility that our sched_clock is 'fast' and the global deadline
3175          * has not truly expired.
3176          *
3177          * Fortunately we can check determine whether this the case by checking
3178          * whether the global deadline has advanced.
3179          */
3180 
3181         if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
3182                 /* extend local deadline, drift is bounded above by 2 ticks */
3183                 cfs_rq->runtime_expires += TICK_NSEC;
3184         } else {
3185                 /* global deadline is ahead, expiration has passed */
3186                 cfs_rq->runtime_remaining = 0;
3187         }
3188 }
3189 
3190 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3191 {
3192         /* dock delta_exec before expiring quota (as it could span periods) */
3193         cfs_rq->runtime_remaining -= delta_exec;
3194         expire_cfs_rq_runtime(cfs_rq);
3195 
3196         if (likely(cfs_rq->runtime_remaining > 0))
3197                 return;
3198 
3199         /*
3200          * if we're unable to extend our runtime we resched so that the active
3201          * hierarchy can be throttled
3202          */
3203         if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3204                 resched_task(rq_of(cfs_rq)->curr);
3205 }
3206 
3207 static __always_inline
3208 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3209 {
3210         if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3211                 return;
3212 
3213         __account_cfs_rq_runtime(cfs_rq, delta_exec);
3214 }
3215 
3216 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3217 {
3218         return cfs_bandwidth_used() && cfs_rq->throttled;
3219 }
3220 
3221 /* check whether cfs_rq, or any parent, is throttled */
3222 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3223 {
3224         return cfs_bandwidth_used() && cfs_rq->throttle_count;
3225 }
3226 
3227 /*
3228  * Ensure that neither of the group entities corresponding to src_cpu or
3229  * dest_cpu are members of a throttled hierarchy when performing group
3230  * load-balance operations.
3231  */
3232 static inline int throttled_lb_pair(struct task_group *tg,
3233                                     int src_cpu, int dest_cpu)
3234 {
3235         struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3236 
3237         src_cfs_rq = tg->cfs_rq[src_cpu];
3238         dest_cfs_rq = tg->cfs_rq[dest_cpu];
3239 
3240         return throttled_hierarchy(src_cfs_rq) ||
3241                throttled_hierarchy(dest_cfs_rq);
3242 }
3243 
3244 /* updated child weight may affect parent so we have to do this bottom up */
3245 static int tg_unthrottle_up(struct task_group *tg, void *data)
3246 {
3247         struct rq *rq = data;
3248         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3249 
3250         cfs_rq->throttle_count--;
3251 #ifdef CONFIG_SMP
3252         if (!cfs_rq->throttle_count) {
3253                 /* adjust cfs_rq_clock_task() */
3254                 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3255                                              cfs_rq->throttled_clock_task;
3256         }
3257 #endif
3258 
3259         return 0;
3260 }
3261 
3262 static int tg_throttle_down(struct task_group *tg, void *data)
3263 {
3264         struct rq *rq = data;
3265         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3266 
3267         /* group is entering throttled state, stop time */
3268         if (!cfs_rq->throttle_count)
3269                 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3270         cfs_rq->throttle_count++;
3271 
3272         return 0;
3273 }
3274 
3275 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3276 {
3277         struct rq *rq = rq_of(cfs_rq);
3278         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3279         struct sched_entity *se;
3280         long task_delta, dequeue = 1;
3281 
3282         se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3283 
3284         /* freeze hierarchy runnable averages while throttled */
3285         rcu_read_lock();
3286         walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3287         rcu_read_unlock();
3288 
3289         task_delta = cfs_rq->h_nr_running;
3290         for_each_sched_entity(se) {
3291                 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3292                 /* throttled entity or throttle-on-deactivate */
3293                 if (!se->on_rq)
3294                         break;
3295 
3296                 if (dequeue)
3297                         dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3298                 qcfs_rq->h_nr_running -= task_delta;
3299 
3300                 if (qcfs_rq->load.weight)
3301                         dequeue = 0;
3302         }
3303 
3304         if (!se)
3305                 rq->nr_running -= task_delta;
3306 
3307         cfs_rq->throttled = 1;
3308         cfs_rq->throttled_clock = rq_clock(rq);
3309         raw_spin_lock(&cfs_b->lock);
3310         list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3311         if (!cfs_b->timer_active)
3312                 __start_cfs_bandwidth(cfs_b, false);
3313         raw_spin_unlock(&cfs_b->lock);
3314 }
3315 
3316 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3317 {
3318         struct rq *rq = rq_of(cfs_rq);
3319         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3320         struct sched_entity *se;
3321         int enqueue = 1;
3322         long task_delta;
3323 
3324         se = cfs_rq->tg->se[cpu_of(rq)];
3325 
3326         cfs_rq->throttled = 0;
3327 
3328         update_rq_clock(rq);
3329 
3330         raw_spin_lock(&cfs_b->lock);
3331         cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
3332         list_del_rcu(&cfs_rq->throttled_list);
3333         raw_spin_unlock(&cfs_b->lock);
3334 
3335         /* update hierarchical throttle state */
3336         walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3337 
3338         if (!cfs_rq->load.weight)
3339                 return;
3340 
3341         task_delta = cfs_rq->h_nr_running;
3342         for_each_sched_entity(se) {
3343                 if (se->on_rq)
3344                         enqueue = 0;
3345 
3346                 cfs_rq = cfs_rq_of(se);
3347                 if (enqueue)
3348                         enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3349                 cfs_rq->h_nr_running += task_delta;
3350 
3351                 if (cfs_rq_throttled(cfs_rq))
3352                         break;
3353         }
3354 
3355         if (!se)
3356                 rq->nr_running += task_delta;
3357 
3358         /* determine whether we need to wake up potentially idle cpu */
3359         if (rq->curr == rq->idle && rq->cfs.nr_running)
3360                 resched_task(rq->curr);
3361 }
3362 
3363 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3364                 u64 remaining, u64 expires)
3365 {
3366         struct cfs_rq *cfs_rq;
3367         u64 runtime = remaining;
3368 
3369         rcu_read_lock();
3370         list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3371                                 throttled_list) {
3372                 struct rq *rq = rq_of(cfs_rq);
3373 
3374                 raw_spin_lock(&rq->lock);
3375                 if (!cfs_rq_throttled(cfs_rq))
3376                         goto next;
3377 
3378                 runtime = -cfs_rq->runtime_remaining + 1;
3379                 if (runtime > remaining)
3380                         runtime = remaining;
3381                 remaining -= runtime;
3382 
3383                 cfs_rq->runtime_remaining += runtime;
3384                 cfs_rq->runtime_expires = expires;
3385 
3386                 /* we check whether we're throttled above */
3387                 if (cfs_rq->runtime_remaining > 0)
3388                         unthrottle_cfs_rq(cfs_rq);
3389 
3390 next:
3391                 raw_spin_unlock(&rq->lock);
3392 
3393                 if (!remaining)
3394                         break;
3395         }
3396         rcu_read_unlock();
3397 
3398         return remaining;
3399 }
3400 
3401 /*
3402  * Responsible for refilling a task_group's bandwidth and unthrottling its
3403  * cfs_rqs as appropriate. If there has been no activity within the last
3404  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3405  * used to track this state.
3406  */
3407 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3408 {
3409         u64 runtime, runtime_expires;
3410         int idle = 1, throttled;
3411 
3412         raw_spin_lock(&cfs_b->lock);
3413         /* no need to continue the timer with no bandwidth constraint */
3414         if (cfs_b->quota == RUNTIME_INF)
3415                 goto out_unlock;
3416 
3417         throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3418         /* idle depends on !throttled (for the case of a large deficit) */
3419         idle = cfs_b->idle && !throttled;
3420         cfs_b->nr_periods += overrun;
3421 
3422         /* if we're going inactive then everything else can be deferred */
3423         if (idle)
3424                 goto out_unlock;
3425 
3426         /*
3427          * if we have relooped after returning idle once, we need to update our
3428          * status as actually running, so that other cpus doing
3429          * __start_cfs_bandwidth will stop trying to cancel us.
3430          */
3431         cfs_b->timer_active = 1;
3432 
3433         __refill_cfs_bandwidth_runtime(cfs_b);
3434 
3435         if (!throttled) {
3436                 /* mark as potentially idle for the upcoming period */
3437                 cfs_b->idle = 1;
3438                 goto out_unlock;
3439         }
3440 
3441         /* account preceding periods in which throttling occurred */
3442         cfs_b->nr_throttled += overrun;
3443 
3444         /*
3445          * There are throttled entities so we must first use the new bandwidth
3446          * to unthrottle them before making it generally available.  This
3447          * ensures that all existing debts will be paid before a new cfs_rq is
3448          * allowed to run.
3449          */
3450         runtime = cfs_b->runtime;
3451         runtime_expires = cfs_b->runtime_expires;
3452         cfs_b->runtime = 0;
3453 
3454         /*
3455          * This check is repeated as we are holding onto the new bandwidth
3456          * while we unthrottle.  This can potentially race with an unthrottled
3457          * group trying to acquire new bandwidth from the global pool.
3458          */
3459         while (throttled && runtime > 0) {
3460                 raw_spin_unlock(&cfs_b->lock);
3461                 /* we can't nest cfs_b->lock while distributing bandwidth */
3462                 runtime = distribute_cfs_runtime(cfs_b, runtime,
3463                                                  runtime_expires);
3464                 raw_spin_lock(&cfs_b->lock);
3465 
3466                 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3467         }
3468 
3469         /* return (any) remaining runtime */
3470         cfs_b->runtime = runtime;
3471         /*
3472          * While we are ensured activity in the period following an
3473          * unthrottle, this also covers the case in which the new bandwidth is
3474          * insufficient to cover the existing bandwidth deficit.  (Forcing the
3475          * timer to remain active while there are any throttled entities.)
3476          */
3477         cfs_b->idle = 0;
3478 out_unlock:
3479         if (idle)
3480                 cfs_b->timer_active = 0;
3481         raw_spin_unlock(&cfs_b->lock);
3482 
3483         return idle;
3484 }
3485 
3486 /* a cfs_rq won't donate quota below this amount */
3487 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3488 /* minimum remaining period time to redistribute slack quota */
3489 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3490 /* how long we wait to gather additional slack before distributing */
3491 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3492 
3493 /*
3494  * Are we near the end of the current quota period?
3495  *
3496  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3497  * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3498  * migrate_hrtimers, base is never cleared, so we are fine.
3499  */
3500 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3501 {
3502         struct hrtimer *refresh_timer = &cfs_b->period_timer;
3503         u64 remaining;
3504 
3505         /* if the call-back is running a quota refresh is already occurring */
3506         if (hrtimer_callback_running(refresh_timer))
3507                 return 1;
3508 
3509         /* is a quota refresh about to occur? */
3510         remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3511         if (remaining < min_expire)
3512                 return 1;
3513 
3514         return 0;
3515 }
3516 
3517 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3518 {
3519         u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3520 
3521         /* if there's a quota refresh soon don't bother with slack */
3522         if (runtime_refresh_within(cfs_b, min_left))
3523                 return;
3524 
3525         start_bandwidth_timer(&cfs_b->slack_timer,
3526                                 ns_to_ktime(cfs_bandwidth_slack_period));
3527 }
3528 
3529 /* we know any runtime found here is valid as update_curr() precedes return */
3530 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3531 {
3532         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3533         s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3534 
3535         if (slack_runtime <= 0)
3536                 return;
3537 
3538         raw_spin_lock(&cfs_b->lock);
3539         if (cfs_b->quota != RUNTIME_INF &&
3540             cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3541                 cfs_b->runtime += slack_runtime;
3542 
3543                 /* we are under rq->lock, defer unthrottling using a timer */
3544                 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3545                     !list_empty(&cfs_b->throttled_cfs_rq))
3546                         start_cfs_slack_bandwidth(cfs_b);
3547         }
3548         raw_spin_unlock(&cfs_b->lock);
3549 
3550         /* even if it's not valid for return we don't want to try again */
3551         cfs_rq->runtime_remaining -= slack_runtime;
3552 }
3553 
3554 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3555 {
3556         if (!cfs_bandwidth_used())
3557                 return;
3558 
3559         if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
3560                 return;
3561 
3562         __return_cfs_rq_runtime(cfs_rq);
3563 }
3564 
3565 /*
3566  * This is done with a timer (instead of inline with bandwidth return) since
3567  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3568  */
3569 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3570 {
3571         u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3572         u64 expires;
3573 
3574         /* confirm we're still not at a refresh boundary */
3575         raw_spin_lock(&cfs_b->lock);
3576         if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3577                 raw_spin_unlock(&cfs_b->lock);
3578                 return;
3579         }
3580 
3581         if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3582                 runtime = cfs_b->runtime;
3583                 cfs_b->runtime = 0;
3584         }
3585         expires = cfs_b->runtime_expires;
3586         raw_spin_unlock(&cfs_b->lock);
3587 
3588         if (!runtime)
3589                 return;
3590 
3591         runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3592 
3593         raw_spin_lock(&cfs_b->lock);
3594         if (expires == cfs_b->runtime_expires)
3595                 cfs_b->runtime = runtime;
3596         raw_spin_unlock(&cfs_b->lock);
3597 }
3598 
3599 /*
3600  * When a group wakes up we want to make sure that its quota is not already
3601  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3602  * runtime as update_curr() throttling can not not trigger until it's on-rq.
3603  */
3604 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3605 {
3606         if (!cfs_bandwidth_used())
3607                 return;
3608 
3609         /* an active group must be handled by the update_curr()->put() path */
3610         if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3611                 return;
3612 
3613         /* ensure the group is not already throttled */
3614         if (cfs_rq_throttled(cfs_rq))
3615                 return;
3616 
3617         /* update runtime allocation */
3618         account_cfs_rq_runtime(cfs_rq, 0);
3619         if (cfs_rq->runtime_remaining <= 0)
3620                 throttle_cfs_rq(cfs_rq);
3621 }
3622 
3623 /* conditionally throttle active cfs_rq's from put_prev_entity() */
3624 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3625 {
3626         if (!cfs_bandwidth_used())
3627                 return false;
3628 
3629         if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3630                 return false;
3631 
3632         /*
3633          * it's possible for a throttled entity to be forced into a running
3634          * state (e.g. set_curr_task), in this case we're finished.
3635          */
3636         if (cfs_rq_throttled(cfs_rq))
3637                 return true;
3638 
3639         throttle_cfs_rq(cfs_rq);
3640         return true;
3641 }
3642 
3643 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3644 {
3645         struct cfs_bandwidth *cfs_b =
3646                 container_of(timer, struct cfs_bandwidth, slack_timer);
3647         do_sched_cfs_slack_timer(cfs_b);
3648 
3649         return HRTIMER_NORESTART;
3650 }
3651 
3652 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3653 {
3654         struct cfs_bandwidth *cfs_b =
3655                 container_of(timer, struct cfs_bandwidth, period_timer);
3656         ktime_t now;
3657         int overrun;
3658         int idle = 0;
3659 
3660         for (;;) {
3661                 now = hrtimer_cb_get_time(timer);
3662                 overrun = hrtimer_forward(timer, now, cfs_b->period);
3663 
3664                 if (!overrun)
3665                         break;
3666 
3667                 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3668         }
3669 
3670         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3671 }
3672 
3673 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3674 {
3675         raw_spin_lock_init(&cfs_b->lock);
3676         cfs_b->runtime = 0;
3677         cfs_b->quota = RUNTIME_INF;
3678         cfs_b->period = ns_to_ktime(default_cfs_period());
3679 
3680         INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3681         hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3682         cfs_b->period_timer.function = sched_cfs_period_timer;
3683         hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3684         cfs_b->slack_timer.function = sched_cfs_slack_timer;
3685 }
3686 
3687 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3688 {
3689         cfs_rq->runtime_enabled = 0;
3690         INIT_LIST_HEAD(&cfs_rq->throttled_list);
3691 }
3692 
3693 /* requires cfs_b->lock, may release to reprogram timer */
3694 void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
3695 {
3696         /*
3697          * The timer may be active because we're trying to set a new bandwidth
3698          * period or because we're racing with the tear-down path
3699          * (timer_active==0 becomes visible before the hrtimer call-back
3700          * terminates).  In either case we ensure that it's re-programmed
3701          */
3702         while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3703                hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3704                 /* bounce the lock to allow do_sched_cfs_period_timer to run */
3705                 raw_spin_unlock(&cfs_b->lock);
3706                 cpu_relax();
3707                 raw_spin_lock(&cfs_b->lock);
3708                 /* if someone else restarted the timer then we're done */
3709                 if (!force && cfs_b->timer_active)
3710                         return;
3711         }
3712 
3713         cfs_b->timer_active = 1;
3714         start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3715 }
3716 
3717 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3718 {
3719         hrtimer_cancel(&cfs_b->period_timer);
3720         hrtimer_cancel(&cfs_b->slack_timer);
3721 }
3722 
3723 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
3724 {
3725         struct cfs_rq *cfs_rq;
3726 
3727         for_each_leaf_cfs_rq(rq, cfs_rq) {
3728                 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3729 
3730                 if (!cfs_rq->runtime_enabled)
3731                         continue;
3732 
3733                 /*
3734                  * clock_task is not advancing so we just need to make sure
3735                  * there's some valid quota amount
3736                  */
3737                 cfs_rq->runtime_remaining = cfs_b->quota;
3738                 if (cfs_rq_throttled(cfs_rq))
3739                         unthrottle_cfs_rq(cfs_rq);
3740         }
3741 }
3742 
3743 #else /* CONFIG_CFS_BANDWIDTH */
3744 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3745 {
3746         return rq_clock_task(rq_of(cfs_rq));
3747 }
3748 
3749 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
3750 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
3751 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
3752 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3753 
3754 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3755 {
3756         return 0;
3757 }
3758 
3759 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3760 {
3761         return 0;
3762 }
3763 
3764 static inline int throttled_lb_pair(struct task_group *tg,
3765                                     int src_cpu, int dest_cpu)
3766 {
3767         return 0;
3768 }
3769 
3770 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3771 
3772 #ifdef CONFIG_FAIR_GROUP_SCHED
3773 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3774 #endif
3775 
3776 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3777 {
3778         return NULL;
3779 }
3780 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3781 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
3782 
3783 #endif /* CONFIG_CFS_BANDWIDTH */
3784 
3785 /**************************************************
3786  * CFS operations on tasks:
3787  */
3788 
3789 #ifdef CONFIG_SCHED_HRTICK
3790 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3791 {
3792         struct sched_entity *se = &p->se;
3793         struct cfs_rq *cfs_rq = cfs_rq_of(se);
3794 
3795         WARN_ON(task_rq(p) != rq);
3796 
3797         if (cfs_rq->nr_running > 1) {
3798                 u64 slice = sched_slice(cfs_rq, se);
3799                 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3800                 s64 delta = slice - ran;
3801 
3802                 if (delta < 0) {
3803                         if (rq->curr == p)
3804                                 resched_task(p);
3805                         return;
3806                 }
3807 
3808                 /*
3809                  * Don't schedule slices shorter than 10000ns, that just
3810                  * doesn't make sense. Rely on vruntime for fairness.
3811                  */
3812                 if (rq->curr != p)
3813                         delta = max_t(s64, 10000LL, delta);
3814 
3815                 hrtick_start(rq, delta);
3816         }
3817 }
3818 
3819 /*
3820  * called from enqueue/dequeue and updates the hrtick when the
3821  * current task is from our class and nr_running is low enough
3822  * to matter.
3823  */
3824 static void hrtick_update(struct rq *rq)
3825 {
3826         struct task_struct *curr = rq->curr;
3827 
3828         if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
3829                 return;
3830 
3831         if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3832                 hrtick_start_fair(rq, curr);
3833 }
3834 #else /* !CONFIG_SCHED_HRTICK */
3835 static inline void
3836 hrtick_start_fair(struct rq *rq, struct task_struct *p)
3837 {
3838 }
3839 
3840 static inline void hrtick_update(struct rq *rq)
3841 {
3842 }
3843 #endif
3844 
3845 /*
3846  * The enqueue_task method is called before nr_running is
3847  * increased. Here we update the fair scheduling stats and
3848  * then put the task into the rbtree:
3849  */
3850 static void
3851 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3852 {
3853         struct cfs_rq *cfs_rq;
3854         struct sched_entity *se = &p->se;
3855 
3856         for_each_sched_entity(se) {
3857                 if (se->on_rq)
3858                         break;
3859                 cfs_rq = cfs_rq_of(se);
3860                 enqueue_entity(cfs_rq, se, flags);
3861 
3862                 /*
3863                  * end evaluation on encountering a throttled cfs_rq
3864                  *
3865                  * note: in the case of encountering a throttled cfs_rq we will
3866                  * post the final h_nr_running increment below.
3867                 */
3868                 if (cfs_rq_throttled(cfs_rq))
3869                         break;
3870                 cfs_rq->h_nr_running++;
3871 
3872                 flags = ENQUEUE_WAKEUP;
3873         }
3874 
3875         for_each_sched_entity(se) {
3876                 cfs_rq = cfs_rq_of(se);
3877                 cfs_rq->h_nr_running++;
3878 
3879                 if (cfs_rq_throttled(cfs_rq))
3880                         break;
3881 
3882                 update_cfs_shares(cfs_rq);
3883                 update_entity_load_avg(se, 1);
3884         }
3885 
3886         if (!se) {
3887                 update_rq_runnable_avg(rq, rq->nr_running);
3888                 inc_nr_running(rq);
3889         }
3890         hrtick_update(rq);
3891 }
3892 
3893 static void set_next_buddy(struct sched_entity *se);
3894 
3895 /*
3896  * The dequeue_task method is called before nr_running is
3897  * decreased. We remove the task from the rbtree and
3898  * update the fair scheduling stats:
3899  */
3900 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
3901 {
3902         struct cfs_rq *cfs_rq;
3903         struct sched_entity *se = &p->se;
3904         int task_sleep = flags & DEQUEUE_SLEEP;
3905 
3906         for_each_sched_entity(se) {
3907                 cfs_rq = cfs_rq_of(se);
3908                 dequeue_entity(cfs_rq, se, flags);
3909 
3910                 /*
3911                  * end evaluation on encountering a throttled cfs_rq
3912                  *
3913                  * note: in the case of encountering a throttled cfs_rq we will
3914                  * post the final h_nr_running decrement below.
3915                 */
3916                 if (cfs_rq_throttled(cfs_rq))
3917                         break;
3918                 cfs_rq->h_nr_running--;
3919 
3920                 /* Don't dequeue parent if it has other entities besides us */
3921                 if (cfs_rq->load.weight) {
3922                         /*
3923                          * Bias pick_next to pick a task from this cfs_rq, as
3924                          * p is sleeping when it is within its sched_slice.
3925                          */
3926                         if (task_sleep && parent_entity(se))
3927                                 set_next_buddy(parent_entity(se));
3928 
3929                         /* avoid re-evaluating load for this entity */
3930                         se = parent_entity(se);
3931                         break;
3932                 }
3933                 flags |= DEQUEUE_SLEEP;
3934         }
3935 
3936         for_each_sched_entity(se) {
3937                 cfs_rq = cfs_rq_of(se);
3938                 cfs_rq->h_nr_running--;
3939 
3940                 if (cfs_rq_throttled(cfs_rq))
3941                         break;
3942 
3943                 update_cfs_shares(cfs_rq);
3944                 update_entity_load_avg(se, 1);
3945         }
3946 
3947         if (!se) {
3948                 dec_nr_running(rq);
3949                 update_rq_runnable_avg(rq, 1);
3950         }
3951         hrtick_update(rq);
3952 }
3953 
3954 #ifdef CONFIG_SMP
3955 /* Used instead of source_load when we know the type == 0 */
3956 static unsigned long weighted_cpuload(const int cpu)
3957 {
3958         return cpu_rq(cpu)->cfs.runnable_load_avg;
3959 }
3960 
3961 /*
3962  * Return a low guess at the load of a migration-source cpu weighted
3963  * according to the scheduling class and "nice" value.
3964  *
3965  * We want to under-estimate the load of migration sources, to
3966  * balance conservatively.
3967  */
3968 static unsigned long source_load(int cpu, int type)
3969 {
3970         struct rq *rq = cpu_rq(cpu);
3971         unsigned long total = weighted_cpuload(cpu);
3972 
3973         if (type == 0 || !sched_feat(LB_BIAS))
3974                 return total;
3975 
3976         return min(rq->cpu_load[type-1], total);
3977 }
3978 
3979 /*
3980  * Return a high guess at the load of a migration-target cpu weighted
3981  * according to the scheduling class and "nice" value.
3982  */
3983 static unsigned long target_load(int cpu, int type)
3984 {
3985         struct rq *rq = cpu_rq(cpu);
3986         unsigned long total = weighted_cpuload(cpu);
3987 
3988         if (type == 0 || !sched_feat(LB_BIAS))
3989                 return total;
3990 
3991         return max(rq->cpu_load[type-1], total);
3992 }
3993 
3994 static unsigned long power_of(int cpu)
3995 {
3996         return cpu_rq(cpu)->cpu_power;
3997 }
3998 
3999 static unsigned long cpu_avg_load_per_task(int cpu)
4000 {
4001         struct rq *rq = cpu_rq(cpu);
4002         unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
4003         unsigned long load_avg = rq->cfs.runnable_load_avg;
4004 
4005         if (nr_running)
4006                 return load_avg / nr_running;
4007 
4008         return 0;
4009 }
4010 
4011 static void record_wakee(struct task_struct *p)
4012 {
4013         /*
4014          * Rough decay (wiping) for cost saving, don't worry
4015          * about the boundary, really active task won't care
4016          * about the loss.
4017          */
4018         if (jiffies > current->wakee_flip_decay_ts + HZ) {
4019                 current->wakee_flips = 0;
4020                 current->wakee_flip_decay_ts = jiffies;
4021         }
4022 
4023         if (current->last_wakee != p) {
4024                 current->last_wakee = p;
4025                 current->wakee_flips++;
4026         }
4027 }
4028 
4029 static void task_waking_fair(struct task_struct *p)
4030 {
4031         struct sched_entity *se = &p->se;
4032         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4033         u64 min_vruntime;
4034 
4035 #ifndef CONFIG_64BIT
4036         u64 min_vruntime_copy;
4037 
4038         do {
4039                 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4040                 smp_rmb();
4041                 min_vruntime = cfs_rq->min_vruntime;
4042         } while (min_vruntime != min_vruntime_copy);
4043 #else
4044         min_vruntime = cfs_rq->min_vruntime;
4045 #endif
4046 
4047         se->vruntime -= min_vruntime;
4048         record_wakee(p);
4049 }
4050 
4051 #ifdef CONFIG_FAIR_GROUP_SCHED
4052 /*
4053  * effective_load() calculates the load change as seen from the root_task_group
4054  *
4055  * Adding load to a group doesn't make a group heavier, but can cause movement
4056  * of group shares between cpus. Assuming the shares were perfectly aligned one
4057  * can calculate the shift in shares.
4058  *
4059  * Calculate the effective load difference if @wl is added (subtracted) to @tg
4060  * on this @cpu and results in a total addition (subtraction) of @wg to the
4061  * total group weight.
4062  *
4063  * Given a runqueue weight distribution (rw_i) we can compute a shares
4064  * distribution (s_i) using:
4065  *
4066  *   s_i = rw_i / \Sum rw_j                                             (1)
4067  *
4068  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4069  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4070  * shares distribution (s_i):
4071  *
4072  *   rw_i = {   2,   4,   1,   0 }
4073  *   s_i  = { 2/7, 4/7, 1/7,   0 }
4074  *
4075  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4076  * task used to run on and the CPU the waker is running on), we need to
4077  * compute the effect of waking a task on either CPU and, in case of a sync
4078  * wakeup, compute the effect of the current task going to sleep.
4079  *
4080  * So for a change of @wl to the local @cpu with an overall group weight change
4081  * of @wl we can compute the new shares distribution (s'_i) using:
4082  *
4083  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                            (2)
4084  *
4085  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4086  * differences in waking a task to CPU 0. The additional task changes the
4087  * weight and shares distributions like:
4088  *
4089  *   rw'_i = {   3,   4,   1,   0 }
4090  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
4091  *
4092  * We can then compute the difference in effective weight by using:
4093  *
4094  *   dw_i = S * (s'_i - s_i)                                            (3)
4095  *
4096  * Where 'S' is the group weight as seen by its parent.
4097  *
4098  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4099  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4100  * 4/7) times the weight of the group.
4101  */
4102 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4103 {
4104         struct sched_entity *se = tg->se[cpu];
4105 
4106         if (!tg->parent)        /* the trivial, non-cgroup case */
4107                 return wl;
4108 
4109         for_each_sched_entity(se) {
4110                 long w, W;
4111 
4112                 tg = se->my_q->tg;
4113 
4114                 /*
4115                  * W = @wg + \Sum rw_j
4116                  */
4117                 W = wg + calc_tg_weight(tg, se->my_q);
4118 
4119                 /*
4120                  * w = rw_i + @wl
4121                  */
4122                 w = se->my_q->load.weight + wl;
4123 
4124                 /*
4125                  * wl = S * s'_i; see (2)
4126                  */
4127                 if (W > 0 && w < W)
4128                         wl = (w * tg->shares) / W;
4129                 else
4130                         wl = tg->shares;
4131 
4132                 /*
4133                  * Per the above, wl is the new se->load.weight value; since
4134                  * those are clipped to [MIN_SHARES, ...) do so now. See
4135                  * calc_cfs_shares().
4136                  */
4137                 if (wl < MIN_SHARES)
4138                         wl = MIN_SHARES;
4139 
4140                 /*
4141                  * wl = dw_i = S * (s'_i - s_i); see (3)
4142                  */
4143                 wl -= se->load.weight;
4144 
4145                 /*
4146                  * Recursively apply this logic to all parent groups to compute
4147                  * the final effective load change on the root group. Since
4148                  * only the @tg group gets extra weight, all parent groups can
4149                  * only redistribute existing shares. @wl is the shift in shares
4150                  * resulting from this level per the above.
4151                  */
4152                 wg = 0;
4153         }
4154 
4155         return wl;
4156 }
4157 #else
4158 
4159 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4160 {
4161         return wl;
4162 }
4163 
4164 #endif
4165 
4166 static int wake_wide(struct task_struct *p)
4167 {
4168         int factor = this_cpu_read(sd_llc_size);
4169 
4170         /*
4171          * Yeah, it's the switching-frequency, could means many wakee or
4172          * rapidly switch, use factor here will just help to automatically
4173          * adjust the loose-degree, so bigger node will lead to more pull.
4174          */
4175         if (p->wakee_flips > factor) {
4176                 /*
4177                  * wakee is somewhat hot, it needs certain amount of cpu
4178                  * resource, so if waker is far more hot, prefer to leave
4179                  * it alone.
4180                  */
4181                 if (current->wakee_flips > (factor * p->wakee_flips))
4182                         return 1;
4183         }
4184 
4185         return 0;
4186 }
4187 
4188 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
4189 {
4190         s64 this_load, load;
4191         int idx, this_cpu, prev_cpu;
4192         unsigned long tl_per_task;
4193         struct task_group *tg;
4194         unsigned long weight;
4195         int balanced;
4196 
4197         /*
4198          * If we wake multiple tasks be careful to not bounce
4199          * ourselves around too much.
4200          */
4201         if (wake_wide(p))
4202                 return 0;
4203 
4204         idx       = sd->wake_idx;
4205         this_cpu  = smp_processor_id();
4206         prev_cpu  = task_cpu(p);
4207         load      = source_load(prev_cpu, idx);
4208         this_load = target_load(this_cpu, idx);
4209 
4210         /*
4211          * If sync wakeup then subtract the (maximum possible)
4212          * effect of the currently running task from the load
4213          * of the current CPU:
4214          */
4215         if (sync) {
4216                 tg = task_group(current);
4217                 weight = current->se.load.weight;
4218 
4219                 this_load += effective_load(tg, this_cpu, -weight, -weight);
4220                 load += effective_load(tg, prev_cpu, 0, -weight);
4221         }
4222 
4223         tg = task_group(p);
4224         weight = p->se.load.weight;
4225 
4226         /*
4227          * In low-load situations, where prev_cpu is idle and this_cpu is idle
4228          * due to the sync cause above having dropped this_load to 0, we'll
4229          * always have an imbalance, but there's really nothing you can do
4230          * about that, so that's good too.
4231          *
4232          * Otherwise check if either cpus are near enough in load to allow this
4233          * task to be woken on this_cpu.
4234          */
4235         if (this_load > 0) {
4236                 s64 this_eff_load, prev_eff_load;
4237 
4238                 this_eff_load = 100;
4239                 this_eff_load *= power_of(prev_cpu);
4240                 this_eff_load *= this_load +
4241                         effective_load(tg, this_cpu, weight, weight);
4242 
4243                 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4244                 prev_eff_load *= power_of(this_cpu);
4245                 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4246 
4247                 balanced = this_eff_load <= prev_eff_load;
4248         } else
4249                 balanced = true;
4250 
4251         /*
4252          * If the currently running task will sleep within
4253          * a reasonable amount of time then attract this newly
4254          * woken task:
4255          */
4256         if (sync && balanced)
4257                 return 1;
4258 
4259         schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
4260         tl_per_task = cpu_avg_load_per_task(this_cpu);
4261 
4262         if (balanced ||
4263             (this_load <= load &&
4264              this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
4265                 /*
4266                  * This domain has SD_WAKE_AFFINE and
4267                  * p is cache cold in this domain, and
4268                  * there is no bad imbalance.
4269                  */
4270                 schedstat_inc(sd, ttwu_move_affine);
4271                 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4272 
4273                 return 1;
4274         }
4275         return 0;
4276 }
4277 
4278 /*
4279  * find_idlest_group finds and returns the least busy CPU group within the
4280  * domain.
4281  */
4282 static struct sched_group *
4283 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
4284                   int this_cpu, int sd_flag)
4285 {
4286         struct sched_group *idlest = NULL, *group = sd->groups;
4287         unsigned long min_load = ULONG_MAX, this_load = 0;
4288         int load_idx = sd->forkexec_idx;
4289         int imbalance = 100 + (sd->imbalance_pct-100)/2;
4290 
4291         if (sd_flag & SD_BALANCE_WAKE)
4292                 load_idx = sd->wake_idx;
4293 
4294         do {
4295                 unsigned long load, avg_load;
4296                 int local_group;
4297                 int i;
4298 
4299                 /* Skip over this group if it has no CPUs allowed */
4300                 if (!cpumask_intersects(sched_group_cpus(group),
4301                                         tsk_cpus_allowed(p)))
4302                         continue;
4303 
4304                 local_group = cpumask_test_cpu(this_cpu,
4305                                                sched_group_cpus(group));
4306 
4307                 /* Tally up the load of all CPUs in the group */
4308                 avg_load = 0;
4309 
4310                 for_each_cpu(i, sched_group_cpus(group)) {
4311                         /* Bias balancing toward cpus of our domain */
4312                         if (local_group)
4313                                 load = source_load(i, load_idx);
4314                         else
4315                                 load = target_load(i, load_idx);
4316 
4317                         avg_load += load;
4318                 }
4319 
4320                 /* Adjust by relative CPU power of the group */
4321                 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
4322 
4323                 if (local_group) {
4324                         this_load = avg_load;
4325                 } else if (avg_load < min_load) {
4326                         min_load = avg_load;
4327                         idlest = group;
4328                 }
4329         } while (group = group->next, group != sd->groups);
4330 
4331         if (!idlest || 100*this_load < imbalance*min_load)
4332                 return NULL;
4333         return idlest;
4334 }
4335 
4336 /*
4337  * find_idlest_cpu - find the idlest cpu among the cpus in group.
4338  */
4339 static int
4340 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4341 {
4342         unsigned long load, min_load = ULONG_MAX;
4343         int idlest = -1;
4344         int i;
4345 
4346         /* Traverse only the allowed CPUs */
4347         for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
4348                 load = weighted_cpuload(i);
4349 
4350                 if (load < min_load || (load == min_load && i == this_cpu)) {
4351                         min_load = load;
4352                         idlest = i;
4353                 }
4354         }
4355 
4356         return idlest;
4357 }
4358 
4359 /*
4360  * Try and locate an idle CPU in the sched_domain.
4361  */
4362 static int select_idle_sibling(struct task_struct *p, int target)
4363 {
4364         struct sched_domain *sd;
4365         struct sched_group *sg;
4366         int i = task_cpu(p);
4367 
4368         if (idle_cpu(target))
4369                 return target;
4370 
4371         /*
4372          * If the prevous cpu is cache affine and idle, don't be stupid.
4373          */
4374         if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4375                 return i;
4376 
4377         /*
4378          * Otherwise, iterate the domains and find an elegible idle cpu.
4379          */
4380         sd = rcu_dereference(per_cpu(sd_llc, target));
4381         for_each_lower_domain(sd) {
4382                 sg = sd->groups;
4383                 do {
4384                         if (!cpumask_intersects(sched_group_cpus(sg),
4385                                                 tsk_cpus_allowed(p)))
4386                                 goto next;
4387 
4388                         for_each_cpu(i, sched_group_cpus(sg)) {
4389                                 if (i == target || !idle_cpu(i))
4390                                         goto next;
4391                         }
4392 
4393                         target = cpumask_first_and(sched_group_cpus(sg),
4394                                         tsk_cpus_allowed(p));
4395                         goto done;
4396 next:
4397                         sg = sg->next;
4398                 } while (sg != sd->groups);
4399         }
4400 done:
4401         return target;
4402 }
4403 
4404 /*
4405  * select_task_rq_fair: Select target runqueue for the waking task in domains
4406  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
4407  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
4408  *
4409  * Balances load by selecting the idlest cpu in the idlest group, or under
4410  * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
4411  *
4412  * Returns the target cpu number.
4413  *
4414  * preempt must be disabled.
4415  */
4416 static int
4417 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
4418 {
4419         struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
4420         int cpu = smp_processor_id();
4421         int new_cpu = cpu;
4422         int want_affine = 0;
4423         int sync = wake_flags & WF_SYNC;
4424 
4425         if (p->nr_cpus_allowed == 1)
4426                 return prev_cpu;
4427 
4428         if (sd_flag & SD_BALANCE_WAKE) {
4429                 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
4430                         want_affine = 1;
4431                 new_cpu = prev_cpu;
4432         }
4433 
4434         rcu_read_lock();
4435         for_each_domain(cpu, tmp) {
4436                 if (!(tmp->flags & SD_LOAD_BALANCE))
4437                         continue;
4438 
4439                 /*
4440                  * If both cpu and prev_cpu are part of this domain,
4441                  * cpu is a valid SD_WAKE_AFFINE target.
4442                  */
4443                 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4444                     cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4445                         affine_sd = tmp;
4446                         break;
4447                 }
4448 
4449                 if (tmp->flags & sd_flag)
4450                         sd = tmp;
4451         }
4452 
4453         if (affine_sd) {
4454                 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4455                         prev_cpu = cpu;
4456 
4457                 new_cpu = select_idle_sibling(p, prev_cpu);
4458                 goto unlock;
4459         }
4460 
4461         while (sd) {
4462                 struct sched_group *group;
4463                 int weight;
4464 
4465                 if (!(sd->flags & sd_flag)) {
4466                         sd = sd->child;
4467                         continue;
4468                 }
4469 
4470                 group = find_idlest_group(sd, p, cpu, sd_flag);
4471                 if (!group) {
4472                         sd = sd->child;
4473                         continue;
4474                 }
4475 
4476                 new_cpu = find_idlest_cpu(group, p, cpu);
4477                 if (new_cpu == -1 || new_cpu == cpu) {
4478                         /* Now try balancing at a lower domain level of cpu */
4479                         sd = sd->child;
4480                         continue;
4481                 }
4482 
4483                 /* Now try balancing at a lower domain level of new_cpu */
4484                 cpu = new_cpu;
4485                 weight = sd->span_weight;
4486                 sd = NULL;
4487                 for_each_domain(cpu, tmp) {
4488                         if (weight <= tmp->span_weight)
4489                                 break;
4490                         if (tmp->flags & sd_flag)
4491                                 sd = tmp;
4492                 }
4493                 /* while loop will break here if sd == NULL */
4494         }
4495 unlock:
4496         rcu_read_unlock();
4497 
4498         return new_cpu;
4499 }
4500 
4501 /*
4502  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4503  * cfs_rq_of(p) references at time of call are still valid and identify the
4504  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
4505  * other assumptions, including the state of rq->lock, should be made.
4506  */
4507 static void
4508 migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4509 {
4510         struct sched_entity *se = &p->se;
4511         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4512 
4513         /*
4514          * Load tracking: accumulate removed load so that it can be processed
4515          * when we next update owning cfs_rq under rq->lock.  Tasks contribute
4516          * to blocked load iff they have a positive decay-count.  It can never
4517          * be negative here since on-rq tasks have decay-count == 0.
4518          */
4519         if (se->avg.decay_count) {
4520                 se->avg.decay_count = -__synchronize_entity_decay(se);
4521                 atomic_long_add(se->avg.load_avg_contrib,
4522                                                 &cfs_rq->removed_load);
4523         }
4524 }
4525 #endif /* CONFIG_SMP */
4526 
4527 static unsigned long
4528 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
4529 {
4530         unsigned long gran = sysctl_sched_wakeup_granularity;
4531 
4532         /*
4533          * Since its curr running now, convert the gran from real-time
4534          * to virtual-time in his units.
4535          *
4536          * By using 'se' instead of 'curr' we penalize light tasks, so
4537          * they get preempted easier. That is, if 'se' < 'curr' then
4538          * the resulting gran will be larger, therefore penalizing the
4539          * lighter, if otoh 'se' > 'curr' then the resulting gran will
4540          * be smaller, again penalizing the lighter task.
4541          *
4542          * This is especially important for buddies when the leftmost
4543          * task is higher priority than the buddy.
4544          */
4545         return calc_delta_fair(gran, se);
4546 }
4547 
4548 /*
4549  * Should 'se' preempt 'curr'.
4550  *
4551  *             |s1
4552  *        |s2
4553  *   |s3
4554  *         g
4555  *      |<--->|c
4556  *
4557  *  w(c, s1) = -1
4558  *  w(c, s2) =  0
4559  *  w(c, s3) =  1
4560  *
4561  */
4562 static int
4563 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4564 {
4565         s64 gran, vdiff = curr->vruntime - se->vruntime;
4566 
4567         if (vdiff <= 0)
4568                 return -1;
4569 
4570         gran = wakeup_gran(curr, se);
4571         if (vdiff > gran)
4572                 return 1;
4573 
4574         return 0;
4575 }
4576 
4577 static void set_last_buddy(struct sched_entity *se)
4578 {
4579         if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4580                 return;
4581 
4582         for_each_sched_entity(se)
4583                 cfs_rq_of(se)->last = se;
4584 }
4585 
4586 static void set_next_buddy(struct sched_entity *se)
4587 {
4588         if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4589                 return;
4590 
4591         for_each_sched_entity(se)
4592                 cfs_rq_of(se)->next = se;
4593 }
4594 
4595 static void set_skip_buddy(struct sched_entity *se)
4596 {
4597         for_each_sched_entity(se)
4598                 cfs_rq_of(se)->skip = se;
4599 }
4600 
4601 /*
4602  * Preempt the current task with a newly woken task if needed:
4603  */
4604 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
4605 {
4606         struct task_struct *curr = rq->curr;
4607         struct sched_entity *se = &curr->se, *pse = &p->se;
4608         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4609         int scale = cfs_rq->nr_running >= sched_nr_latency;
4610         int next_buddy_marked = 0;
4611 
4612         if (unlikely(se == pse))
4613                 return;
4614 
4615         /*
4616          * This is possible from callers such as move_task(), in which we
4617          * unconditionally check_prempt_curr() after an enqueue (which may have
4618          * lead to a throttle).  This both saves work and prevents false
4619          * next-buddy nomination below.
4620          */
4621         if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4622                 return;
4623 
4624         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
4625                 set_next_buddy(pse);
4626                 next_buddy_marked = 1;
4627         }
4628 
4629         /*
4630          * We can come here with TIF_NEED_RESCHED already set from new task
4631          * wake up path.
4632          *
4633          * Note: this also catches the edge-case of curr being in a throttled
4634          * group (e.g. via set_curr_task), since update_curr() (in the
4635          * enqueue of curr) will have resulted in resched being set.  This
4636          * prevents us from potentially nominating it as a false LAST_BUDDY
4637          * below.
4638          */
4639         if (test_tsk_need_resched(curr))
4640                 return;
4641 
4642         /* Idle tasks are by definition preempted by non-idle tasks. */
4643         if (unlikely(curr->policy == SCHED_IDLE) &&
4644             likely(p->policy != SCHED_IDLE))
4645                 goto preempt;
4646 
4647         /*
4648          * Batch and idle tasks do not preempt non-idle tasks (their preemption
4649          * is driven by the tick):
4650          */
4651         if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
4652                 return;
4653 
4654         find_matching_se(&se, &pse);
4655         update_curr(cfs_rq_of(se));
4656         BUG_ON(!pse);
4657         if (wakeup_preempt_entity(se, pse) == 1) {
4658                 /*
4659                  * Bias pick_next to pick the sched entity that is
4660                  * triggering this preemption.
4661                  */
4662                 if (!next_buddy_marked)
4663                         set_next_buddy(pse);
4664                 goto preempt;
4665         }
4666 
4667         return;
4668 
4669 preempt:
4670         resched_task(curr);
4671         /*
4672          * Only set the backward buddy when the current task is still
4673          * on the rq. This can happen when a wakeup gets interleaved
4674          * with schedule on the ->pre_schedule() or idle_balance()
4675          * point, either of which can * drop the rq lock.
4676          *
4677          * Also, during early boot the idle thread is in the fair class,
4678          * for obvious reasons its a bad idea to schedule back to it.
4679          */
4680         if (unlikely(!se->on_rq || curr == rq->idle))
4681                 return;
4682 
4683         if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4684                 set_last_buddy(se);
4685 }
4686 
4687 static struct task_struct *
4688 pick_next_task_fair(struct rq *rq, struct task_struct *prev)
4689 {
4690         struct cfs_rq *cfs_rq = &rq->cfs;
4691         struct sched_entity *se;
4692         struct task_struct *p;
4693         int new_tasks;
4694 
4695 again:
4696 #ifdef CONFIG_FAIR_GROUP_SCHED
4697         if (!cfs_rq->nr_running)
4698                 goto idle;
4699 
4700         if (prev->sched_class != &fair_sched_class)
4701                 goto simple;
4702 
4703         /*
4704          * Because of the set_next_buddy() in dequeue_task_fair() it is rather
4705          * likely that a next task is from the same cgroup as the current.
4706          *
4707          * Therefore attempt to avoid putting and setting the entire cgroup
4708          * hierarchy, only change the part that actually changes.
4709          */
4710 
4711         do {
4712                 struct sched_entity *curr = cfs_rq->curr;
4713 
4714                 /*
4715                  * Since we got here without doing put_prev_entity() we also
4716                  * have to consider cfs_rq->curr. If it is still a runnable
4717                  * entity, update_curr() will update its vruntime, otherwise
4718                  * forget we've ever seen it.
4719                  */
4720                 if (curr && curr->on_rq)
4721                         update_curr(cfs_rq);
4722                 else
4723                         curr = NULL;
4724 
4725                 /*
4726                  * This call to check_cfs_rq_runtime() will do the throttle and
4727                  * dequeue its entity in the parent(s). Therefore the 'simple'
4728                  * nr_running test will indeed be correct.
4729                  */
4730                 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
4731                         goto simple;
4732 
4733                 se = pick_next_entity(cfs_rq, curr);
4734                 cfs_rq = group_cfs_rq(se);
4735         } while (cfs_rq);
4736 
4737         p = task_of(se);
4738 
4739         /*
4740          * Since we haven't yet done put_prev_entity and if the selected task
4741          * is a different task than we started out with, try and touch the
4742          * least amount of cfs_rqs.
4743          */
4744         if (prev != p) {
4745                 struct sched_entity *pse = &prev->se;
4746 
4747                 while (!(cfs_rq = is_same_group(se, pse))) {
4748                         int se_depth = se->depth;
4749                         int pse_depth = pse->depth;
4750 
4751                         if (se_depth <= pse_depth) {
4752                                 put_prev_entity(cfs_rq_of(pse), pse);
4753                                 pse = parent_entity(pse);
4754                         }
4755                         if (se_depth >= pse_depth) {
4756                                 set_next_entity(cfs_rq_of(se), se);
4757                                 se = parent_entity(se);
4758                         }
4759                 }
4760 
4761                 put_prev_entity(cfs_rq, pse);
4762                 set_next_entity(cfs_rq, se);
4763         }
4764 
4765         if (hrtick_enabled(rq))
4766                 hrtick_start_fair(rq, p);
4767 
4768         return p;
4769 simple:
4770         cfs_rq = &rq->cfs;
4771 #endif
4772 
4773         if (!cfs_rq->nr_running)
4774                 goto idle;
4775 
4776         put_prev_task(rq, prev);
4777 
4778         do {
4779                 se = pick_next_entity(cfs_rq, NULL);
4780                 set_next_entity(cfs_rq, se);
4781                 cfs_rq = group_cfs_rq(se);
4782         } while (cfs_rq);
4783 
4784         p = task_of(se);
4785 
4786         if (hrtick_enabled(rq))
4787                 hrtick_start_fair(rq, p);
4788 
4789         return p;
4790 
4791 idle:
4792         new_tasks = idle_balance(rq);
4793         /*
4794          * Because idle_balance() releases (and re-acquires) rq->lock, it is
4795          * possible for any higher priority task to appear. In that case we
4796          * must re-start the pick_next_entity() loop.
4797          */
4798         if (new_tasks < 0)
4799                 return RETRY_TASK;
4800 
4801         if (new_tasks > 0)
4802                 goto again;
4803 
4804         return NULL;
4805 }
4806 
4807 /*
4808  * Account for a descheduled task:
4809  */
4810 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
4811 {
4812         struct sched_entity *se = &prev->se;
4813         struct cfs_rq *cfs_rq;
4814 
4815         for_each_sched_entity(se) {
4816                 cfs_rq = cfs_rq_of(se);
4817                 put_prev_entity(cfs_rq, se);
4818         }
4819 }
4820 
4821 /*
4822  * sched_yield() is very simple
4823  *
4824  * The magic of dealing with the ->skip buddy is in pick_next_entity.
4825  */
4826 static void yield_task_fair(struct rq *rq)
4827 {
4828         struct task_struct *curr = rq->curr;
4829         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4830         struct sched_entity *se = &curr->se;
4831 
4832         /*
4833          * Are we the only task in the tree?
4834          */
4835         if (unlikely(rq->nr_running == 1))
4836                 return;
4837 
4838         clear_buddies(cfs_rq, se);
4839 
4840         if (curr->policy != SCHED_BATCH) {
4841                 update_rq_clock(rq);
4842                 /*
4843                  * Update run-time statistics of the 'current'.
4844                  */
4845                 update_curr(cfs_rq);
4846                 /*
4847                  * Tell update_rq_clock() that we've just updated,
4848                  * so we don't do microscopic update in schedule()
4849                  * and double the fastpath cost.
4850                  */
4851                  rq->skip_clock_update = 1;
4852         }
4853 
4854         set_skip_buddy(se);
4855 }
4856 
4857 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4858 {
4859         struct sched_entity *se = &p->se;
4860 
4861         /* throttled hierarchies are not runnable */
4862         if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
4863                 return false;
4864 
4865         /* Tell the scheduler that we'd really like pse to run next. */
4866         set_next_buddy(se);
4867 
4868         yield_task_fair(rq);
4869 
4870         return true;
4871 }
4872 
4873 #ifdef CONFIG_SMP
4874 /**************************************************
4875  * Fair scheduling class load-balancing methods.
4876  *
4877  * BASICS
4878  *
4879  * The purpose of load-balancing is to achieve the same basic fairness the
4880  * per-cpu scheduler provides, namely provide a proportional amount of compute
4881  * time to each task. This is expressed in the following equation:
4882  *
4883  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
4884  *
4885  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4886  * W_i,0 is defined as:
4887  *
4888  *   W_i,0 = \Sum_j w_i,j                                             (2)
4889  *
4890  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4891  * is derived from the nice value as per prio_to_weight[].
4892  *
4893  * The weight average is an exponential decay average of the instantaneous
4894  * weight:
4895  *
4896  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
4897  *
4898  * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4899  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4900  * can also include other factors [XXX].
4901  *
4902  * To achieve this balance we define a measure of imbalance which follows
4903  * directly from (1):
4904  *
4905  *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
4906  *
4907  * We them move tasks around to minimize the imbalance. In the continuous
4908  * function space it is obvious this converges, in the discrete case we get
4909  * a few fun cases generally called infeasible weight scenarios.
4910  *
4911  * [XXX expand on:
4912  *     - infeasible weights;
4913  *     - local vs global optima in the discrete case. ]
4914  *
4915  *
4916  * SCHED DOMAINS
4917  *
4918  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4919  * for all i,j solution, we create a tree of cpus that follows the hardware
4920  * topology where each level pairs two lower groups (or better). This results
4921  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4922  * tree to only the first of the previous level and we decrease the frequency
4923  * of load-balance at each level inv. proportional to the number of cpus in
4924  * the groups.
4925  *
4926  * This yields:
4927  *
4928  *     log_2 n     1     n
4929  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
4930  *     i = 0      2^i   2^i
4931  *                               `- size of each group
4932  *         |         |     `- number of cpus doing load-balance
4933  *         |         `- freq
4934  *         `- sum over all levels
4935  *
4936  * Coupled with a limit on how many tasks we can migrate every balance pass,
4937  * this makes (5) the runtime complexity of the balancer.
4938  *
4939  * An important property here is that each CPU is still (indirectly) connected
4940  * to every other cpu in at most O(log n) steps:
4941  *
4942  * The adjacency matrix of the resulting graph is given by:
4943  *
4944  *             log_2 n     
4945  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
4946  *             k = 0
4947  *
4948  * And you'll find that:
4949  *
4950  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
4951  *
4952  * Showing there's indeed a path between every cpu in at most O(log n) steps.
4953  * The task movement gives a factor of O(m), giving a convergence complexity
4954  * of:
4955  *
4956  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
4957  *
4958  *
4959  * WORK CONSERVING
4960  *
4961  * In order to avoid CPUs going idle while there's still work to do, new idle
4962  * balancing is more aggressive and has the newly idle cpu iterate up the domain
4963  * tree itself instead of relying on other CPUs to bring it work.
4964  *
4965  * This adds some complexity to both (5) and (8) but it reduces the total idle
4966  * time.
4967  *
4968  * [XXX more?]
4969  *
4970  *
4971  * CGROUPS
4972  *
4973  * Cgroups make a horror show out of (2), instead of a simple sum we get:
4974  *
4975  *                                s_k,i
4976  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
4977  *                                 S_k
4978  *
4979  * Where
4980  *
4981  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
4982  *
4983  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4984  *
4985  * The big problem is S_k, its a global sum needed to compute a local (W_i)
4986  * property.
4987  *
4988  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4989  *      rewrite all of this once again.]
4990  */ 
4991 
4992 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4993 
4994 enum fbq_type { regular, remote, all };
4995 
4996 #define LBF_ALL_PINNED  0x01
4997 #define LBF_NEED_BREAK  0x02
4998 #define LBF_DST_PINNED  0x04
4999 #define LBF_SOME_PINNED 0x08
5000 
5001 struct lb_env {
5002         struct sched_domain     *sd;
5003 
5004         struct rq               *src_rq;
5005         int                     src_cpu;
5006 
5007         int                     dst_cpu;
5008         struct rq               *dst_rq;
5009 
5010         struct cpumask          *dst_grpmask;
5011         int                     new_dst_cpu;
5012         enum cpu_idle_type      idle;
5013         long                    imbalance;
5014         /* The set of CPUs under consideration for load-balancing */
5015         struct cpumask          *cpus;
5016 
5017         unsigned int            flags;
5018 
5019         unsigned int            loop;
5020         unsigned int            loop_break;
5021         unsigned int            loop_max;
5022 
5023         enum fbq_type           fbq_type;
5024 };
5025 
5026 /*
5027  * move_task - move a task from one runqueue to another runqueue.
5028  * Both runqueues must be locked.
5029  */
5030 static void move_task(struct task_struct *p, struct lb_env *env)
5031 {
5032         deactivate_task(env->src_rq, p, 0);
5033         set_task_cpu(p, env->dst_cpu);
5034         activate_task(env->dst_rq, p, 0);
5035         check_preempt_curr(env->dst_rq, p, 0);
5036 }
5037 
5038 /*
5039  * Is this task likely cache-hot:
5040  */
5041 static int
5042 task_hot(struct task_struct *p, u64 now)
5043 {
5044         s64 delta;
5045 
5046         if (p->sched_class != &fair_sched_class)
5047                 return 0;
5048 
5049         if (unlikely(p->policy == SCHED_IDLE))
5050                 return 0;
5051 
5052         /*
5053          * Buddy candidates are cache hot:
5054          */
5055         if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
5056                         (&p->se == cfs_rq_of(&p->se)->next ||
5057                          &p->se == cfs_rq_of(&p->se)->last))
5058                 return 1;
5059 
5060         if (sysctl_sched_migration_cost == -1)
5061                 return 1;
5062         if (sysctl_sched_migration_cost == 0)
5063                 return 0;
5064 
5065         delta = now - p->se.exec_start;
5066 
5067         return delta < (s64)sysctl_sched_migration_cost;
5068 }
5069 
5070 #ifdef CONFIG_NUMA_BALANCING
5071 /* Returns true if the destination node has incurred more faults */
5072 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
5073 {
5074         int src_nid, dst_nid;
5075 
5076         if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
5077             !(env->sd->flags & SD_NUMA)) {
5078                 return false;
5079         }
5080 
5081         src_nid = cpu_to_node(env->src_cpu);
5082         dst_nid = cpu_to_node(env->dst_cpu);
5083 
5084         if (src_nid == dst_nid)
5085                 return false;
5086 
5087         /* Always encourage migration to the preferred node. */
5088         if (dst_nid == p->numa_preferred_nid)
5089                 return true;
5090 
5091         /* If both task and group weight improve, this move is a winner. */
5092         if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
5093             group_weight(p, dst_nid) > group_weight(p, src_nid))
5094                 return true;
5095 
5096         return false;
5097 }
5098 
5099 
5100 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5101 {
5102         int src_nid, dst_nid;
5103 
5104         if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
5105                 return false;
5106 
5107         if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
5108                 return false;
5109 
5110         src_nid = cpu_to_node(env->src_cpu);
5111         dst_nid = cpu_to_node(env->dst_cpu);
5112 
5113         if (src_nid == dst_nid)
5114                 return false;
5115 
5116         /* Migrating away from the preferred node is always bad. */
5117         if (src_nid == p->numa_preferred_nid)
5118                 return true;
5119 
5120         /* If either task or group weight get worse, don't do it. */
5121         if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
5122             group_weight(p, dst_nid) < group_weight(p, src_nid))
5123                 return true;
5124 
5125         return false;
5126 }
5127 
5128 #else
5129 static inline bool migrate_improves_locality(struct task_struct *p,
5130                                              struct lb_env *env)
5131 {
5132         return false;
5133 }
5134 
5135 static inline bool migrate_degrades_locality(struct task_struct *p,
5136                                              struct lb_env *env)
5137 {
5138         return false;
5139 }
5140 #endif
5141 
5142 /*
5143  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5144  */
5145 static
5146 int can_migrate_task(struct task_struct *p, struct lb_env *env)
5147 {
5148         int tsk_cache_hot = 0;
5149         /*
5150          * We do not migrate tasks that are:
5151          * 1) throttled_lb_pair, or
5152          * 2) cannot be migrated to this CPU due to cpus_allowed, or
5153          * 3) running (obviously), or
5154          * 4) are cache-hot on their current CPU.
5155          */
5156         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5157                 return 0;
5158 
5159         if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
5160                 int cpu;
5161 
5162                 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
5163 
5164                 env->flags |= LBF_SOME_PINNED;
5165 
5166                 /*
5167                  * Remember if this task can be migrated to any other cpu in
5168                  * our sched_group. We may want to revisit it if we couldn't
5169                  * meet load balance goals by pulling other tasks on src_cpu.
5170                  *
5171                  * Also avoid computing new_dst_cpu if we have already computed
5172                  * one in current iteration.
5173                  */
5174                 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
5175                         return 0;
5176 
5177                 /* Prevent to re-select dst_cpu via env's cpus */
5178                 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5179                         if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
5180                                 env->flags |= LBF_DST_PINNED;
5181                                 env->new_dst_cpu = cpu;
5182                                 break;
5183                         }
5184                 }
5185 
5186                 return 0;
5187         }
5188 
5189         /* Record that we found atleast one task that could run on dst_cpu */
5190         env->flags &= ~LBF_ALL_PINNED;
5191 
5192         if (task_running(env->src_rq, p)) {
5193                 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
5194                 return 0;
5195         }
5196 
5197         /*
5198          * Aggressive migration if:
5199          * 1) destination numa is preferred
5200          * 2) task is cache cold, or
5201          * 3) too many balance attempts have failed.
5202          */
5203         tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq));
5204         if (!tsk_cache_hot)
5205                 tsk_cache_hot = migrate_degrades_locality(p, env);
5206 
5207         if (migrate_improves_locality(p, env)) {
5208 #ifdef CONFIG_SCHEDSTATS
5209                 if (tsk_cache_hot) {
5210                         schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5211                         schedstat_inc(p, se.statistics.nr_forced_migrations);
5212                 }
5213 #endif
5214                 return 1;
5215         }
5216 
5217         if (!tsk_cache_hot ||
5218                 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
5219 
5220                 if (tsk_cache_hot) {
5221                         schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5222                         schedstat_inc(p, se.statistics.nr_forced_migrations);
5223                 }
5224 
5225                 return 1;
5226         }
5227 
5228         schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5229         return 0;
5230 }
5231 
5232 /*
5233  * move_one_task tries to move exactly one task from busiest to this_rq, as
5234  * part of active balancing operations within "domain".
5235  * Returns 1 if successful and 0 otherwise.
5236  *
5237  * Called with both runqueues locked.
5238  */
5239 static int move_one_task(struct lb_env *env)
5240 {
5241         struct task_struct *p, *n;
5242 
5243         list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
5244                 if (!can_migrate_task(p, env))
5245                         continue;
5246 
5247                 move_task(p, env);
5248                 /*
5249                  * Right now, this is only the second place move_task()
5250                  * is called, so we can safely collect move_task()
5251                  * stats here rather than inside move_task().
5252                  */
5253                 schedstat_inc(env->sd, lb_gained[env->idle]);
5254                 return 1;
5255         }
5256         return 0;
5257 }
5258 
5259 static const unsigned int sched_nr_migrate_break = 32;
5260 
5261 /*
5262  * move_tasks tries to move up to imbalance weighted load from busiest to
5263  * this_rq, as part of a balancing operation within domain "sd".
5264  * Returns 1 if successful and 0 otherwise.
5265  *
5266  * Called with both runqueues locked.
5267  */
5268 static int move_tasks(struct lb_env *env)
5269 {
5270         struct list_head *tasks = &env->src_rq->cfs_tasks;
5271         struct task_struct *p;
5272         unsigned long load;
5273         int pulled = 0;
5274 
5275         if (env->imbalance <= 0)
5276                 return 0;
5277 
5278         while (!list_empty(tasks)) {
5279                 p = list_first_entry(tasks, struct task_struct, se.group_node);
5280 
5281                 env->loop++;
5282                 /* We've more or less seen every task there is, call it quits */
5283                 if (env->loop > env->loop_max)
5284                         break;
5285 
5286                 /* take a breather every nr_migrate tasks */
5287                 if (env->loop > env->loop_break) {
5288                         env->loop_break += sched_nr_migrate_break;
5289                         env->flags |= LBF_NEED_BREAK;
5290                         break;
5291                 }
5292 
5293                 if (!can_migrate_task(p, env))
5294                         goto next;
5295 
5296                 load = task_h_load(p);
5297 
5298                 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
5299                         goto next;
5300 
5301                 if ((load / 2) > env->imbalance)
5302                         goto next;
5303 
5304                 move_task(p, env);
5305                 pulled++;
5306                 env->imbalance -= load;
5307 
5308 #ifdef CONFIG_PREEMPT
5309                 /*
5310                  * NEWIDLE balancing is a source of latency, so preemptible
5311                  * kernels will stop after the first task is pulled to minimize
5312                  * the critical section.
5313                  */
5314                 if (env->idle == CPU_NEWLY_IDLE)
5315                         break;
5316 #endif
5317 
5318                 /*
5319                  * We only want to steal up to the prescribed amount of
5320                  * weighted load.
5321                  */
5322                 if (env->imbalance <= 0)
5323                         break;
5324 
5325                 continue;
5326 next:
5327                 list_move_tail(&p->se.group_node, tasks);
5328         }
5329 
5330         /*
5331          * Right now, this is one of only two places move_task() is called,
5332          * so we can safely collect move_task() stats here rather than
5333          * inside move_task().
5334          */
5335         schedstat_add(env->sd, lb_gained[env->idle], pulled);
5336 
5337         return pulled;
5338 }
5339 
5340 #ifdef CONFIG_FAIR_GROUP_SCHED
5341 /*
5342  * update tg->load_weight by folding this cpu's load_avg
5343  */
5344 static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
5345 {
5346         struct sched_entity *se = tg->se[cpu];
5347         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
5348 
5349         /* throttled entities do not contribute to load */
5350         if (throttled_hierarchy(cfs_rq))
5351                 return;
5352 
5353         update_cfs_rq_blocked_load(cfs_rq, 1);
5354 
5355         if (se) {
5356                 update_entity_load_avg(se, 1);
5357                 /*
5358                  * We pivot on our runnable average having decayed to zero for
5359                  * list removal.  This generally implies that all our children
5360                  * have also been removed (modulo rounding error or bandwidth
5361                  * control); however, such cases are rare and we can fix these
5362                  * at enqueue.
5363                  *
5364                  * TODO: fix up out-of-order children on enqueue.
5365                  */
5366                 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5367                         list_del_leaf_cfs_rq(cfs_rq);
5368         } else {
5369                 struct rq *rq = rq_of(cfs_rq);
5370                 update_rq_runnable_avg(rq, rq->nr_running);
5371         }
5372 }
5373 
5374 static void update_blocked_averages(int cpu)
5375 {
5376         struct rq *rq = cpu_rq(cpu);
5377         struct cfs_rq *cfs_rq;
5378         unsigned long flags;
5379 
5380         raw_spin_lock_irqsave(&rq->lock, flags);
5381         update_rq_clock(rq);
5382         /*
5383          * Iterates the task_group tree in a bottom up fashion, see
5384          * list_add_leaf_cfs_rq() for details.
5385          */
5386         for_each_leaf_cfs_rq(rq, cfs_rq) {
5387                 /*
5388                  * Note: We may want to consider periodically releasing
5389                  * rq->lock about these updates so that creating many task
5390                  * groups does not result in continually extending hold time.
5391                  */
5392                 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
5393         }
5394 
5395         raw_spin_unlock_irqrestore(&rq->lock, flags);
5396 }
5397 
5398 /*
5399  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5400  * This needs to be done in a top-down fashion because the load of a child
5401  * group is a fraction of its parents load.
5402  */
5403 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
5404 {
5405         struct rq *rq = rq_of(cfs_rq);
5406         struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
5407         unsigned long now = jiffies;
5408         unsigned long load;
5409 
5410         if (cfs_rq->last_h_load_update == now)
5411                 return;
5412 
5413         cfs_rq->h_load_next = NULL;
5414         for_each_sched_entity(se) {
5415                 cfs_rq = cfs_rq_of(se);
5416                 cfs_rq->h_load_next = se;
5417                 if (cfs_rq->last_h_load_update == now)
5418                         break;
5419         }
5420 
5421         if (!se) {
5422                 cfs_rq->h_load = cfs_rq->runnable_load_avg;
5423                 cfs_rq->last_h_load_update = now;
5424         }
5425 
5426         while ((se = cfs_rq->h_load_next) != NULL) {
5427                 load = cfs_rq->h_load;
5428                 load = div64_ul(load * se->avg.load_avg_contrib,
5429                                 cfs_rq->runnable_load_avg + 1);
5430                 cfs_rq = group_cfs_rq(se);
5431                 cfs_rq->h_load = load;
5432                 cfs_rq->last_h_load_update = now;
5433         }
5434 }
5435 
5436 static unsigned long task_h_load(struct task_struct *p)
5437 {
5438         struct cfs_rq *cfs_rq = task_cfs_rq(p);
5439 
5440         update_cfs_rq_h_load(cfs_rq);
5441         return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5442                         cfs_rq->runnable_load_avg + 1);
5443 }
5444 #else
5445 static inline void update_blocked_averages(int cpu)
5446 {
5447 }
5448 
5449 static unsigned long task_h_load(struct task_struct *p)
5450 {
5451         return p->se.avg.load_avg_contrib;
5452 }
5453 #endif
5454 
5455 /********** Helpers for find_busiest_group ************************/
5456 /*
5457  * sg_lb_stats - stats of a sched_group required for load_balancing
5458  */
5459 struct sg_lb_stats {
5460         unsigned long avg_load; /*Avg load across the CPUs of the group */
5461         unsigned long group_load; /* Total load over the CPUs of the group */
5462         unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5463         unsigned long load_per_task;
5464         unsigned long group_power;
5465         unsigned int sum_nr_running; /* Nr tasks running in the group */
5466         unsigned int group_capacity;
5467         unsigned int idle_cpus;
5468         unsigned int group_weight;
5469         int group_imb; /* Is there an imbalance in the group ? */
5470         int group_has_capacity; /* Is there extra capacity in the group? */
5471 #ifdef CONFIG_NUMA_BALANCING
5472         unsigned int nr_numa_running;
5473         unsigned int nr_preferred_running;
5474 #endif
5475 };
5476 
5477 /*
5478  * sd_lb_stats - Structure to store the statistics of a sched_domain
5479  *               during load balancing.
5480  */
5481 struct sd_lb_stats {
5482         struct sched_group *busiest;    /* Busiest group in this sd */
5483         struct sched_group *local;      /* Local group in this sd */
5484         unsigned long total_load;       /* Total load of all groups in sd */
5485         unsigned long total_pwr;        /* Total power of all groups in sd */
5486         unsigned long avg_load; /* Average load across all groups in sd */
5487 
5488         struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
5489         struct sg_lb_stats local_stat;  /* Statistics of the local group */
5490 };
5491 
5492 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5493 {
5494         /*
5495          * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5496          * local_stat because update_sg_lb_stats() does a full clear/assignment.
5497          * We must however clear busiest_stat::avg_load because
5498          * update_sd_pick_busiest() reads this before assignment.
5499          */
5500         *sds = (struct sd_lb_stats){
5501                 .busiest = NULL,
5502                 .local = NULL,
5503                 .total_load = 0UL,
5504                 .total_pwr = 0UL,
5505                 .busiest_stat = {
5506                         .avg_load = 0UL,
5507                 },
5508         };
5509 }
5510 
5511 /**
5512  * get_sd_load_idx - Obtain the load index for a given sched domain.
5513  * @sd: The sched_domain whose load_idx is to be obtained.
5514  * @idle: The idle status of the CPU for whose sd load_idx is obtained.
5515  *
5516  * Return: The load index.
5517  */
5518 static inline int get_sd_load_idx(struct sched_domain *sd,
5519                                         enum cpu_idle_type idle)
5520 {
5521         int load_idx;
5522 
5523         switch (idle) {
5524         case CPU_NOT_IDLE:
5525                 load_idx = sd->busy_idx;
5526                 break;
5527 
5528         case CPU_NEWLY_IDLE:
5529                 load_idx = sd->newidle_idx;
5530                 break;
5531         default:
5532                 load_idx = sd->idle_idx;
5533                 break;
5534         }
5535 
5536         return load_idx;
5537 }
5538 
5539 static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
5540 {
5541         return SCHED_POWER_SCALE;
5542 }
5543 
5544 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5545 {
5546         return default_scale_freq_power(sd, cpu);
5547 }
5548 
5549 static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
5550 {
5551         unsigned long weight = sd->span_weight;
5552         unsigned long smt_gain = sd->smt_gain;
5553 
5554         smt_gain /= weight;
5555 
5556         return smt_gain;
5557 }
5558 
5559 unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5560 {
5561         return default_scale_smt_power(sd, cpu);
5562 }
5563 
5564 static unsigned long scale_rt_power(int cpu)
5565 {
5566         struct rq *rq = cpu_rq(cpu);
5567         u64 total, available, age_stamp, avg;
5568 
5569         /*
5570          * Since we're reading these variables without serialization make sure
5571          * we read them once before doing sanity checks on them.
5572          */
5573         age_stamp = ACCESS_ONCE(rq->age_stamp);
5574         avg = ACCESS_ONCE(rq->rt_avg);
5575 
5576         total = sched_avg_period() + (rq_clock(rq) - age_stamp);
5577 
5578         if (unlikely(total < avg)) {
5579                 /* Ensures that power won't end up being negative */
5580                 available = 0;
5581         } else {
5582                 available = total - avg;
5583         }
5584 
5585         if (unlikely((s64)total < SCHED_POWER_SCALE))
5586                 total = SCHED_POWER_SCALE;
5587 
5588         total >>= SCHED_POWER_SHIFT;
5589 
5590         return div_u64(available, total);
5591 }
5592 
5593 static void update_cpu_power(struct sched_domain *sd, int cpu)
5594 {
5595         unsigned long weight = sd->span_weight;
5596         unsigned long power = SCHED_POWER_SCALE;
5597         struct sched_group *sdg = sd->groups;
5598 
5599         if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5600                 if (sched_feat(ARCH_POWER))
5601                         power *= arch_scale_smt_power(sd, cpu);
5602                 else
5603                         power *= default_scale_smt_power(sd, cpu);
5604 
5605                 power >>= SCHED_POWER_SHIFT;
5606         }
5607 
5608         sdg->sgp->power_orig = power;
5609 
5610         if (sched_feat(ARCH_POWER))
5611                 power *= arch_scale_freq_power(sd, cpu);
5612         else
5613                 power *= default_scale_freq_power(sd, cpu);
5614 
5615         power >>= SCHED_POWER_SHIFT;
5616 
5617         power *= scale_rt_power(cpu);
5618         power >>= SCHED_POWER_SHIFT;
5619 
5620         if (!power)
5621                 power = 1;
5622 
5623         cpu_rq(cpu)->cpu_power = power;
5624         sdg->sgp->power = power;
5625 }
5626 
5627 void update_group_power(struct sched_domain *sd, int cpu)
5628 {
5629         struct sched_domain *child = sd->child;
5630         struct sched_group *group, *sdg = sd->groups;
5631         unsigned long power, power_orig;
5632         unsigned long interval;
5633 
5634         interval = msecs_to_jiffies(sd->balance_interval);
5635         interval = clamp(interval, 1UL, max_load_balance_interval);
5636         sdg->sgp->next_update = jiffies + interval;
5637 
5638         if (!child) {
5639                 update_cpu_power(sd, cpu);
5640                 return;
5641         }
5642 
5643         power_orig = power = 0;
5644 
5645         if (child->flags & SD_OVERLAP) {
5646                 /*
5647                  * SD_OVERLAP domains cannot assume that child groups
5648                  * span the current group.
5649                  */
5650 
5651                 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5652                         struct sched_group_power *sgp;
5653                         struct rq *rq = cpu_rq(cpu);
5654 
5655                         /*
5656                          * build_sched_domains() -> init_sched_groups_power()
5657                          * gets here before we've attached the domains to the
5658                          * runqueues.
5659                          *
5660                          * Use power_of(), which is set irrespective of domains
5661                          * in update_cpu_power().
5662                          *
5663                          * This avoids power/power_orig from being 0 and
5664                          * causing divide-by-zero issues on boot.
5665                          *
5666                          * Runtime updates will correct power_orig.
5667                          */
5668                         if (unlikely(!rq->sd)) {
5669                                 power_orig += power_of(cpu);
5670                                 power += power_of(cpu);
5671                                 continue;
5672                         }
5673 
5674                         sgp = rq->sd->groups->sgp;
5675                         power_orig += sgp->power_orig;
5676                         power += sgp->power;
5677                 }
5678         } else  {
5679                 /*
5680                  * !SD_OVERLAP domains can assume that child groups
5681                  * span the current group.
5682                  */ 
5683 
5684                 group = child->groups;
5685                 do {
5686                         power_orig += group->sgp->power_orig;
5687                         power += group->sgp->power;
5688                         group = group->next;
5689                 } while (group != child->groups);
5690         }
5691 
5692         sdg->sgp->power_orig = power_orig;
5693         sdg->sgp->power = power;
5694 }
5695 
5696 /*
5697  * Try and fix up capacity for tiny siblings, this is needed when
5698  * things like SD_ASYM_PACKING need f_b_g to select another sibling
5699  * which on its own isn't powerful enough.
5700  *
5701  * See update_sd_pick_busiest() and check_asym_packing().
5702  */
5703 static inline int
5704 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5705 {
5706         /*
5707          * Only siblings can have significantly less than SCHED_POWER_SCALE
5708          */
5709         if (!(sd->flags & SD_SHARE_CPUPOWER))
5710                 return 0;
5711 
5712         /*
5713          * If ~90% of the cpu_power is still there, we're good.
5714          */
5715         if (group->sgp->power * 32 > group->sgp->power_orig * 29)
5716                 return 1;
5717 
5718         return 0;
5719 }
5720 
5721 /*
5722  * Group imbalance indicates (and tries to solve) the problem where balancing
5723  * groups is inadequate due to tsk_cpus_allowed() constraints.
5724  *
5725  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5726  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5727  * Something like:
5728  *
5729  *      { 0 1 2 3 } { 4 5 6 7 }
5730  *              *     * * *
5731  *
5732  * If we were to balance group-wise we'd place two tasks in the first group and
5733  * two tasks in the second group. Clearly this is undesired as it will overload
5734  * cpu 3 and leave one of the cpus in the second group unused.
5735  *
5736  * The current solution to this issue is detecting the skew in the first group
5737  * by noticing the lower domain failed to reach balance and had difficulty
5738  * moving tasks due to affinity constraints.
5739  *
5740  * When this is so detected; this group becomes a candidate for busiest; see
5741  * update_sd_pick_busiest(). And calculate_imbalance() and
5742  * find_busiest_group() avoid some of the usual balance conditions to allow it
5743  * to create an effective group imbalance.
5744  *
5745  * This is a somewhat tricky proposition since the next run might not find the
5746  * group imbalance and decide the groups need to be balanced again. A most
5747  * subtle and fragile situation.
5748  */
5749 
5750 static inline int sg_imbalanced(struct sched_group *group)
5751 {
5752         return group->sgp->imbalance;
5753 }
5754 
5755 /*
5756  * Compute the group capacity.
5757  *
5758  * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5759  * first dividing out the smt factor and computing the actual number of cores
5760  * and limit power unit capacity with that.
5761  */
5762 static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5763 {
5764         unsigned int capacity, smt, cpus;
5765         unsigned int power, power_orig;
5766 
5767         power = group->sgp->power;
5768         power_orig = group->sgp->power_orig;
5769         cpus = group->group_weight;
5770 
5771         /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5772         smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5773         capacity = cpus / smt; /* cores */
5774 
5775         capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
5776         if (!capacity)
5777                 capacity = fix_small_capacity(env->sd, group);
5778 
5779         return capacity;
5780 }
5781 
5782 /**
5783  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5784  * @env: The load balancing environment.
5785  * @group: sched_group whose statistics are to be updated.
5786  * @load_idx: Load index of sched_domain of this_cpu for load calc.
5787  * @local_group: Does group contain this_cpu.
5788  * @sgs: variable to hold the statistics for this group.
5789  */
5790 static inline void update_sg_lb_stats(struct lb_env *env,
5791                         struct sched_group *group, int load_idx,
5792                         int local_group, struct sg_lb_stats *sgs)
5793 {
5794         unsigned long load;
5795         int i;
5796 
5797         memset(sgs, 0, sizeof(*sgs));
5798 
5799         for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
5800                 struct rq *rq = cpu_rq(i);
5801 
5802                 /* Bias balancing toward cpus of our domain */
5803                 if (local_group)
5804                         load = target_load(i, load_idx);
5805                 else
5806                         load = source_load(i, load_idx);
5807 
5808                 sgs->group_load += load;
5809                 sgs->sum_nr_running += rq->nr_running;
5810 #ifdef CONFIG_NUMA_BALANCING
5811                 sgs->nr_numa_running += rq->nr_numa_running;
5812                 sgs->nr_preferred_running += rq->nr_preferred_running;
5813 #endif
5814                 sgs->sum_weighted_load += weighted_cpuload(i);
5815                 if (idle_cpu(i))
5816                         sgs->idle_cpus++;
5817         }
5818 
5819         /* Adjust by relative CPU power of the group */
5820         sgs->group_power = group->sgp->power;
5821         sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
5822 
5823         if (sgs->sum_nr_running)
5824                 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
5825 
5826         sgs->group_weight = group->group_weight;
5827 
5828         sgs->group_imb = sg_imbalanced(group);
5829         sgs->group_capacity = sg_capacity(env, group);
5830 
5831         if (sgs->group_capacity > sgs->sum_nr_running)
5832                 sgs->group_has_capacity = 1;
5833 }
5834 
5835 /**
5836  * update_sd_pick_busiest - return 1 on busiest group
5837  * @env: The load balancing environment.
5838  * @sds: sched_domain statistics
5839  * @sg: sched_group candidate to be checked for being the busiest
5840  * @sgs: sched_group statistics
5841  *
5842  * Determine if @sg is a busier group than the previously selected
5843  * busiest group.
5844  *
5845  * Return: %true if @sg is a busier group than the previously selected
5846  * busiest group. %false otherwise.
5847  */
5848 static bool update_sd_pick_busiest(struct lb_env *env,
5849                                    struct sd_lb_stats *sds,
5850                                    struct sched_group *sg,
5851                                    struct sg_lb_stats *sgs)
5852 {
5853         if (sgs->avg_load <= sds->busiest_stat.avg_load)
5854                 return false;
5855 
5856         if (sgs->sum_nr_running > sgs->group_capacity)
5857                 return true;
5858 
5859         if (sgs->group_imb)
5860                 return true;
5861 
5862         /*
5863          * ASYM_PACKING needs to move all the work to the lowest
5864          * numbered CPUs in the group, therefore mark all groups
5865          * higher than ourself as busy.
5866          */
5867         if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5868             env->dst_cpu < group_first_cpu(sg)) {
5869                 if (!sds->busiest)
5870                         return true;
5871 
5872                 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5873                         return true;
5874         }
5875 
5876         return false;
5877 }
5878 
5879 #ifdef CONFIG_NUMA_BALANCING
5880 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5881 {
5882         if (sgs->sum_nr_running > sgs->nr_numa_running)
5883                 return regular;
5884         if (sgs->sum_nr_running > sgs->nr_preferred_running)
5885                 return remote;
5886         return all;
5887 }
5888 
5889 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5890 {
5891         if (rq->nr_running > rq->nr_numa_running)
5892                 return regular;
5893         if (rq->nr_running > rq->nr_preferred_running)
5894                 return remote;
5895         return all;
5896 }
5897 #else
5898 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5899 {
5900         return all;
5901 }
5902 
5903 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5904 {
5905         return regular;
5906 }
5907 #endif /* CONFIG_NUMA_BALANCING */
5908 
5909 /**
5910  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
5911  * @env: The load balancing environment.
5912  * @sds: variable to hold the statistics for this sched_domain.
5913  */
5914 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
5915 {
5916         struct sched_domain *child = env->sd->child;
5917         struct sched_group *sg = env->sd->groups;
5918         struct sg_lb_stats tmp_sgs;
5919         int load_idx, prefer_sibling = 0;
5920 
5921         if (child && child->flags & SD_PREFER_SIBLING)
5922                 prefer_sibling = 1;
5923 
5924         load_idx = get_sd_load_idx(env->sd, env->idle);
5925 
5926         do {
5927                 struct sg_lb_stats *sgs = &tmp_sgs;
5928                 int local_group;
5929 
5930                 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
5931                 if (local_group) {
5932                         sds->local = sg;
5933                         sgs = &sds->local_stat;
5934 
5935                         if (env->idle != CPU_NEWLY_IDLE ||
5936                             time_after_eq(jiffies, sg->sgp->next_update))
5937                                 update_group_power(env->sd, env->dst_cpu);
5938                 }
5939 
5940                 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
5941 
5942                 if (local_group)
5943                         goto next_group;
5944 
5945                 /*
5946                  * In case the child domain prefers tasks go to siblings
5947                  * first, lower the sg capacity to one so that we'll try
5948                  * and move all the excess tasks away. We lower the capacity
5949                  * of a group only if the local group has the capacity to fit
5950                  * these excess tasks, i.e. nr_running < group_capacity. The
5951                  * extra check prevents the case where you always pull from the
5952                  * heaviest group when it is already under-utilized (possible
5953                  * with a large weight task outweighs the tasks on the system).
5954                  */
5955                 if (prefer_sibling && sds->local &&
5956                     sds->local_stat.group_has_capacity)
5957                         sgs->group_capacity = min(sgs->group_capacity, 1U);
5958 
5959                 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
5960                         sds->busiest = sg;
5961                         sds->busiest_stat = *sgs;
5962                 }
5963 
5964 next_group:
5965                 /* Now, start updating sd_lb_stats */
5966                 sds->total_load += sgs->group_load;
5967                 sds->total_pwr += sgs->group_power;
5968 
5969                 sg = sg->next;
5970         } while (sg != env->sd->groups);
5971 
5972         if (env->sd->flags & SD_NUMA)
5973                 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
5974 }
5975 
5976 /**
5977  * check_asym_packing - Check to see if the group is packed into the
5978  *                      sched doman.
5979  *
5980  * This is primarily intended to used at the sibling level.  Some
5981  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
5982  * case of POWER7, it can move to lower SMT modes only when higher
5983  * threads are idle.  When in lower SMT modes, the threads will
5984  * perform better since they share less core resources.  Hence when we
5985  * have idle threads, we want them to be the higher ones.
5986  *
5987  * This packing function is run on idle threads.  It checks to see if
5988  * the busiest CPU in this domain (core in the P7 case) has a higher
5989  * CPU number than the packing function is being run on.  Here we are
5990  * assuming lower CPU number will be equivalent to lower a SMT thread
5991  * number.
5992  *
5993  * Return: 1 when packing is required and a task should be moved to
5994  * this CPU.  The amount of the imbalance is returned in *imbalance.
5995  *
5996  * @env: The load balancing environment.
5997  * @sds: Statistics of the sched_domain which is to be packed
5998  */
5999 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
6000 {
6001         int busiest_cpu;
6002 
6003         if (!(env->sd->flags & SD_ASYM_PACKING))
6004                 return 0;
6005 
6006         if (!sds->busiest)
6007                 return 0;
6008 
6009         busiest_cpu = group_first_cpu(sds->busiest);
6010         if (env->dst_cpu > busiest_cpu)
6011                 return 0;
6012 
6013         env->imbalance = DIV_ROUND_CLOSEST(
6014                 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
6015                 SCHED_POWER_SCALE);
6016 
6017         return 1;
6018 }
6019 
6020 /**
6021  * fix_small_imbalance - Calculate the minor imbalance that exists
6022  *                      amongst the groups of a sched_domain, during
6023  *                      load balancing.
6024  * @env: The load balancing environment.
6025  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
6026  */
6027 static inline
6028 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6029 {
6030         unsigned long tmp, pwr_now = 0, pwr_move = 0;
6031         unsigned int imbn = 2;
6032         unsigned long scaled_busy_load_per_task;
6033         struct sg_lb_stats *local, *busiest;
6034 
6035         local = &sds->local_stat;
6036         busiest = &sds->busiest_stat;
6037 
6038         if (!local->sum_nr_running)
6039                 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6040         else if (busiest->load_per_task > local->load_per_task)
6041                 imbn = 1;
6042 
6043         scaled_busy_load_per_task =
6044                 (busiest->load_per_task * SCHED_POWER_SCALE) /
6045                 busiest->group_power;
6046 
6047         if (busiest->avg_load + scaled_busy_load_per_task >=
6048             local->avg_load + (scaled_busy_load_per_task * imbn)) {
6049                 env->imbalance = busiest->load_per_task;
6050                 return;
6051         }
6052 
6053         /*
6054          * OK, we don't have enough imbalance to justify moving tasks,
6055          * however we may be able to increase total CPU power used by
6056          * moving them.
6057          */
6058 
6059         pwr_now += busiest->group_power *
6060                         min(busiest->load_per_task, busiest->avg_load);
6061         pwr_now += local->group_power *
6062                         min(local->load_per_task, local->avg_load);
6063         pwr_now /= SCHED_POWER_SCALE;
6064 
6065         /* Amount of load we'd subtract */
6066         if (busiest->avg_load > scaled_busy_load_per_task) {
6067                 pwr_move += busiest->group_power *
6068                             min(busiest->load_per_task,
6069                                 busiest->avg_load - scaled_busy_load_per_task);
6070         }
6071 
6072         /* Amount of load we'd add */
6073         if (busiest->avg_load * busiest->group_power <
6074             busiest->load_per_task * SCHED_POWER_SCALE) {
6075                 tmp = (busiest->avg_load * busiest->group_power) /
6076                       local->group_power;
6077         } else {
6078                 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
6079                       local->group_power;
6080         }
6081         pwr_move += local->group_power *
6082                     min(local->load_per_task, local->avg_load + tmp);
6083         pwr_move /= SCHED_POWER_SCALE;
6084 
6085         /* Move if we gain throughput */
6086         if (pwr_move > pwr_now)
6087                 env->imbalance = busiest->load_per_task;
6088 }
6089 
6090 /**
6091  * calculate_imbalance - Calculate the amount of imbalance present within the
6092  *                       groups of a given sched_domain during load balance.
6093  * @env: load balance environment
6094  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
6095  */
6096 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
6097 {
6098         unsigned long max_pull, load_above_capacity = ~0UL;
6099         struct sg_lb_stats *local, *busiest;
6100 
6101         local = &sds->local_stat;
6102         busiest = &sds->busiest_stat;
6103 
6104         if (busiest->group_imb) {
6105                 /*
6106                  * In the group_imb case we cannot rely on group-wide averages
6107                  * to ensure cpu-load equilibrium, look at wider averages. XXX
6108                  */
6109                 busiest->load_per_task =
6110                         min(busiest->load_per_task, sds->avg_load);
6111         }
6112 
6113         /*
6114          * In the presence of smp nice balancing, certain scenarios can have
6115          * max load less than avg load(as we skip the groups at or below
6116          * its cpu_power, while calculating max_load..)
6117          */
6118         if (busiest->avg_load <= sds->avg_load ||
6119             local->avg_load >= sds->avg_load) {
6120                 env->imbalance = 0;
6121                 return fix_small_imbalance(env, sds);
6122         }
6123 
6124         if (!busiest->group_imb) {
6125                 /*
6126                  * Don't want to pull so many tasks that a group would go idle.
6127                  * Except of course for the group_imb case, since then we might
6128                  * have to drop below capacity to reach cpu-load equilibrium.
6129                  */
6130                 load_above_capacity =
6131                         (busiest->sum_nr_running - busiest->group_capacity);
6132 
6133                 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
6134                 load_above_capacity /= busiest->group_power;
6135         }
6136 
6137         /*
6138          * We're trying to get all the cpus to the average_load, so we don't
6139          * want to push ourselves above the average load, nor do we wish to
6140          * reduce the max loaded cpu below the average load. At the same time,
6141          * we also don't want to reduce the group load below the group capacity
6142          * (so that we can implement power-savings policies etc). Thus we look
6143          * for the minimum possible imbalance.
6144          */
6145         max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
6146 
6147         /* How much load to actually move to equalise the imbalance */
6148         env->imbalance = min(
6149                 max_pull * busiest->group_power,
6150                 (sds->avg_load - local->avg_load) * local->group_power
6151         ) / SCHED_POWER_SCALE;
6152 
6153         /*
6154          * if *imbalance is less than the average load per runnable task
6155          * there is no guarantee that any tasks will be moved so we'll have
6156          * a think about bumping its value to force at least one task to be
6157          * moved
6158          */
6159         if (env->imbalance < busiest->load_per_task)
6160                 return fix_small_imbalance(env, sds);
6161 }
6162 
6163 /******* find_busiest_group() helpers end here *********************/
6164 
6165 /**
6166  * find_busiest_group - Returns the busiest group within the sched_domain
6167  * if there is an imbalance. If there isn't an imbalance, and
6168  * the user has opted for power-savings, it returns a group whose
6169  * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6170  * such a group exists.
6171  *
6172  * Also calculates the amount of weighted load which should be moved
6173  * to restore balance.
6174  *
6175  * @env: The load balancing environment.
6176  *
6177  * Return:      - The busiest group if imbalance exists.
6178  *              - If no imbalance and user has opted for power-savings balance,
6179  *                 return the least loaded group whose CPUs can be
6180  *                 put to idle by rebalancing its tasks onto our group.
6181  */
6182 static struct sched_group *find_busiest_group(struct lb_env *env)
6183 {
6184         struct sg_lb_stats *local, *busiest;
6185         struct sd_lb_stats sds;
6186 
6187         init_sd_lb_stats(&sds);
6188 
6189         /*
6190          * Compute the various statistics relavent for load balancing at
6191          * this level.
6192          */
6193         update_sd_lb_stats(env, &sds);
6194         local = &sds.local_stat;
6195         busiest = &sds.busiest_stat;
6196 
6197         if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6198             check_asym_packing(env, &sds))
6199                 return sds.busiest;
6200 
6201         /* There is no busy sibling group to pull tasks from */
6202         if (!sds.busiest || busiest->sum_nr_running == 0)
6203                 goto out_balanced;
6204 
6205         sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
6206 
6207         /*
6208          * If the busiest group is imbalanced the below checks don't
6209          * work because they assume all things are equal, which typically
6210          * isn't true due to cpus_allowed constraints and the like.
6211          */
6212         if (busiest->group_imb)
6213                 goto force_balance;
6214 
6215         /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
6216         if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
6217             !busiest->group_has_capacity)
6218                 goto force_balance;
6219 
6220         /*
6221          * If the local group is more busy than the selected busiest group
6222          * don't try and pull any tasks.
6223          */
6224         if (local->avg_load >= busiest->avg_load)
6225                 goto out_balanced;
6226 
6227         /*
6228          * Don't pull any tasks if this group is already above the domain
6229          * average load.
6230          */
6231         if (local->avg_load >= sds.avg_load)
6232                 goto out_balanced;
6233 
6234         if (env->idle == CPU_IDLE) {
6235                 /*
6236                  * This cpu is idle. If the busiest group load doesn't
6237                  * have more tasks than the number of available cpu's and
6238                  * there is no imbalance between this and busiest group
6239                  * wrt to idle cpu's, it is balanced.
6240                  */
6241                 if ((local->idle_cpus < busiest->idle_cpus) &&
6242                     busiest->sum_nr_running <= busiest->group_weight)
6243                         goto out_balanced;
6244         } else {
6245                 /*
6246                  * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
6247                  * imbalance_pct to be conservative.
6248                  */
6249                 if (100 * busiest->avg_load <=
6250                                 env->sd->imbalance_pct * local->avg_load)
6251                         goto out_balanced;
6252         }
6253 
6254 force_balance:
6255         /* Looks like there is an imbalance. Compute it */
6256         calculate_imbalance(env, &sds);
6257         return sds.busiest;
6258 
6259 out_balanced:
6260         env->imbalance = 0;
6261         return NULL;
6262 }
6263 
6264 /*
6265  * find_busiest_queue - find the busiest runqueue among the cpus in group.
6266  */
6267 static struct rq *find_busiest_queue(struct lb_env *env,
6268                                      struct sched_group *group)
6269 {
6270         struct rq *busiest = NULL, *rq;
6271         unsigned long busiest_load = 0, busiest_power = 1;
6272         int i;
6273 
6274         for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
6275                 unsigned long power, capacity, wl;
6276                 enum fbq_type rt;
6277 
6278                 rq = cpu_rq(i);
6279                 rt = fbq_classify_rq(rq);
6280 
6281                 /*
6282                  * We classify groups/runqueues into three groups:
6283                  *  - regular: there are !numa tasks
6284                  *  - remote:  there are numa tasks that run on the 'wrong' node
6285                  *  - all:     there is no distinction
6286                  *
6287                  * In order to avoid migrating ideally placed numa tasks,
6288                  * ignore those when there's better options.
6289                  *
6290                  * If we ignore the actual busiest queue to migrate another
6291                  * task, the next balance pass can still reduce the busiest
6292                  * queue by moving tasks around inside the node.
6293                  *
6294                  * If we cannot move enough load due to this classification
6295                  * the next pass will adjust the group classification and
6296                  * allow migration of more tasks.
6297                  *
6298                  * Both cases only affect the total convergence complexity.
6299                  */
6300                 if (rt > env->fbq_type)
6301                         continue;
6302 
6303                 power = power_of(i);
6304                 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
6305                 if (!capacity)
6306                         capacity = fix_small_capacity(env->sd, group);
6307 
6308                 wl = weighted_cpuload(i);
6309 
6310                 /*
6311                  * When comparing with imbalance, use weighted_cpuload()
6312                  * which is not scaled with the cpu power.
6313                  */
6314                 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
6315                         continue;
6316 
6317                 /*
6318                  * For the load comparisons with the other cpu's, consider
6319                  * the weighted_cpuload() scaled with the cpu power, so that
6320                  * the load can be moved away from the cpu that is potentially
6321                  * running at a lower capacity.
6322                  *
6323                  * Thus we're looking for max(wl_i / power_i), crosswise
6324                  * multiplication to rid ourselves of the division works out
6325                  * to: wl_i * power_j > wl_j * power_i;  where j is our
6326                  * previous maximum.
6327                  */
6328                 if (wl * busiest_power > busiest_load * power) {
6329                         busiest_load = wl;
6330                         busiest_power = power;
6331                         busiest = rq;
6332                 }
6333         }
6334 
6335         return busiest;
6336 }
6337 
6338 /*
6339  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6340  * so long as it is large enough.
6341  */
6342 #define MAX_PINNED_INTERVAL     512
6343 
6344 /* Working cpumask for load_balance and load_balance_newidle. */
6345 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
6346 
6347 static int need_active_balance(struct lb_env *env)
6348 {
6349         struct sched_domain *sd = env->sd;
6350 
6351         if (env->idle == CPU_NEWLY_IDLE) {
6352 
6353                 /*
6354                  * ASYM_PACKING needs to force migrate tasks from busy but
6355                  * higher numbered CPUs in order to pack all tasks in the
6356                  * lowest numbered CPUs.
6357                  */
6358                 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
6359                         return 1;
6360         }
6361 
6362         return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6363 }
6364 
6365 static int active_load_balance_cpu_stop(void *data);
6366 
6367 static int should_we_balance(struct lb_env *env)
6368 {
6369         struct sched_group *sg = env->sd->groups;
6370         struct cpumask *sg_cpus, *sg_mask;
6371         int cpu, balance_cpu = -1;
6372 
6373         /*
6374          * In the newly idle case, we will allow all the cpu's
6375          * to do the newly idle load balance.
6376          */
6377         if (env->idle == CPU_NEWLY_IDLE)
6378                 return 1;
6379 
6380         sg_cpus = sched_group_cpus(sg);
6381         sg_mask = sched_group_mask(sg);
6382         /* Try to find first idle cpu */
6383         for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6384                 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6385                         continue;
6386 
6387                 balance_cpu = cpu;
6388                 break;
6389         }
6390 
6391         if (balance_cpu == -1)
6392                 balance_cpu = group_balance_cpu(sg);
6393 
6394         /*
6395          * First idle cpu or the first cpu(busiest) in this sched group
6396          * is eligible for doing load balancing at this and above domains.
6397          */
6398         return balance_cpu == env->dst_cpu;
6399 }
6400 
6401 /*
6402  * Check this_cpu to ensure it is balanced within domain. Attempt to move
6403  * tasks if there is an imbalance.
6404  */
6405 static int load_balance(int this_cpu, struct rq *this_rq,
6406                         struct sched_domain *sd, enum cpu_idle_type idle,
6407                         int *continue_balancing)
6408 {
6409         int ld_moved, cur_ld_moved, active_balance = 0;
6410         struct sched_domain *sd_parent = sd->parent;
6411         struct sched_group *group;
6412         struct rq *busiest;
6413         unsigned long flags;
6414         struct cpumask *cpus = __get_cpu_var(load_balance_mask);
6415 
6416         struct lb_env env = {
6417                 .sd             = sd,
6418                 .dst_cpu        = this_cpu,
6419                 .dst_rq         = this_rq,
6420                 .dst_grpmask    = sched_group_cpus(sd->groups),
6421                 .idle           = idle,
6422                 .loop_break     = sched_nr_migrate_break,
6423                 .cpus           = cpus,
6424                 .fbq_type       = all,
6425         };
6426 
6427         /*
6428          * For NEWLY_IDLE load_balancing, we don't need to consider
6429          * other cpus in our group
6430          */
6431         if (idle == CPU_NEWLY_IDLE)
6432                 env.dst_grpmask = NULL;
6433 
6434         cpumask_copy(cpus, cpu_active_mask);
6435 
6436         schedstat_inc(sd, lb_count[idle]);
6437 
6438 redo:
6439         if (!should_we_balance(&env)) {
6440                 *continue_balancing = 0;
6441                 goto out_balanced;
6442         }
6443 
6444         group = find_busiest_group(&env);
6445         if (!group) {
6446                 schedstat_inc(sd, lb_nobusyg[idle]);
6447                 goto out_balanced;
6448         }
6449 
6450         busiest = find_busiest_queue(&env, group);
6451         if (!busiest) {
6452                 schedstat_inc(sd, lb_nobusyq[idle]);
6453                 goto out_balanced;
6454         }
6455 
6456         BUG_ON(busiest == env.dst_rq);
6457 
6458         schedstat_add(sd, lb_imbalance[idle], env.imbalance);
6459 
6460         ld_moved = 0;
6461         if (busiest->nr_running > 1) {
6462                 /*
6463                  * Attempt to move tasks. If find_busiest_group has found
6464                  * an imbalance but busiest->nr_running <= 1, the group is
6465                  * still unbalanced. ld_moved simply stays zero, so it is
6466                  * correctly treated as an imbalance.
6467                  */
6468                 env.flags |= LBF_ALL_PINNED;
6469                 env.src_cpu   = busiest->cpu;
6470                 env.src_rq    = busiest;
6471                 env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
6472 
6473 more_balance:
6474                 local_irq_save(flags);
6475                 double_rq_lock(env.dst_rq, busiest);
6476 
6477                 /*
6478                  * cur_ld_moved - load moved in current iteration
6479                  * ld_moved     - cumulative load moved across iterations
6480                  */
6481                 cur_ld_moved = move_tasks(&env);
6482                 ld_moved += cur_ld_moved;
6483                 double_rq_unlock(env.dst_rq, busiest);
6484                 local_irq_restore(flags);
6485 
6486                 /*
6487                  * some other cpu did the load balance for us.
6488                  */
6489                 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6490                         resched_cpu(env.dst_cpu);
6491 
6492                 if (env.flags & LBF_NEED_BREAK) {
6493                         env.flags &= ~LBF_NEED_BREAK;
6494                         goto more_balance;
6495                 }
6496 
6497                 /*
6498                  * Revisit (affine) tasks on src_cpu that couldn't be moved to
6499                  * us and move them to an alternate dst_cpu in our sched_group
6500                  * where they can run. The upper limit on how many times we
6501                  * iterate on same src_cpu is dependent on number of cpus in our
6502                  * sched_group.
6503                  *
6504                  * This changes load balance semantics a bit on who can move
6505                  * load to a given_cpu. In addition to the given_cpu itself
6506                  * (or a ilb_cpu acting on its behalf where given_cpu is
6507                  * nohz-idle), we now have balance_cpu in a position to move
6508                  * load to given_cpu. In rare situations, this may cause
6509                  * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6510                  * _independently_ and at _same_ time to move some load to
6511                  * given_cpu) causing exceess load to be moved to given_cpu.
6512                  * This however should not happen so much in practice and
6513                  * moreover subsequent load balance cycles should correct the
6514                  * excess load moved.
6515                  */
6516                 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
6517 
6518                         /* Prevent to re-select dst_cpu via env's cpus */
6519                         cpumask_clear_cpu(env.dst_cpu, env.cpus);
6520 
6521                         env.dst_rq       = cpu_rq(env.new_dst_cpu);
6522                         env.dst_cpu      = env.new_dst_cpu;
6523                         env.flags       &= ~LBF_DST_PINNED;
6524                         env.loop         = 0;
6525                         env.loop_break   = sched_nr_migrate_break;
6526 
6527                         /*
6528                          * Go back to "more_balance" rather than "redo" since we
6529                          * need to continue with same src_cpu.
6530                          */
6531                         goto more_balance;
6532                 }
6533 
6534                 /*
6535                  * We failed to reach balance because of affinity.
6536                  */
6537                 if (sd_parent) {
6538                         int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6539 
6540                         if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6541                                 *group_imbalance = 1;
6542                         } else if (*group_imbalance)
6543                                 *group_imbalance = 0;
6544                 }
6545 
6546                 /* All tasks on this runqueue were pinned by CPU affinity */
6547                 if (unlikely(env.flags & LBF_ALL_PINNED)) {
6548                         cpumask_clear_cpu(cpu_of(busiest), cpus);
6549                         if (!cpumask_empty(cpus)) {
6550                                 env.loop = 0;
6551                                 env.loop_break = sched_nr_migrate_break;
6552                                 goto redo;
6553                         }
6554                         goto out_balanced;
6555                 }
6556         }
6557 
6558         if (!ld_moved) {
6559                 schedstat_inc(sd, lb_failed[idle]);
6560                 /*
6561                  * Increment the failure counter only on periodic balance.
6562                  * We do not want newidle balance, which can be very
6563                  * frequent, pollute the failure counter causing
6564                  * excessive cache_hot migrations and active balances.
6565                  */
6566                 if (idle != CPU_NEWLY_IDLE)
6567                         sd->nr_balance_failed++;
6568 
6569                 if (need_active_balance(&env)) {
6570                         raw_spin_lock_irqsave(&busiest->lock, flags);
6571 
6572                         /* don't kick the active_load_balance_cpu_stop,
6573                          * if the curr task on busiest cpu can't be
6574                          * moved to this_cpu
6575                          */
6576                         if (!cpumask_test_cpu(this_cpu,
6577                                         tsk_cpus_allowed(busiest->curr))) {
6578                                 raw_spin_unlock_irqrestore(&busiest->lock,
6579                                                             flags);
6580                                 env.flags |= LBF_ALL_PINNED;
6581                                 goto out_one_pinned;
6582                         }
6583 
6584                         /*
6585                          * ->active_balance synchronizes accesses to
6586                          * ->active_balance_work.  Once set, it's cleared
6587                          * only after active load balance is finished.
6588                          */
6589                         if (!busiest->active_balance) {
6590                                 busiest->active_balance = 1;
6591                                 busiest->push_cpu = this_cpu;
6592                                 active_balance = 1;
6593                         }
6594                         raw_spin_unlock_irqrestore(&busiest->lock, flags);
6595 
6596                         if (active_balance) {
6597                                 stop_one_cpu_nowait(cpu_of(busiest),
6598                                         active_load_balance_cpu_stop, busiest,
6599                                         &busiest->active_balance_work);
6600                         }
6601 
6602                         /*
6603                          * We've kicked active balancing, reset the failure
6604                          * counter.
6605                          */
6606                         sd->nr_balance_failed = sd->cache_nice_tries+1;
6607                 }
6608         } else
6609                 sd->nr_balance_failed = 0;
6610 
6611         if (likely(!active_balance)) {
6612                 /* We were unbalanced, so reset the balancing interval */
6613                 sd->balance_interval = sd->min_interval;
6614         } else {
6615                 /*
6616                  * If we've begun active balancing, start to back off. This
6617                  * case may not be covered by the all_pinned logic if there
6618                  * is only 1 task on the busy runqueue (because we don't call
6619                  * move_tasks).
6620                  */
6621                 if (sd->balance_interval < sd->max_interval)
6622                         sd->balance_interval *= 2;
6623         }
6624 
6625         goto out;
6626 
6627 out_balanced:
6628         schedstat_inc(sd, lb_balanced[idle]);
6629 
6630         sd->nr_balance_failed = 0;
6631 
6632 out_one_pinned:
6633         /* tune up the balancing interval */
6634         if (((env.flags & LBF_ALL_PINNED) &&
6635                         sd->balance_interval < MAX_PINNED_INTERVAL) ||
6636                         (sd->balance_interval < sd->max_interval))
6637                 sd->balance_interval *= 2;
6638 
6639         ld_moved = 0;
6640 out:
6641         return ld_moved;
6642 }
6643 
6644 /*
6645  * idle_balance is called by schedule() if this_cpu is about to become
6646  * idle. Attempts to pull tasks from other CPUs.
6647  */
6648 static int idle_balance(struct rq *this_rq)
6649 {
6650         struct sched_domain *sd;
6651         int pulled_task = 0;
6652         unsigned long next_balance = jiffies + HZ;
6653         u64 curr_cost = 0;
6654         int this_cpu = this_rq->cpu;
6655 
6656         idle_enter_fair(this_rq);
6657 
6658         /*
6659          * We must set idle_stamp _before_ calling idle_balance(), such that we
6660          * measure the duration of idle_balance() as idle time.
6661          */
6662         this_rq->idle_stamp = rq_clock(this_rq);
6663 
6664         if (this_rq->avg_idle < sysctl_sched_migration_cost)
6665                 goto out;
6666 
6667         /*
6668          * Drop the rq->lock, but keep IRQ/preempt disabled.
6669          */
6670         raw_spin_unlock(&this_rq->lock);
6671 
6672         update_blocked_averages(this_cpu);
6673         rcu_read_lock();
6674         for_each_domain(this_cpu, sd) {
6675                 unsigned long interval;
6676                 int continue_balancing = 1;
6677                 u64 t0, domain_cost;
6678 
6679                 if (!(sd->flags & SD_LOAD_BALANCE))
6680                         continue;
6681 
6682                 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6683                         break;
6684 
6685                 if (sd->flags & SD_BALANCE_NEWIDLE) {
6686                         t0 = sched_clock_cpu(this_cpu);
6687 
6688                         /* If we've pulled tasks over stop searching: */
6689                         pulled_task = load_balance(this_cpu, this_rq,
6690                                                    sd, CPU_NEWLY_IDLE,
6691                                                    &continue_balancing);
6692 
6693                         domain_cost = sched_clock_cpu(this_cpu) - t0;
6694                         if (domain_cost > sd->max_newidle_lb_cost)
6695                                 sd->max_newidle_lb_cost = domain_cost;
6696 
6697                         curr_cost += domain_cost;
6698                 }
6699 
6700                 interval = msecs_to_jiffies(sd->balance_interval);
6701                 if (time_after(next_balance, sd->last_balance + interval))
6702                         next_balance = sd->last_balance + interval;
6703                 if (pulled_task)
6704                         break;
6705         }
6706         rcu_read_unlock();
6707 
6708         raw_spin_lock(&this_rq->lock);
6709 
6710         if (curr_cost > this_rq->max_idle_balance_cost)
6711                 this_rq->max_idle_balance_cost = curr_cost;
6712 
6713         /*
6714          * While browsing the domains, we released the rq lock, a task could
6715          * have been enqueued in the meantime. Since we're not going idle,
6716          * pretend we pulled a task.
6717          */
6718         if (this_rq->cfs.h_nr_running && !pulled_task)
6719                 pulled_task = 1;
6720 
6721         if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6722                 /*
6723                  * We are going idle. next_balance may be set based on
6724                  * a busy processor. So reset next_balance.
6725                  */
6726                 this_rq->next_balance = next_balance;
6727         }
6728 
6729 out:
6730         /* Is there a task of a high priority class? */
6731         if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
6732             ((this_rq->stop && this_rq->stop->on_rq) ||
6733              this_rq->dl.dl_nr_running ||
6734              (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
6735                 pulled_task = -1;
6736 
6737         if (pulled_task) {
6738                 idle_exit_fair(this_rq);
6739                 this_rq->idle_stamp = 0;
6740         }
6741 
6742         return pulled_task;
6743 }
6744 
6745 /*
6746  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6747  * running tasks off the busiest CPU onto idle CPUs. It requires at
6748  * least 1 task to be running on each physical CPU where possible, and
6749  * avoids physical / logical imbalances.
6750  */
6751 static int active_load_balance_cpu_stop(void *data)
6752 {
6753         struct rq *busiest_rq = data;
6754         int busiest_cpu = cpu_of(busiest_rq);
6755         int target_cpu = busiest_rq->push_cpu;
6756         struct rq *target_rq = cpu_rq(target_cpu);
6757         struct sched_domain *sd;
6758 
6759         raw_spin_lock_irq(&busiest_rq->lock);
6760 
6761         /* make sure the requested cpu hasn't gone down in the meantime */
6762         if (unlikely(busiest_cpu != smp_processor_id() ||
6763                      !busiest_rq->active_balance))
6764                 goto out_unlock;
6765 
6766         /* Is there any task to move? */
6767         if (busiest_rq->nr_running <= 1)
6768                 goto out_unlock;
6769 
6770         /*
6771          * This condition is "impossible", if it occurs
6772          * we need to fix it. Originally reported by
6773          * Bjorn Helgaas on a 128-cpu setup.
6774          */
6775         BUG_ON(busiest_rq == target_rq);
6776 
6777         /* move a task from busiest_rq to target_rq */
6778         double_lock_balance(busiest_rq, target_rq);
6779 
6780         /* Search for an sd spanning us and the target CPU. */
6781         rcu_read_lock();
6782         for_each_domain(target_cpu, sd) {
6783                 if ((sd->flags & SD_LOAD_BALANCE) &&
6784                     cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6785                                 break;
6786         }
6787 
6788         if (likely(sd)) {
6789                 struct lb_env env = {
6790                         .sd             = sd,
6791                         .dst_cpu        = target_cpu,
6792                         .dst_rq         = target_rq,
6793                         .src_cpu        = busiest_rq->cpu,
6794                         .src_rq         = busiest_rq,
6795                         .idle           = CPU_IDLE,
6796                 };
6797 
6798                 schedstat_inc(sd, alb_count);
6799 
6800                 if (move_one_task(&env))
6801                         schedstat_inc(sd, alb_pushed);
6802                 else
6803                         schedstat_inc(sd, alb_failed);
6804         }
6805         rcu_read_unlock();
6806         double_unlock_balance(busiest_rq, target_rq);
6807 out_unlock:
6808         busiest_rq->active_balance = 0;
6809         raw_spin_unlock_irq(&busiest_rq->lock);
6810         return 0;
6811 }
6812 
6813 static inline int on_null_domain(struct rq *rq)
6814 {
6815         return unlikely(!rcu_dereference_sched(rq->sd));
6816 }
6817 
6818 #ifdef CONFIG_NO_HZ_COMMON
6819 /*
6820  * idle load balancing details
6821  * - When one of the busy CPUs notice that there may be an idle rebalancing
6822  *   needed, they will kick the idle load balancer, which then does idle
6823  *   load balancing for all the idle CPUs.
6824  */
6825 static struct {
6826         cpumask_var_t idle_cpus_mask;
6827         atomic_t nr_cpus;
6828         unsigned long next_balance;     /* in jiffy units */
6829 } nohz ____cacheline_aligned;
6830 
6831 static inline int find_new_ilb(void)
6832 {
6833         int ilb = cpumask_first(nohz.idle_cpus_mask);
6834 
6835         if (ilb < nr_cpu_ids && idle_cpu(ilb))
6836                 return ilb;
6837 
6838         return nr_cpu_ids;
6839 }
6840 
6841 /*
6842  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
6843  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
6844  * CPU (if there is one).
6845  */
6846 static void nohz_balancer_kick(void)
6847 {
6848         int ilb_cpu;
6849 
6850         nohz.next_balance++;
6851 
6852         ilb_cpu = find_new_ilb();
6853 
6854         if (ilb_cpu >= nr_cpu_ids)
6855                 return;
6856 
6857         if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
6858                 return;
6859         /*
6860          * Use smp_send_reschedule() instead of resched_cpu().
6861          * This way we generate a sched IPI on the target cpu which
6862          * is idle. And the softirq performing nohz idle load balance
6863          * will be run before returning from the IPI.
6864          */
6865         smp_send_reschedule(ilb_cpu);
6866         return;
6867 }
6868 
6869 static inline void nohz_balance_exit_idle(int cpu)
6870 {
6871         if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6872                 /*
6873                  * Completely isolated CPUs don't ever set, so we must test.
6874                  */
6875                 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
6876                         cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6877                         atomic_dec(&nohz.nr_cpus);
6878                 }
6879                 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6880         }
6881 }
6882 
6883 static inline void set_cpu_sd_state_busy(void)
6884 {
6885         struct sched_domain *sd;
6886         int cpu = smp_processor_id();
6887 
6888         rcu_read_lock();
6889         sd = rcu_dereference(per_cpu(sd_busy, cpu));
6890 
6891         if (!sd || !sd->nohz_idle)
6892                 goto unlock;
6893         sd->nohz_idle = 0;
6894 
6895         atomic_inc(&sd->groups->sgp->nr_busy_cpus);
6896 unlock:
6897         rcu_read_unlock();
6898 }
6899 
6900 void set_cpu_sd_state_idle(void)
6901 {
6902         struct sched_domain *sd;
6903         int cpu = smp_processor_id();
6904 
6905         rcu_read_lock();
6906         sd = rcu_dereference(per_cpu(sd_busy, cpu));
6907 
6908         if (!sd || sd->nohz_idle)
6909                 goto unlock;
6910         sd->nohz_idle = 1;
6911 
6912         atomic_dec(&sd->groups->sgp->nr_busy_cpus);
6913 unlock:
6914         rcu_read_unlock();
6915 }
6916 
6917 /*
6918  * This routine will record that the cpu is going idle with tick stopped.
6919  * This info will be used in performing idle load balancing in the future.
6920  */
6921 void nohz_balance_enter_idle(int cpu)
6922 {
6923         /*
6924          * If this cpu is going down, then nothing needs to be done.
6925          */
6926         if (!cpu_active(cpu))
6927                 return;
6928 
6929         if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6930                 return;
6931 
6932         /*
6933          * If we're a completely isolated CPU, we don't play.
6934          */
6935         if (on_null_domain(cpu_rq(cpu)))
6936                 return;
6937 
6938         cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6939         atomic_inc(&nohz.nr_cpus);
6940         set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6941 }
6942 
6943 static int sched_ilb_notifier(struct notifier_block *nfb,
6944                                         unsigned long action, void *hcpu)
6945 {
6946         switch (action & ~CPU_TASKS_FROZEN) {
6947         case CPU_DYING:
6948                 nohz_balance_exit_idle(smp_processor_id());
6949                 return NOTIFY_OK;
6950         default:
6951                 return NOTIFY_DONE;
6952         }
6953 }
6954 #endif
6955 
6956 static DEFINE_SPINLOCK(balancing);
6957 
6958 /*
6959  * Scale the max load_balance interval with the number of CPUs in the system.
6960  * This trades load-balance latency on larger machines for less cross talk.
6961  */
6962 void update_max_interval(void)
6963 {
6964         max_load_balance_interval = HZ*num_online_cpus()/10;
6965 }
6966 
6967 /*
6968  * It checks each scheduling domain to see if it is due to be balanced,
6969  * and initiates a balancing operation if so.
6970  *
6971  * Balancing parameters are set up in init_sched_domains.
6972  */
6973 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
6974 {
6975         int continue_balancing = 1;
6976         int cpu = rq->cpu;
6977         unsigned long interval;
6978         struct sched_domain *sd;
6979         /* Earliest time when we have to do rebalance again */
6980         unsigned long next_balance = jiffies + 60*HZ;
6981         int update_next_balance = 0;
6982         int need_serialize, need_decay = 0;
6983         u64 max_cost = 0;
6984 
6985         update_blocked_averages(cpu);
6986 
6987         rcu_read_lock();
6988         for_each_domain(cpu, sd) {
6989                 /*
6990                  * Decay the newidle max times here because this is a regular
6991                  * visit to all the domains. Decay ~1% per second.
6992                  */
6993                 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6994                         sd->max_newidle_lb_cost =
6995                                 (sd->max_newidle_lb_cost * 253) / 256;
6996                         sd->next_decay_max_lb_cost = jiffies + HZ;
6997                         need_decay = 1;
6998                 }
6999                 max_cost += sd->max_newidle_lb_cost;
7000 
7001                 if (!(sd->flags & SD_LOAD_BALANCE))
7002                         continue;
7003 
7004                 /*
7005                  * Stop the load balance at this level. There is another
7006                  * CPU in our sched group which is doing load balancing more
7007                  * actively.
7008                  */
7009                 if (!continue_balancing) {
7010                         if (need_decay)
7011                                 continue;
7012                         break;
7013                 }
7014 
7015                 interval = sd->balance_interval;
7016                 if (idle != CPU_IDLE)
7017                         interval *= sd->busy_factor;
7018 
7019                 /* scale ms to jiffies */
7020                 interval = msecs_to_jiffies(interval);
7021                 interval = clamp(interval, 1UL, max_load_balance_interval);
7022 
7023                 need_serialize = sd->flags & SD_SERIALIZE;
7024 
7025                 if (need_serialize) {
7026                         if (!spin_trylock(&balancing))
7027                                 goto out;
7028                 }
7029 
7030                 if (time_after_eq(jiffies, sd->last_balance + interval)) {
7031                         if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
7032                                 /*
7033                                  * The LBF_DST_PINNED logic could have changed
7034                                  * env->dst_cpu, so we can't know our idle
7035                                  * state even if we migrated tasks. Update it.
7036                                  */
7037                                 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
7038                         }
7039                         sd->last_balance = jiffies;
7040                 }
7041                 if (need_serialize)
7042                         spin_unlock(&balancing);
7043 out:
7044                 if (time_after(next_balance, sd->last_balance + interval)) {
7045                         next_balance = sd->last_balance + interval;
7046                         update_next_balance = 1;
7047                 }
7048         }
7049         if (need_decay) {
7050                 /*
7051                  * Ensure the rq-wide value also decays but keep it at a
7052                  * reasonable floor to avoid funnies with rq->avg_idle.
7053                  */
7054                 rq->max_idle_balance_cost =
7055                         max((u64)sysctl_sched_migration_cost, max_cost);
7056         }
7057         rcu_read_unlock();
7058 
7059         /*
7060          * next_balance will be updated only when there is a need.
7061          * When the cpu is attached to null domain for ex, it will not be
7062          * updated.
7063          */
7064         if (likely(update_next_balance))
7065                 rq->next_balance = next_balance;
7066 }
7067 
7068 #ifdef CONFIG_NO_HZ_COMMON
7069 /*
7070  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
7071  * rebalancing for all the cpus for whom scheduler ticks are stopped.
7072  */
7073 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
7074 {
7075         int this_cpu = this_rq->cpu;
7076         struct rq *rq;
7077         int balance_cpu;
7078 
7079         if (idle != CPU_IDLE ||
7080             !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7081                 goto end;
7082 
7083         for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
7084                 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
7085                         continue;
7086 
7087                 /*
7088                  * If this cpu gets work to do, stop the load balancing
7089                  * work being done for other cpus. Next load
7090                  * balancing owner will pick it up.
7091                  */
7092                 if (need_resched())
7093                         break;
7094 
7095                 rq = cpu_rq(balance_cpu);
7096 
7097                 raw_spin_lock_irq(&rq->lock);
7098                 update_rq_clock(rq);
7099                 update_idle_cpu_load(rq);
7100                 raw_spin_unlock_irq(&rq->lock);
7101 
7102                 rebalance_domains(rq, CPU_IDLE);
7103 
7104                 if (time_after(this_rq->next_balance, rq->next_balance))
7105                         this_rq->next_balance = rq->next_balance;
7106         }
7107         nohz.next_balance = this_rq->next_balance;
7108 end:
7109         clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
7110 }
7111 
7112 /*
7113  * Current heuristic for kicking the idle load balancer in the presence
7114  * of an idle cpu is the system.
7115  *   - This rq has more than one task.
7116  *   - At any scheduler domain level, this cpu's scheduler group has multiple
7117  *     busy cpu's exceeding the group's power.
7118  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
7119  *     domain span are idle.
7120  */
7121 static inline int nohz_kick_needed(struct rq *rq)
7122 {
7123         unsigned long now = jiffies;
7124         struct sched_domain *sd;
7125         struct sched_group_power *sgp;
7126         int nr_busy, cpu = rq->cpu;
7127 
7128         if (unlikely(rq->idle_balance))
7129                 return 0;
7130 
7131        /*
7132         * We may be recently in ticked or tickless idle mode. At the first
7133         * busy tick after returning from idle, we will update the busy stats.
7134         */
7135         set_cpu_sd_state_busy();
7136         nohz_balance_exit_idle(cpu);
7137 
7138         /*
7139          * None are in tickless mode and hence no need for NOHZ idle load
7140          * balancing.
7141          */
7142         if (likely(!atomic_read(&nohz.nr_cpus)))
7143                 return 0;
7144 
7145         if (time_before(now, nohz.next_balance))
7146                 return 0;
7147 
7148         if (rq->nr_running >= 2)
7149                 goto need_kick;
7150 
7151         rcu_read_lock();
7152         sd = rcu_dereference(per_cpu(sd_busy, cpu));
7153 
7154         if (sd) {
7155                 sgp = sd->groups->sgp;
7156                 nr_busy = atomic_read(&sgp->nr_busy_cpus);
7157 
7158                 if (nr_busy > 1)
7159                         goto need_kick_unlock;
7160         }
7161 
7162         sd = rcu_dereference(per_cpu(sd_asym, cpu));
7163 
7164         if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
7165                                   sched_domain_span(sd)) < cpu))
7166                 goto need_kick_unlock;
7167 
7168         rcu_read_unlock();
7169         return 0;
7170 
7171 need_kick_unlock:
7172         rcu_read_unlock();
7173 need_kick:
7174         return 1;
7175 }
7176 #else
7177 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
7178 #endif
7179 
7180 /*
7181  * run_rebalance_domains is triggered when needed from the scheduler tick.
7182  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
7183  */
7184 static void run_rebalance_domains(struct softirq_action *h)
7185 {
7186         struct rq *this_rq = this_rq();
7187         enum cpu_idle_type idle = this_rq->idle_balance ?
7188                                                 CPU_IDLE : CPU_NOT_IDLE;
7189 
7190         rebalance_domains(this_rq, idle);
7191 
7192         /*
7193          * If this cpu has a pending nohz_balance_kick, then do the
7194          * balancing on behalf of the other idle cpus whose ticks are
7195          * stopped.
7196          */
7197         nohz_idle_balance(this_rq, idle);
7198 }
7199 
7200 /*
7201  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
7202  */
7203 void trigger_load_balance(struct rq *rq)
7204 {
7205         /* Don't need to rebalance while attached to NULL domain */
7206         if (unlikely(on_null_domain(rq)))
7207                 return;
7208 
7209         if (time_after_eq(jiffies, rq->next_balance))
7210                 raise_softirq(SCHED_SOFTIRQ);
7211 #ifdef CONFIG_NO_HZ_COMMON
7212         if (nohz_kick_needed(rq))
7213                 nohz_balancer_kick();
7214 #endif
7215 }
7216 
7217 static void rq_online_fair(struct rq *rq)
7218 {
7219         update_sysctl();
7220 }
7221 
7222 static void rq_offline_fair(struct rq *rq)
7223 {
7224         update_sysctl();
7225 
7226         /* Ensure any throttled groups are reachable by pick_next_task */
7227         unthrottle_offline_cfs_rqs(rq);
7228 }
7229 
7230 #endif /* CONFIG_SMP */
7231 
7232 /*
7233  * scheduler tick hitting a task of our scheduling class:
7234  */
7235 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
7236 {
7237         struct cfs_rq *cfs_rq;
7238         struct sched_entity *se = &curr->se;
7239 
7240         for_each_sched_entity(se) {
7241                 cfs_rq = cfs_rq_of(se);
7242                 entity_tick(cfs_rq, se, queued);
7243         }
7244 
7245         if (numabalancing_enabled)
7246                 task_tick_numa(rq, curr);
7247 
7248         update_rq_runnable_avg(rq, 1);
7249 }
7250 
7251 /*
7252  * called on fork with the child task as argument from the parent's context
7253  *  - child not yet on the tasklist
7254  *  - preemption disabled
7255  */
7256 static void task_fork_fair(struct task_struct *p)
7257 {
7258         struct cfs_rq *cfs_rq;
7259         struct sched_entity *se = &p->se, *curr;
7260         int this_cpu = smp_processor_id();
7261         struct rq *rq = this_rq();
7262         unsigned long flags;
7263 
7264         raw_spin_lock_irqsave(&rq->lock, flags);
7265 
7266         update_rq_clock(rq);
7267 
7268         cfs_rq = task_cfs_rq(current);
7269         curr = cfs_rq->curr;
7270 
7271         /*
7272          * Not only the cpu but also the task_group of the parent might have
7273          * been changed after parent->se.parent,cfs_rq were copied to
7274          * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
7275          * of child point to valid ones.
7276          */
7277         rcu_read_lock();
7278         __set_task_cpu(p, this_cpu);
7279         rcu_read_unlock();
7280 
7281         update_curr(cfs_rq);
7282 
7283         if (curr)
7284                 se->vruntime = curr->vruntime;
7285         place_entity(cfs_rq, se, 1);
7286 
7287         if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
7288                 /*
7289                  * Upon rescheduling, sched_class::put_prev_task() will place
7290                  * 'current' within the tree based on its new key value.
7291                  */
7292                 swap(curr->vruntime, se->vruntime);
7293                 resched_task(rq->curr);
7294         }
7295 
7296         se->vruntime -= cfs_rq->min_vruntime;
7297 
7298         raw_spin_unlock_irqrestore(&rq->lock, flags);
7299 }
7300 
7301 /*
7302  * Priority of the task has changed. Check to see if we preempt
7303  * the current task.
7304  */
7305 static void
7306 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
7307 {
7308         if (!p->se.on_rq)
7309                 return;
7310 
7311         /*
7312          * Reschedule if we are currently running on this runqueue and
7313          * our priority decreased, or if we are not currently running on
7314          * this runqueue and our priority is higher than the current's
7315          */
7316         if (rq->curr == p) {
7317                 if (p->prio > oldprio)
7318                         resched_task(rq->curr);
7319         } else
7320                 check_preempt_curr(rq, p, 0);
7321 }
7322 
7323 static void switched_from_fair(struct rq *rq, struct task_struct *p)
7324 {
7325         struct sched_entity *se = &p->se;
7326         struct cfs_rq *cfs_rq = cfs_rq_of(se);
7327 
7328         /*
7329          * Ensure the task's vruntime is normalized, so that when it's
7330          * switched back to the fair class the enqueue_entity(.flags=0) will
7331          * do the right thing.
7332          *
7333          * If it's on_rq, then the dequeue_entity(.flags=0) will already
7334          * have normalized the vruntime, if it's !on_rq, then only when
7335          * the task is sleeping will it still have non-normalized vruntime.
7336          */
7337         if (!p->on_rq && p->state != TASK_RUNNING) {
7338                 /*
7339                  * Fix up our vruntime so that the current sleep doesn't
7340                  * cause 'unlimited' sleep bonus.
7341                  */
7342                 place_entity(cfs_rq, se, 0);
7343                 se->vruntime -= cfs_rq->min_vruntime;
7344         }
7345 
7346 #ifdef CONFIG_SMP
7347         /*
7348         * Remove our load from contribution when we leave sched_fair
7349         * and ensure we don't carry in an old decay_count if we
7350         * switch back.
7351         */
7352         if (se->avg.decay_count) {
7353                 __synchronize_entity_decay(se);
7354                 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
7355         }
7356 #endif
7357 }
7358 
7359 /*
7360  * We switched to the sched_fair class.
7361  */
7362 static void switched_to_fair(struct rq *rq, struct task_struct *p)
7363 {
7364         struct sched_entity *se = &p->se;
7365 #ifdef CONFIG_FAIR_GROUP_SCHED
7366         /*
7367          * Since the real-depth could have been changed (only FAIR
7368          * class maintain depth value), reset depth properly.
7369          */
7370         se->depth = se->parent ? se->parent->depth + 1 : 0;
7371 #endif
7372         if (!se->on_rq)
7373                 return;
7374 
7375         /*
7376          * We were most likely switched from sched_rt, so
7377          * kick off the schedule if running, otherwise just see
7378          * if we can still preempt the current task.
7379          */
7380         if (rq->curr == p)
7381                 resched_task(rq->curr);
7382         else
7383                 check_preempt_curr(rq, p, 0);
7384 }
7385 
7386 /* Account for a task changing its policy or group.
7387  *
7388  * This routine is mostly called to set cfs_rq->curr field when a task
7389  * migrates between groups/classes.
7390  */
7391 static void set_curr_task_fair(struct rq *rq)
7392 {
7393         struct sched_entity *se = &rq->curr->se;
7394 
7395         for_each_sched_entity(se) {
7396                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7397 
7398                 set_next_entity(cfs_rq, se);
7399                 /* ensure bandwidth has been allocated on our new cfs_rq */
7400                 account_cfs_rq_runtime(cfs_rq, 0);
7401         }
7402 }
7403 
7404 void init_cfs_rq(struct cfs_rq *cfs_rq)
7405 {
7406         cfs_rq->tasks_timeline = RB_ROOT;
7407         cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7408 #ifndef CONFIG_64BIT
7409         cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7410 #endif
7411 #ifdef CONFIG_SMP
7412         atomic64_set(&cfs_rq->decay_counter, 1);
7413         atomic_long_set(&cfs_rq->removed_load, 0);
7414 #endif
7415 }
7416 
7417 #ifdef CONFIG_FAIR_GROUP_SCHED
7418 static void task_move_group_fair(struct task_struct *p, int on_rq)
7419 {
7420         struct sched_entity *se = &p->se;
7421         struct cfs_rq *cfs_rq;
7422 
7423         /*
7424          * If the task was not on the rq at the time of this cgroup movement
7425          * it must have been asleep, sleeping tasks keep their ->vruntime
7426          * absolute on their old rq until wakeup (needed for the fair sleeper
7427          * bonus in place_entity()).
7428          *
7429          * If it was on the rq, we've just 'preempted' it, which does convert
7430          * ->vruntime to a relative base.
7431          *
7432          * Make sure both cases convert their relative position when migrating
7433          * to another cgroup's rq. This does somewhat interfere with the
7434          * fair sleeper stuff for the first placement, but who cares.
7435          */
7436         /*
7437          * When !on_rq, vruntime of the task has usually NOT been normalized.
7438          * But there are some cases where it has already been normalized:
7439          *
7440          * - Moving a forked child which is waiting for being woken up by
7441          *   wake_up_new_task().
7442          * - Moving a task which has been woken up by try_to_wake_up() and
7443          *   waiting for actually being woken up by sched_ttwu_pending().
7444          *
7445          * To prevent boost or penalty in the new cfs_rq caused by delta
7446          * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
7447          */
7448         if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
7449                 on_rq = 1;
7450 
7451         if (!on_rq)
7452                 se->vruntime -= cfs_rq_of(se)->min_vruntime;
7453         set_task_rq(p, task_cpu(p));
7454         se->depth = se->parent ? se->parent->depth + 1 : 0;
7455         if (!on_rq) {
7456                 cfs_rq = cfs_rq_of(se);
7457                 se->vruntime += cfs_rq->min_vruntime;
7458 #ifdef CONFIG_SMP
7459                 /*
7460                  * migrate_task_rq_fair() will have removed our previous
7461                  * contribution, but we must synchronize for ongoing future
7462                  * decay.
7463                  */
7464                 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7465                 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
7466 #endif
7467         }
7468 }
7469 
7470 void free_fair_sched_group(struct task_group *tg)
7471 {
7472         int i;
7473 
7474         destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7475 
7476         for_each_possible_cpu(i) {
7477                 if (tg->cfs_rq)
7478                         kfree(tg->cfs_rq[i]);
7479                 if (tg->se)
7480                         kfree(tg->se[i]);
7481         }
7482 
7483         kfree(tg->cfs_rq);
7484         kfree(tg->se);
7485 }
7486 
7487 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7488 {
7489         struct cfs_rq *cfs_rq;
7490         struct sched_entity *se;
7491         int i;
7492 
7493         tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7494         if (!tg->cfs_rq)
7495                 goto err;
7496         tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7497         if (!tg->se)
7498                 goto err;
7499 
7500         tg->shares = NICE_0_LOAD;
7501 
7502         init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7503 
7504         for_each_possible_cpu(i) {
7505                 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7506                                       GFP_KERNEL, cpu_to_node(i));
7507                 if (!cfs_rq)
7508                         goto err;
7509 
7510                 se = kzalloc_node(sizeof(struct sched_entity),
7511                                   GFP_KERNEL, cpu_to_node(i));
7512                 if (!se)
7513                         goto err_free_rq;
7514 
7515                 init_cfs_rq(cfs_rq);
7516                 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7517         }
7518 
7519         return 1;
7520 
7521 err_free_rq:
7522         kfree(cfs_rq);
7523 err:
7524         return 0;
7525 }
7526 
7527 void unregister_fair_sched_group(struct task_group *tg, int cpu)
7528 {
7529         struct rq *rq = cpu_rq(cpu);
7530         unsigned long flags;
7531 
7532         /*
7533         * Only empty task groups can be destroyed; so we can speculatively
7534         * check on_list without danger of it being re-added.
7535         */
7536         if (!tg->cfs_rq[cpu]->on_list)
7537                 return;
7538 
7539         raw_spin_lock_irqsave(&rq->lock, flags);
7540         list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7541         raw_spin_unlock_irqrestore(&rq->lock, flags);
7542 }
7543 
7544 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7545                         struct sched_entity *se, int cpu,
7546                         struct sched_entity *parent)
7547 {
7548         struct rq *rq = cpu_rq(cpu);
7549 
7550         cfs_rq->tg = tg;
7551         cfs_rq->rq = rq;
7552         init_cfs_rq_runtime(cfs_rq);
7553 
7554         tg->cfs_rq[cpu] = cfs_rq;
7555         tg->se[cpu] = se;
7556 
7557         /* se could be NULL for root_task_group */
7558         if (!se)
7559                 return;
7560 
7561         if (!parent) {
7562                 se->cfs_rq = &rq->cfs;
7563                 se->depth = 0;
7564         } else {
7565                 se->cfs_rq = parent->my_q;
7566                 se->depth = parent->depth + 1;
7567         }
7568 
7569         se->my_q = cfs_rq;
7570         /* guarantee group entities always have weight */
7571         update_load_set(&se->load, NICE_0_LOAD);
7572         se->parent = parent;
7573 }
7574 
7575 static DEFINE_MUTEX(shares_mutex);
7576 
7577 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7578 {
7579         int i;
7580         unsigned long flags;
7581 
7582         /*
7583          * We can't change the weight of the root cgroup.
7584          */
7585         if (!tg->se[0])
7586                 return -EINVAL;
7587 
7588         shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7589 
7590         mutex_lock(&shares_mutex);
7591         if (tg->shares == shares)
7592                 goto done;
7593 
7594         tg->shares = shares;
7595         for_each_possible_cpu(i) {
7596                 struct rq *rq = cpu_rq(i);
7597                 struct sched_entity *se;
7598 
7599                 se = tg->se[i];
7600                 /* Propagate contribution to hierarchy */
7601                 raw_spin_lock_irqsave(&rq->lock, flags);
7602 
7603                 /* Possible calls to update_curr() need rq clock */
7604                 update_rq_clock(rq);
7605                 for_each_sched_entity(se)
7606                         update_cfs_shares(group_cfs_rq(se));
7607                 raw_spin_unlock_irqrestore(&rq->lock, flags);
7608         }
7609 
7610 done:
7611         mutex_unlock(&shares_mutex);
7612         return 0;
7613 }
7614 #else /* CONFIG_FAIR_GROUP_SCHED */
7615 
7616 void free_fair_sched_group(struct task_group *tg) { }
7617 
7618 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7619 {
7620         return 1;
7621 }
7622 
7623 void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7624 
7625 #endif /* CONFIG_FAIR_GROUP_SCHED */
7626 
7627 
7628 static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
7629 {
7630         struct sched_entity *se = &task->se;
7631         unsigned int rr_interval = 0;
7632 
7633         /*
7634          * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
7635          * idle runqueue:
7636          */
7637         if (rq->cfs.load.weight)
7638                 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
7639 
7640         return rr_interval;
7641 }
7642 
7643 /*
7644  * All the scheduling class methods:
7645  */
7646 const struct sched_class fair_sched_class = {
7647         .next                   = &idle_sched_class,
7648         .enqueue_task           = enqueue_task_fair,
7649         .dequeue_task           = dequeue_task_fair,
7650         .yield_task             = yield_task_fair,
7651         .yield_to_task          = yield_to_task_fair,
7652 
7653         .check_preempt_curr     = check_preempt_wakeup,
7654 
7655         .pick_next_task         = pick_next_task_fair,
7656         .put_prev_task          = put_prev_task_fair,
7657 
7658 #ifdef CONFIG_SMP
7659         .select_task_rq         = select_task_rq_fair,
7660         .migrate_task_rq        = migrate_task_rq_fair,
7661 
7662         .rq_online              = rq_online_fair,
7663         .rq_offline             = rq_offline_fair,
7664 
7665         .task_waking            = task_waking_fair,
7666 #endif
7667 
7668         .set_curr_task          = set_curr_task_fair,
7669         .task_tick              = task_tick_fair,
7670         .task_fork              = task_fork_fair,
7671 
7672         .prio_changed           = prio_changed_fair,
7673         .switched_from          = switched_from_fair,
7674         .switched_to            = switched_to_fair,
7675 
7676         .get_rr_interval        = get_rr_interval_fair,
7677 
7678 #ifdef CONFIG_FAIR_GROUP_SCHED
7679         .task_move_group        = task_move_group_fair,
7680 #endif
7681 };
7682 
7683 #ifdef CONFIG_SCHED_DEBUG
7684 void print_cfs_stats(struct seq_file *m, int cpu)
7685 {
7686         struct cfs_rq *cfs_rq;
7687 
7688         rcu_read_lock();
7689         for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
7690                 print_cfs_rq(m, cpu, cfs_rq);
7691         rcu_read_unlock();
7692 }
7693 #endif
7694 
7695 __init void init_sched_fair_class(void)
7696 {
7697 #ifdef CONFIG_SMP
7698         open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7699 
7700 #ifdef CONFIG_NO_HZ_COMMON
7701         nohz.next_balance = jiffies;
7702         zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
7703         cpu_notifier(sched_ilb_notifier, 0);
7704 #endif
7705 #endif /* SMP */
7706 
7707 }
7708 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us