
Linux/kernel/sched/fair.c

  1 /*
  2  * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
  3  *
  4  *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  5  *
  6  *  Interactivity improvements by Mike Galbraith
  7  *  (C) 2007 Mike Galbraith <efault@gmx.de>
  8  *
  9  *  Various enhancements by Dmitry Adamushko.
 10  *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 11  *
 12  *  Group scheduling enhancements by Srivatsa Vaddagiri
 13  *  Copyright IBM Corporation, 2007
 14  *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 15  *
 16  *  Scaled math optimizations by Thomas Gleixner
 17  *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 18  *
 19  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 20  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 21  */
 22 
 23 #include <linux/sched.h>
 24 #include <linux/latencytop.h>
 25 #include <linux/cpumask.h>
 26 #include <linux/cpuidle.h>
 27 #include <linux/slab.h>
 28 #include <linux/profile.h>
 29 #include <linux/interrupt.h>
 30 #include <linux/mempolicy.h>
 31 #include <linux/migrate.h>
 32 #include <linux/task_work.h>
 33 
 34 #include <trace/events/sched.h>
 35 
 36 #include "sched.h"
 37 
 38 /*
 39  * Targeted preemption latency for CPU-bound tasks:
 40  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 41  *
 42  * NOTE: this latency value is not the same as the concept of
 43  * 'timeslice length' - timeslices in CFS are of variable length
 44  * and have no persistent notion like in traditional, time-slice
 45  * based scheduling concepts.
 46  *
 47  * (to see the precise effective timeslice length of your workload,
 48  *  run vmstat and monitor the context-switches (cs) field)
 49  */
 50 unsigned int sysctl_sched_latency = 6000000ULL;
 51 unsigned int normalized_sysctl_sched_latency = 6000000ULL;
 52 
 53 /*
 54  * The initial- and re-scaling of tunables is configurable
 55  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 56  *
 57  * Options are:
 58  * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 59  * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
 60  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 61  */
 62 enum sched_tunable_scaling sysctl_sched_tunable_scaling
 63         = SCHED_TUNABLESCALING_LOG;
 64 
 65 /*
 66  * Minimal preemption granularity for CPU-bound tasks:
 67  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 68  */
 69 unsigned int sysctl_sched_min_granularity = 750000ULL;
 70 unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 71 
 72 /*
 73  * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 74  */
 75 static unsigned int sched_nr_latency = 8;
 76 
 77 /*
 78  * After fork, child runs first. If set to 0 (default) then
 79  * parent will (try to) run first.
 80  */
 81 unsigned int sysctl_sched_child_runs_first __read_mostly;
 82 
 83 /*
 84  * SCHED_OTHER wake-up granularity.
 85  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 86  *
 87  * This option delays the preemption effects of decoupled workloads
 88  * and reduces their over-scheduling. Synchronous workloads will still
 89  * have immediate wakeup/sleep latencies.
 90  */
 91 unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
 92 unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 93 
 94 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 95 
 96 /*
 97  * The exponential sliding window over which load is averaged for shares
 98  * distribution.
 99  * (default: 10msec)
100  */
101 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
102 
103 #ifdef CONFIG_CFS_BANDWIDTH
104 /*
105  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
106  * each time a cfs_rq requests quota.
107  *
108  * Note: in the case that the slice exceeds the runtime remaining (either due
109  * to consumption or the quota being specified to be smaller than the slice)
110  * we will always only issue the remaining available time.
111  *
112  * default: 5 msec, units: microseconds
 113  */
114 unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
115 #endif
116 
117 /*
118  * The margin used when comparing utilization with CPU capacity:
119  * util * 1024 < capacity * margin
120  */
121 unsigned int capacity_margin = 1280; /* ~20% */
122 
123 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
124 {
125         lw->weight += inc;
126         lw->inv_weight = 0;
127 }
128 
129 static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
130 {
131         lw->weight -= dec;
132         lw->inv_weight = 0;
133 }
134 
135 static inline void update_load_set(struct load_weight *lw, unsigned long w)
136 {
137         lw->weight = w;
138         lw->inv_weight = 0;
139 }
140 
141 /*
142  * Increase the granularity value when there are more CPUs,
143  * because with more CPUs the 'effective latency' as visible
144  * to users decreases. But the relationship is not linear,
145  * so pick a second-best guess by going with the log2 of the
146  * number of CPUs.
147  *
148  * This idea comes from the SD scheduler of Con Kolivas:
149  */
150 static unsigned int get_update_sysctl_factor(void)
151 {
152         unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
153         unsigned int factor;
154 
155         switch (sysctl_sched_tunable_scaling) {
156         case SCHED_TUNABLESCALING_NONE:
157                 factor = 1;
158                 break;
159         case SCHED_TUNABLESCALING_LINEAR:
160                 factor = cpus;
161                 break;
162         case SCHED_TUNABLESCALING_LOG:
163         default:
164                 factor = 1 + ilog2(cpus);
165                 break;
166         }
167 
168         return factor;
169 }
170 
171 static void update_sysctl(void)
172 {
173         unsigned int factor = get_update_sysctl_factor();
174 
175 #define SET_SYSCTL(name) \
176         (sysctl_##name = (factor) * normalized_sysctl_##name)
177         SET_SYSCTL(sched_min_granularity);
178         SET_SYSCTL(sched_latency);
179         SET_SYSCTL(sched_wakeup_granularity);
180 #undef SET_SYSCTL
181 }
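
As a concrete illustration of the default LOG scaling (a user-space sketch, not part of fair.c): on a 4-CPU machine get_update_sysctl_factor() returns 1 + ilog2(4) = 3, so update_sysctl() scales the normalized 6 ms latency to 18 ms, the 0.75 ms granularity to 2.25 ms and the 1 ms wakeup granularity to 3 ms. Note that num_online_cpus() is clamped to 8, so the factor never exceeds 4.

/* Illustrative user-space sketch of the SCHED_TUNABLESCALING_LOG policy. */
#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)	/* floor(log2(v)), v > 0 */
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int cpus = 4;			/* pretend num_online_cpus() == 4 */
        unsigned int factor = 1 + ilog2_u(cpus);

        printf("latency     = %u ns\n", factor * 6000000u);	/* 18000000 */
        printf("min_gran    = %u ns\n", factor * 750000u);	/*  2250000 */
        printf("wakeup_gran = %u ns\n", factor * 1000000u);	/*  3000000 */
        return 0;
}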
182 
183 void sched_init_granularity(void)
184 {
185         update_sysctl();
186 }
187 
188 #define WMULT_CONST     (~0U)
189 #define WMULT_SHIFT     32
190 
191 static void __update_inv_weight(struct load_weight *lw)
192 {
193         unsigned long w;
194 
195         if (likely(lw->inv_weight))
196                 return;
197 
198         w = scale_load_down(lw->weight);
199 
200         if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
201                 lw->inv_weight = 1;
202         else if (unlikely(!w))
203                 lw->inv_weight = WMULT_CONST;
204         else
205                 lw->inv_weight = WMULT_CONST / w;
206 }
207 
208 /*
209  * delta_exec * weight / lw.weight
210  *   OR
211  * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
212  *
 213  * Either weight := NICE_0_LOAD and lw is an entry of sched_prio_to_wmult[], in which case
214  * we're guaranteed shift stays positive because inv_weight is guaranteed to
215  * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
216  *
 217  * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
218  * weight/lw.weight <= 1, and therefore our shift will also be positive.
219  */
220 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
221 {
222         u64 fact = scale_load_down(weight);
223         int shift = WMULT_SHIFT;
224 
225         __update_inv_weight(lw);
226 
227         if (unlikely(fact >> 32)) {
228                 while (fact >> 32) {
229                         fact >>= 1;
230                         shift--;
231                 }
232         }
233 
234         /* hint to use a 32x32->64 mul */
235         fact = (u64)(u32)fact * lw->inv_weight;
236 
237         while (fact >> 32) {
238                 fact >>= 1;
239                 shift--;
240         }
241 
242         return mul_u64_u32_shr(delta_exec, fact, shift);
243 }
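
To see the fixed-point arithmetic in action, consider (as a sketch, not kernel code) a nice-0 entity of weight 1024 on a cfs_rq whose total weight is 3072: inv_weight becomes ~0U / 3072 = 1398101, fact = 1024 * 1398101 still fits in 32 bits so shift stays at WMULT_SHIFT, and a 6 ms delta maps to roughly 2 ms, i.e. the entity's 1024/3072 share. If fact had overflowed 32 bits, __calc_delta() would have shrunk the shift instead.

/* Illustrative user-space sketch of the fixed-point split in __calc_delta(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t delta_exec = 6000000;			/* 6 ms, in ns */
        uint32_t weight = 1024;				/* nice-0 entity */
        uint32_t rq_weight = 3072;			/* total cfs_rq weight */

        uint32_t inv_weight = 0xffffffffu / rq_weight;	/* WMULT_CONST / w */
        uint64_t fact = (uint64_t)weight * inv_weight;	/* fits in 32 bits here */
        uint64_t delta = (delta_exec * fact) >> 32;	/* mul_u64_u32_shr() */

        printf("%llu\n", (unsigned long long)delta);	/* ~1999999 ns */
        return 0;
}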
244 
245 
246 const struct sched_class fair_sched_class;
247 
248 /**************************************************************
249  * CFS operations on generic schedulable entities:
250  */
251 
252 #ifdef CONFIG_FAIR_GROUP_SCHED
253 
254 /* cpu runqueue to which this cfs_rq is attached */
255 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
256 {
257         return cfs_rq->rq;
258 }
259 
260 /* An entity is a task if it doesn't "own" a runqueue */
261 #define entity_is_task(se)      (!se->my_q)
262 
263 static inline struct task_struct *task_of(struct sched_entity *se)
264 {
265         SCHED_WARN_ON(!entity_is_task(se));
266         return container_of(se, struct task_struct, se);
267 }
268 
269 /* Walk up scheduling entities hierarchy */
270 #define for_each_sched_entity(se) \
271                 for (; se; se = se->parent)
272 
273 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
274 {
275         return p->se.cfs_rq;
276 }
277 
278 /* runqueue on which this entity is (to be) queued */
279 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
280 {
281         return se->cfs_rq;
282 }
283 
284 /* runqueue "owned" by this group */
285 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
286 {
287         return grp->my_q;
288 }
289 
290 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
291 {
292         if (!cfs_rq->on_list) {
293                 /*
294                  * Ensure we either appear before our parent (if already
295                  * enqueued) or force our parent to appear after us when it is
296                  * enqueued.  The fact that we always enqueue bottom-up
297                  * reduces this to two cases.
298                  */
299                 if (cfs_rq->tg->parent &&
300                     cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
301                         list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
302                                 &rq_of(cfs_rq)->leaf_cfs_rq_list);
303                 } else {
304                         list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
305                                 &rq_of(cfs_rq)->leaf_cfs_rq_list);
306                 }
307 
308                 cfs_rq->on_list = 1;
309         }
310 }
311 
312 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
313 {
314         if (cfs_rq->on_list) {
315                 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
316                 cfs_rq->on_list = 0;
317         }
318 }
319 
 320 /* Iterate through all leaf cfs_rq's on a runqueue */
321 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
322         list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
323 
324 /* Do the two (enqueued) entities belong to the same group ? */
325 static inline struct cfs_rq *
326 is_same_group(struct sched_entity *se, struct sched_entity *pse)
327 {
328         if (se->cfs_rq == pse->cfs_rq)
329                 return se->cfs_rq;
330 
331         return NULL;
332 }
333 
334 static inline struct sched_entity *parent_entity(struct sched_entity *se)
335 {
336         return se->parent;
337 }
338 
339 static void
340 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
341 {
342         int se_depth, pse_depth;
343 
344         /*
 345          * A preemption test can only be made between sibling entities that are in
 346          * the same cfs_rq, i.e. that have a common parent. Walk up the hierarchy of
 347          * both tasks until we find ancestors that are siblings under a common
 348          * parent.
349          */
350 
351         /* First walk up until both entities are at same depth */
352         se_depth = (*se)->depth;
353         pse_depth = (*pse)->depth;
354 
355         while (se_depth > pse_depth) {
356                 se_depth--;
357                 *se = parent_entity(*se);
358         }
359 
360         while (pse_depth > se_depth) {
361                 pse_depth--;
362                 *pse = parent_entity(*pse);
363         }
364 
365         while (!is_same_group(*se, *pse)) {
366                 *se = parent_entity(*se);
367                 *pse = parent_entity(*pse);
368         }
369 }
370 
371 #else   /* !CONFIG_FAIR_GROUP_SCHED */
372 
373 static inline struct task_struct *task_of(struct sched_entity *se)
374 {
375         return container_of(se, struct task_struct, se);
376 }
377 
378 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
379 {
380         return container_of(cfs_rq, struct rq, cfs);
381 }
382 
383 #define entity_is_task(se)      1
384 
385 #define for_each_sched_entity(se) \
386                 for (; se; se = NULL)
387 
388 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
389 {
390         return &task_rq(p)->cfs;
391 }
392 
393 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
394 {
395         struct task_struct *p = task_of(se);
396         struct rq *rq = task_rq(p);
397 
398         return &rq->cfs;
399 }
400 
401 /* runqueue "owned" by this group */
402 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
403 {
404         return NULL;
405 }
406 
407 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
408 {
409 }
410 
411 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
412 {
413 }
414 
415 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
416                 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
417 
418 static inline struct sched_entity *parent_entity(struct sched_entity *se)
419 {
420         return NULL;
421 }
422 
423 static inline void
424 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
425 {
426 }
427 
428 #endif  /* CONFIG_FAIR_GROUP_SCHED */
429 
430 static __always_inline
431 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
432 
433 /**************************************************************
434  * Scheduling class tree data structure manipulation methods:
435  */
436 
437 static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
438 {
439         s64 delta = (s64)(vruntime - max_vruntime);
440         if (delta > 0)
441                 max_vruntime = vruntime;
442 
443         return max_vruntime;
444 }
445 
446 static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
447 {
448         s64 delta = (s64)(vruntime - min_vruntime);
449         if (delta < 0)
450                 min_vruntime = vruntime;
451 
452         return min_vruntime;
453 }
454 
455 static inline int entity_before(struct sched_entity *a,
456                                 struct sched_entity *b)
457 {
458         return (s64)(a->vruntime - b->vruntime) < 0;
459 }
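
The (s64) cast in these helpers is what keeps the comparisons correct when vruntime eventually wraps past U64_MAX: the decision is made on the signed difference rather than on the raw values. A small sketch (not kernel code):

/* Illustrative sketch: signed-difference compare across a u64 wrap. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_vruntime = UINT64_MAX - 10;	/* just before the wrap */
        uint64_t vruntime = 5;				/* just after the wrap */

        /* A raw compare gets this wrong; the signed delta (+16 here) does not. */
        printf("raw:    %d\n", vruntime > max_vruntime);		/* 0 */
        printf("signed: %d\n", (int64_t)(vruntime - max_vruntime) > 0);	/* 1 */
        return 0;
}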
460 
461 static void update_min_vruntime(struct cfs_rq *cfs_rq)
462 {
463         struct sched_entity *curr = cfs_rq->curr;
464 
465         u64 vruntime = cfs_rq->min_vruntime;
466 
467         if (curr) {
468                 if (curr->on_rq)
469                         vruntime = curr->vruntime;
470                 else
471                         curr = NULL;
472         }
473 
474         if (cfs_rq->rb_leftmost) {
475                 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
476                                                    struct sched_entity,
477                                                    run_node);
478 
479                 if (!curr)
480                         vruntime = se->vruntime;
481                 else
482                         vruntime = min_vruntime(vruntime, se->vruntime);
483         }
484 
485         /* ensure we never gain time by being placed backwards. */
486         cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
487 #ifndef CONFIG_64BIT
488         smp_wmb();
489         cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
490 #endif
491 }
492 
493 /*
494  * Enqueue an entity into the rb-tree:
495  */
496 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
497 {
498         struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
499         struct rb_node *parent = NULL;
500         struct sched_entity *entry;
501         int leftmost = 1;
502 
503         /*
504          * Find the right place in the rbtree:
505          */
506         while (*link) {
507                 parent = *link;
508                 entry = rb_entry(parent, struct sched_entity, run_node);
509                 /*
 510                  * We don't care about collisions. Nodes with
511                  * the same key stay together.
512                  */
513                 if (entity_before(se, entry)) {
514                         link = &parent->rb_left;
515                 } else {
516                         link = &parent->rb_right;
517                         leftmost = 0;
518                 }
519         }
520 
521         /*
522          * Maintain a cache of leftmost tree entries (it is frequently
523          * used):
524          */
525         if (leftmost)
526                 cfs_rq->rb_leftmost = &se->run_node;
527 
528         rb_link_node(&se->run_node, parent, link);
529         rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
530 }
531 
532 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
533 {
534         if (cfs_rq->rb_leftmost == &se->run_node) {
535                 struct rb_node *next_node;
536 
537                 next_node = rb_next(&se->run_node);
538                 cfs_rq->rb_leftmost = next_node;
539         }
540 
541         rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
542 }
543 
544 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
545 {
546         struct rb_node *left = cfs_rq->rb_leftmost;
547 
548         if (!left)
549                 return NULL;
550 
551         return rb_entry(left, struct sched_entity, run_node);
552 }
553 
554 static struct sched_entity *__pick_next_entity(struct sched_entity *se)
555 {
556         struct rb_node *next = rb_next(&se->run_node);
557 
558         if (!next)
559                 return NULL;
560 
561         return rb_entry(next, struct sched_entity, run_node);
562 }
563 
564 #ifdef CONFIG_SCHED_DEBUG
565 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
566 {
567         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
568 
569         if (!last)
570                 return NULL;
571 
572         return rb_entry(last, struct sched_entity, run_node);
573 }
574 
575 /**************************************************************
576  * Scheduling class statistics methods:
577  */
578 
579 int sched_proc_update_handler(struct ctl_table *table, int write,
580                 void __user *buffer, size_t *lenp,
581                 loff_t *ppos)
582 {
583         int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
584         unsigned int factor = get_update_sysctl_factor();
585 
586         if (ret || !write)
587                 return ret;
588 
589         sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
590                                         sysctl_sched_min_granularity);
591 
592 #define WRT_SYSCTL(name) \
593         (normalized_sysctl_##name = sysctl_##name / (factor))
594         WRT_SYSCTL(sched_min_granularity);
595         WRT_SYSCTL(sched_latency);
596         WRT_SYSCTL(sched_wakeup_granularity);
597 #undef WRT_SYSCTL
598 
599         return 0;
600 }
601 #endif
602 
603 /*
604  * delta /= w
605  */
606 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
607 {
608         if (unlikely(se->load.weight != NICE_0_LOAD))
609                 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
610 
611         return delta;
612 }
613 
614 /*
615  * The idea is to set a period in which each task runs once.
616  *
617  * When there are too many tasks (sched_nr_latency) we have to stretch
618  * this period because otherwise the slices get too small.
619  *
620  * p = (nr <= nl) ? l : l*nr/nl
621  */
622 static u64 __sched_period(unsigned long nr_running)
623 {
624         if (unlikely(nr_running > sched_nr_latency))
625                 return nr_running * sysctl_sched_min_granularity;
626         else
627                 return sysctl_sched_latency;
628 }
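
With the default tunables (6 ms latency, 0.75 ms minimum granularity, sched_nr_latency = 8) this gives, for example: 5 runnable tasks share the plain 6 ms period, while 16 runnable tasks stretch it to 16 * 0.75 ms = 12 ms so that no slice drops below the minimum granularity. (Worked numbers for illustration only; the effective values also scale with the factor from get_update_sysctl_factor().)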
629 
630 /*
631  * We calculate the wall-time slice from the period by taking a part
632  * proportional to the weight.
633  *
634  * s = p*P[w/rw]
635  */
636 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
637 {
638         u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
639 
640         for_each_sched_entity(se) {
641                 struct load_weight *load;
642                 struct load_weight lw;
643 
644                 cfs_rq = cfs_rq_of(se);
645                 load = &cfs_rq->load;
646 
647                 if (unlikely(!se->on_rq)) {
648                         lw = cfs_rq->load;
649 
650                         update_load_add(&lw, se->load.weight);
651                         load = &lw;
652                 }
653                 slice = __calc_delta(slice, se->load.weight, load);
654         }
655         return slice;
656 }
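
For illustration, take a single cfs_rq with two runnable tasks, one at nice 0 (weight 1024) and one at nice 5 (weight 335, per sched_prio_to_weight[]): the period is 6 ms and the slices come out to roughly 6 ms * 1024/1359 = 4.52 ms and 6 ms * 335/1359 = 1.48 ms. With group scheduling the loop repeats this proportional split at every level of the hierarchy.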
657 
658 /*
659  * We calculate the vruntime slice of a to-be-inserted task.
660  *
661  * vs = s/w
662  */
663 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
664 {
665         return calc_delta_fair(sched_slice(cfs_rq, se), se);
666 }
667 
668 #ifdef CONFIG_SMP
669 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
670 static unsigned long task_h_load(struct task_struct *p);
671 
672 /*
673  * We choose a half-life close to 1 scheduling period.
674  * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
675  * dependent on this value.
676  */
677 #define LOAD_AVG_PERIOD 32
678 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
679 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
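
The half-life choice means the per-period (1024 us) decay factor y satisfies y^32 = 1/2. A rough user-space sketch of where LOAD_AVG_MAX comes from (the kernel itself uses the precomputed runnable_avg_yN_inv table and integer math, which settles at 47742):

/* Illustrative sketch: the geometric series behind LOAD_AVG_MAX. */
#include <math.h>
#include <stdio.h>

int main(void)
{
        double y = pow(0.5, 1.0 / 32);	/* per-period decay, y^32 == 0.5 */
        double sum = 0.0;
        int n;

        for (n = 0; n < 345; n++)	/* LOAD_AVG_MAX_N full periods */
                sum = sum * y + 1024.0;

        printf("y = %f, sum ~= %.0f\n", y, sum);	/* roughly 47.7k in FP */
        return 0;
}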
680 
 681 /* Give a new sched_entity initial runnable values so its load looks heavy while it is young */
682 void init_entity_runnable_average(struct sched_entity *se)
683 {
684         struct sched_avg *sa = &se->avg;
685 
686         sa->last_update_time = 0;
687         /*
 688          * sched_avg's period_contrib should be strictly less than 1024, so
 689          * we give it 1023 to make sure it is almost a period (1024us), and
 690          * will definitely be updated (after enqueue).
691          */
692         sa->period_contrib = 1023;
693         /*
 694          * Tasks are initialized with full load to be seen as heavy tasks until
 695          * they get a chance to stabilize to their real load level.
 696          * Group entities are initialized with zero load to reflect the fact that
697          * nothing has been attached to the task group yet.
698          */
699         if (entity_is_task(se))
700                 sa->load_avg = scale_load_down(se->load.weight);
701         sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
702         /*
703          * At this point, util_avg won't be used in select_task_rq_fair anyway
704          */
705         sa->util_avg = 0;
706         sa->util_sum = 0;
 707         /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
708 }
709 
710 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
711 static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
712 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
713 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
714 
715 /*
716  * With new tasks being created, their initial util_avgs are extrapolated
717  * based on the cfs_rq's current util_avg:
718  *
719  *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
720  *
721  * However, in many cases, the above util_avg does not give a desired
722  * value. Moreover, the sum of the util_avgs may be divergent, such
723  * as when the series is a harmonic series.
724  *
725  * To solve this problem, we also cap the util_avg of successive tasks to
726  * only 1/2 of the left utilization budget:
727  *
728  *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
729  *
730  * where n denotes the nth task.
731  *
 732  * For example, the simplest series from the beginning would look like:
733  *
734  *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
735  * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
736  *
737  * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
738  * if util_avg > util_avg_cap.
739  */
740 void post_init_entity_util_avg(struct sched_entity *se)
741 {
742         struct cfs_rq *cfs_rq = cfs_rq_of(se);
743         struct sched_avg *sa = &se->avg;
744         long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
745         u64 now = cfs_rq_clock_task(cfs_rq);
746 
747         if (cap > 0) {
748                 if (cfs_rq->avg.util_avg != 0) {
749                         sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
750                         sa->util_avg /= (cfs_rq->avg.load_avg + 1);
751 
752                         if (sa->util_avg > cap)
753                                 sa->util_avg = cap;
754                 } else {
755                         sa->util_avg = cap;
756                 }
757                 sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
758         }
759 
760         if (entity_is_task(se)) {
761                 struct task_struct *p = task_of(se);
762                 if (p->sched_class != &fair_sched_class) {
763                         /*
764                          * For !fair tasks do:
765                          *
766                         update_cfs_rq_load_avg(now, cfs_rq, false);
767                         attach_entity_load_avg(cfs_rq, se);
768                         switched_from_fair(rq, p);
769                          *
770                          * such that the next switched_to_fair() has the
771                          * expected state.
772                          */
773                         se->avg.last_update_time = now;
774                         return;
775                 }
776         }
777 
778         update_cfs_rq_load_avg(now, cfs_rq, false);
779         attach_entity_load_avg(cfs_rq, se);
780         update_tg_load_avg(cfs_rq, false);
781 }
782 
783 #else /* !CONFIG_SMP */
784 void init_entity_runnable_average(struct sched_entity *se)
785 {
786 }
787 void post_init_entity_util_avg(struct sched_entity *se)
788 {
789 }
790 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
791 {
792 }
793 #endif /* CONFIG_SMP */
794 
795 /*
796  * Update the current task's runtime statistics.
797  */
798 static void update_curr(struct cfs_rq *cfs_rq)
799 {
800         struct sched_entity *curr = cfs_rq->curr;
801         u64 now = rq_clock_task(rq_of(cfs_rq));
802         u64 delta_exec;
803 
804         if (unlikely(!curr))
805                 return;
806 
807         delta_exec = now - curr->exec_start;
808         if (unlikely((s64)delta_exec <= 0))
809                 return;
810 
811         curr->exec_start = now;
812 
813         schedstat_set(curr->statistics.exec_max,
814                       max(delta_exec, curr->statistics.exec_max));
815 
816         curr->sum_exec_runtime += delta_exec;
817         schedstat_add(cfs_rq->exec_clock, delta_exec);
818 
819         curr->vruntime += calc_delta_fair(delta_exec, curr);
820         update_min_vruntime(cfs_rq);
821 
822         if (entity_is_task(curr)) {
823                 struct task_struct *curtask = task_of(curr);
824 
825                 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
826                 cpuacct_charge(curtask, delta_exec);
827                 account_group_exec_runtime(curtask, delta_exec);
828         }
829 
830         account_cfs_rq_runtime(cfs_rq, delta_exec);
831 }
832 
833 static void update_curr_fair(struct rq *rq)
834 {
835         update_curr(cfs_rq_of(&rq->curr->se));
836 }
837 
838 static inline void
839 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
840 {
841         u64 wait_start, prev_wait_start;
842 
843         if (!schedstat_enabled())
844                 return;
845 
846         wait_start = rq_clock(rq_of(cfs_rq));
847         prev_wait_start = schedstat_val(se->statistics.wait_start);
848 
849         if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
850             likely(wait_start > prev_wait_start))
851                 wait_start -= prev_wait_start;
852 
853         schedstat_set(se->statistics.wait_start, wait_start);
854 }
855 
856 static inline void
857 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
858 {
859         struct task_struct *p;
860         u64 delta;
861 
862         if (!schedstat_enabled())
863                 return;
864 
865         delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
866 
867         if (entity_is_task(se)) {
868                 p = task_of(se);
869                 if (task_on_rq_migrating(p)) {
870                         /*
871                          * Preserve migrating task's wait time so wait_start
872                          * time stamp can be adjusted to accumulate wait time
873                          * prior to migration.
874                          */
875                         schedstat_set(se->statistics.wait_start, delta);
876                         return;
877                 }
878                 trace_sched_stat_wait(p, delta);
879         }
880 
881         schedstat_set(se->statistics.wait_max,
882                       max(schedstat_val(se->statistics.wait_max), delta));
883         schedstat_inc(se->statistics.wait_count);
884         schedstat_add(se->statistics.wait_sum, delta);
885         schedstat_set(se->statistics.wait_start, 0);
886 }
887 
888 static inline void
889 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
890 {
891         struct task_struct *tsk = NULL;
892         u64 sleep_start, block_start;
893 
894         if (!schedstat_enabled())
895                 return;
896 
897         sleep_start = schedstat_val(se->statistics.sleep_start);
898         block_start = schedstat_val(se->statistics.block_start);
899 
900         if (entity_is_task(se))
901                 tsk = task_of(se);
902 
903         if (sleep_start) {
904                 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;
905 
906                 if ((s64)delta < 0)
907                         delta = 0;
908 
909                 if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
910                         schedstat_set(se->statistics.sleep_max, delta);
911 
912                 schedstat_set(se->statistics.sleep_start, 0);
913                 schedstat_add(se->statistics.sum_sleep_runtime, delta);
914 
915                 if (tsk) {
916                         account_scheduler_latency(tsk, delta >> 10, 1);
917                         trace_sched_stat_sleep(tsk, delta);
918                 }
919         }
920         if (block_start) {
921                 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;
922 
923                 if ((s64)delta < 0)
924                         delta = 0;
925 
926                 if (unlikely(delta > schedstat_val(se->statistics.block_max)))
927                         schedstat_set(se->statistics.block_max, delta);
928 
929                 schedstat_set(se->statistics.block_start, 0);
930                 schedstat_add(se->statistics.sum_sleep_runtime, delta);
931 
932                 if (tsk) {
933                         if (tsk->in_iowait) {
934                                 schedstat_add(se->statistics.iowait_sum, delta);
935                                 schedstat_inc(se->statistics.iowait_count);
936                                 trace_sched_stat_iowait(tsk, delta);
937                         }
938 
939                         trace_sched_stat_blocked(tsk, delta);
940 
941                         /*
942                          * Blocking time is in units of nanosecs, so shift by
943                          * 20 to get a milliseconds-range estimation of the
944                          * amount of time that the task spent sleeping:
945                          */
946                         if (unlikely(prof_on == SLEEP_PROFILING)) {
947                                 profile_hits(SLEEP_PROFILING,
948                                                 (void *)get_wchan(tsk),
949                                                 delta >> 20);
950                         }
951                         account_scheduler_latency(tsk, delta >> 10, 0);
952                 }
953         }
954 }
955 
956 /*
957  * Task is being enqueued - update stats:
958  */
959 static inline void
960 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
961 {
962         if (!schedstat_enabled())
963                 return;
964 
965         /*
966          * Are we enqueueing a waiting task? (for current tasks
967          * a dequeue/enqueue event is a NOP)
968          */
969         if (se != cfs_rq->curr)
970                 update_stats_wait_start(cfs_rq, se);
971 
972         if (flags & ENQUEUE_WAKEUP)
973                 update_stats_enqueue_sleeper(cfs_rq, se);
974 }
975 
976 static inline void
977 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
978 {
979 
980         if (!schedstat_enabled())
981                 return;
982 
983         /*
984          * Mark the end of the wait period if dequeueing a
985          * waiting task:
986          */
987         if (se != cfs_rq->curr)
988                 update_stats_wait_end(cfs_rq, se);
989 
990         if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
991                 struct task_struct *tsk = task_of(se);
992 
993                 if (tsk->state & TASK_INTERRUPTIBLE)
994                         schedstat_set(se->statistics.sleep_start,
995                                       rq_clock(rq_of(cfs_rq)));
996                 if (tsk->state & TASK_UNINTERRUPTIBLE)
997                         schedstat_set(se->statistics.block_start,
998                                       rq_clock(rq_of(cfs_rq)));
999         }
1000 }
1001 
1002 /*
1003  * We are picking a new current task - update its stats:
1004  */
1005 static inline void
1006 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
1007 {
1008         /*
1009          * We are starting a new run period:
1010          */
1011         se->exec_start = rq_clock_task(rq_of(cfs_rq));
1012 }
1013 
1014 /**************************************************
1015  * Scheduling class queueing methods:
1016  */
1017 
1018 #ifdef CONFIG_NUMA_BALANCING
1019 /*
 1020  * Approximate time to scan a task's full address space, in ms. The task scan
 1021  * period is calculated based on the task's virtual memory size and
 1022  * numa_balancing_scan_size.
1023  */
1024 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
1025 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
1026 
1027 /* Portion of address space to scan in MB */
1028 unsigned int sysctl_numa_balancing_scan_size = 256;
1029 
1030 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
1031 unsigned int sysctl_numa_balancing_scan_delay = 1000;
1032 
1033 static unsigned int task_nr_scan_windows(struct task_struct *p)
1034 {
1035         unsigned long rss = 0;
1036         unsigned long nr_scan_pages;
1037 
1038         /*
1039          * Calculations based on RSS as non-present and empty pages are skipped
1040          * by the PTE scanner and NUMA hinting faults should be trapped based
1041          * on resident pages
1042          */
1043         nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
1044         rss = get_mm_rss(p->mm);
1045         if (!rss)
1046                 rss = nr_scan_pages;
1047 
1048         rss = round_up(rss, nr_scan_pages);
1049         return rss / nr_scan_pages;
1050 }
1051 
 1052 /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
1053 #define MAX_SCAN_WINDOW 2560
1054 
1055 static unsigned int task_scan_min(struct task_struct *p)
1056 {
1057         unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
1058         unsigned int scan, floor;
1059         unsigned int windows = 1;
1060 
1061         if (scan_size < MAX_SCAN_WINDOW)
1062                 windows = MAX_SCAN_WINDOW / scan_size;
1063         floor = 1000 / windows;
1064 
1065         scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
1066         return max_t(unsigned int, floor, scan);
1067 }
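
As a worked example (assuming 4 KB pages and the defaults above): sysctl_numa_balancing_scan_size = 256 MB gives nr_scan_pages = 256 << 8 = 65536 pages, so a task with 1 GB of RSS (262144 pages) needs task_nr_scan_windows() = 4 windows. In task_scan_min(), windows = 2560/256 = 10 so the floor is 100 ms, and scan = 1000 ms / 4 = 250 ms, giving a minimum scan period of 250 ms; task_scan_max() likewise yields 60000 ms / 4 = 15000 ms.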
1068 
1069 static unsigned int task_scan_max(struct task_struct *p)
1070 {
1071         unsigned int smin = task_scan_min(p);
1072         unsigned int smax;
1073 
1074         /* Watch for min being lower than max due to floor calculations */
1075         smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
1076         return max(smin, smax);
1077 }
1078 
1079 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1080 {
1081         rq->nr_numa_running += (p->numa_preferred_nid != -1);
1082         rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
1083 }
1084 
1085 static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1086 {
1087         rq->nr_numa_running -= (p->numa_preferred_nid != -1);
1088         rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
1089 }
1090 
1091 struct numa_group {
1092         atomic_t refcount;
1093 
1094         spinlock_t lock; /* nr_tasks, tasks */
1095         int nr_tasks;
1096         pid_t gid;
1097         int active_nodes;
1098 
1099         struct rcu_head rcu;
1100         unsigned long total_faults;
1101         unsigned long max_faults_cpu;
1102         /*
1103          * Faults_cpu is used to decide whether memory should move
1104          * towards the CPU. As a consequence, these stats are weighted
1105          * more by CPU use than by memory faults.
1106          */
1107         unsigned long *faults_cpu;
1108         unsigned long faults[0];
1109 };
1110 
1111 /* Shared or private faults. */
1112 #define NR_NUMA_HINT_FAULT_TYPES 2
1113 
1114 /* Memory and CPU locality */
1115 #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
1116 
1117 /* Averaged statistics, and temporary buffers. */
1118 #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
1119 
1120 pid_t task_numa_group_id(struct task_struct *p)
1121 {
1122         return p->numa_group ? p->numa_group->gid : 0;
1123 }
1124 
1125 /*
1126  * The averaged statistics, shared & private, memory & cpu,
1127  * occupy the first half of the array. The second half of the
1128  * array is for current counters, which are averaged into the
1129  * first set by task_numa_placement.
1130  */
1131 static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
1132 {
1133         return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
1134 }
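
For instance, on a 2-node system with NR_NUMA_HINT_FAULT_TYPES = 2, the NUMA_MEM statistics occupy indices 0-3 (node 0 shared/private, node 1 shared/private), NUMA_CPU occupies 4-7, and the NUMA_MEMBUF/NUMA_CPUBUF temporary buffers that make up the second half occupy 8-15; e.g. task_faults_idx(NUMA_MEM, 1, 1) = 2 * (0 * 2 + 1) + 1 = 3. (Index values assume the enum ordering NUMA_MEM, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF from sched.h.)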
1135 
1136 static inline unsigned long task_faults(struct task_struct *p, int nid)
1137 {
1138         if (!p->numa_faults)
1139                 return 0;
1140 
1141         return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1142                 p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
1143 }
1144 
1145 static inline unsigned long group_faults(struct task_struct *p, int nid)
1146 {
1147         if (!p->numa_group)
1148                 return 0;
1149 
1150         return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
1151                 p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
1152 }
1153 
1154 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
1155 {
1156         return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
1157                 group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
1158 }
1159 
1160 /*
1161  * A node triggering more than 1/3 as many NUMA faults as the maximum is
1162  * considered part of a numa group's pseudo-interleaving set. Migrations
1163  * between these nodes are slowed down, to allow things to settle down.
1164  */
1165 #define ACTIVE_NODE_FRACTION 3
1166 
1167 static bool numa_is_active_node(int nid, struct numa_group *ng)
1168 {
1169         return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
1170 }
1171 
1172 /* Handle placement on systems where not all nodes are directly connected. */
1173 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
1174                                         int maxdist, bool task)
1175 {
1176         unsigned long score = 0;
1177         int node;
1178 
1179         /*
1180          * All nodes are directly connected, and the same distance
1181          * from each other. No need for fancy placement algorithms.
1182          */
1183         if (sched_numa_topology_type == NUMA_DIRECT)
1184                 return 0;
1185 
1186         /*
1187          * This code is called for each node, introducing N^2 complexity,
1188          * which should be ok given the number of nodes rarely exceeds 8.
1189          */
1190         for_each_online_node(node) {
1191                 unsigned long faults;
1192                 int dist = node_distance(nid, node);
1193 
1194                 /*
1195                  * The furthest away nodes in the system are not interesting
1196                  * for placement; nid was already counted.
1197                  */
1198                 if (dist == sched_max_numa_distance || node == nid)
1199                         continue;
1200 
1201                 /*
1202                  * On systems with a backplane NUMA topology, compare groups
1203                  * of nodes, and move tasks towards the group with the most
1204                  * memory accesses. When comparing two nodes at distance
1205                  * "hoplimit", only nodes closer by than "hoplimit" are part
1206                  * of each group. Skip other nodes.
1207                  */
1208                 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1209                                         dist > maxdist)
1210                         continue;
1211 
1212                 /* Add up the faults from nearby nodes. */
1213                 if (task)
1214                         faults = task_faults(p, node);
1215                 else
1216                         faults = group_faults(p, node);
1217 
1218                 /*
1219                  * On systems with a glueless mesh NUMA topology, there are
1220                  * no fixed "groups of nodes". Instead, nodes that are not
1221                  * directly connected bounce traffic through intermediate
1222                  * nodes; a numa_group can occupy any set of nodes.
1223                  * The further away a node is, the less the faults count.
1224                  * This seems to result in good task placement.
1225                  */
1226                 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1227                         faults *= (sched_max_numa_distance - dist);
1228                         faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
1229                 }
1230 
1231                 score += faults;
1232         }
1233 
1234         return score;
1235 }
1236 
1237 /*
1238  * These return the fraction of accesses done by a particular task, or
1239  * task group, on a particular numa node.  The group weight is given a
1240  * larger multiplier, in order to group tasks together that are almost
1241  * evenly spread out between numa nodes.
1242  */
1243 static inline unsigned long task_weight(struct task_struct *p, int nid,
1244                                         int dist)
1245 {
1246         unsigned long faults, total_faults;
1247 
1248         if (!p->numa_faults)
1249                 return 0;
1250 
1251         total_faults = p->total_numa_faults;
1252 
1253         if (!total_faults)
1254                 return 0;
1255 
1256         faults = task_faults(p, nid);
1257         faults += score_nearby_nodes(p, nid, dist, true);
1258 
1259         return 1000 * faults / total_faults;
1260 }
1261 
1262 static inline unsigned long group_weight(struct task_struct *p, int nid,
1263                                          int dist)
1264 {
1265         unsigned long faults, total_faults;
1266 
1267         if (!p->numa_group)
1268                 return 0;
1269 
1270         total_faults = p->numa_group->total_faults;
1271 
1272         if (!total_faults)
1273                 return 0;
1274 
1275         faults = group_faults(p, nid);
1276         faults += score_nearby_nodes(p, nid, dist, false);
1277 
1278         return 1000 * faults / total_faults;
1279 }
1280 
1281 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
1282                                 int src_nid, int dst_cpu)
1283 {
1284         struct numa_group *ng = p->numa_group;
1285         int dst_nid = cpu_to_node(dst_cpu);
1286         int last_cpupid, this_cpupid;
1287 
1288         this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
1289 
1290         /*
1291          * Multi-stage node selection is used in conjunction with a periodic
1292          * migration fault to build a temporal task<->page relation. By using
1293          * a two-stage filter we remove short/unlikely relations.
1294          *
1295          * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
1296          * a task's usage of a particular page (n_p) per total usage of this
1297          * page (n_t) (in a given time-span) to a probability.
1298          *
1299          * Our periodic faults will sample this probability and getting the
1300          * same result twice in a row, given these samples are fully
1301          * independent, is then given by P(n)^2, provided our sample period
1302          * is sufficiently short compared to the usage pattern.
1303          *
 1304          * This quadratic squishes small probabilities, making it less likely we
1305          * act on an unlikely task<->page relation.
1306          */
1307         last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
1308         if (!cpupid_pid_unset(last_cpupid) &&
1309                                 cpupid_to_nid(last_cpupid) != dst_nid)
1310                 return false;
1311 
1312         /* Always allow migrate on private faults */
1313         if (cpupid_match_pid(p, last_cpupid))
1314                 return true;
1315 
1316         /* A shared fault, but p->numa_group has not been set up yet. */
1317         if (!ng)
1318                 return true;
1319 
1320         /*
1321          * Destination node is much more heavily used than the source
1322          * node? Allow migration.
1323          */
1324         if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
1325                                         ACTIVE_NODE_FRACTION)
1326                 return true;
1327 
1328         /*
1329          * Distribute memory according to CPU & memory use on each node,
1330          * with 3/4 hysteresis to avoid unnecessary memory migrations:
1331          *
1332          * faults_cpu(dst)   3   faults_cpu(src)
1333          * --------------- * - > ---------------
1334          * faults_mem(dst)   4   faults_mem(src)
1335          */
1336         return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
1337                group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
1338 }
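
Plugging in numbers for illustration: if the group has 300 CPU faults and 400 memory faults on dst_nid versus 150 CPU faults and 200 memory faults on src_nid, the test compares 300 * 200 * 3 = 180000 against 150 * 400 * 4 = 240000 and refuses the migration; without the 3/4 hysteresis the two ratios (300/400 and 150/200) would be exactly equal and pages could ping-pong on noise.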
1339 
1340 static unsigned long weighted_cpuload(const int cpu);
1341 static unsigned long source_load(int cpu, int type);
1342 static unsigned long target_load(int cpu, int type);
1343 static unsigned long capacity_of(int cpu);
1344 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
1345 
1346 /* Cached statistics for all CPUs within a node */
1347 struct numa_stats {
1348         unsigned long nr_running;
1349         unsigned long load;
1350 
1351         /* Total compute capacity of CPUs on a node */
1352         unsigned long compute_capacity;
1353 
1354         /* Approximate capacity in terms of runnable tasks on a node */
1355         unsigned long task_capacity;
1356         int has_free_capacity;
1357 };
1358 
1359 /*
1360  * XXX borrowed from update_sg_lb_stats
1361  */
1362 static void update_numa_stats(struct numa_stats *ns, int nid)
1363 {
1364         int smt, cpu, cpus = 0;
1365         unsigned long capacity;
1366 
1367         memset(ns, 0, sizeof(*ns));
1368         for_each_cpu(cpu, cpumask_of_node(nid)) {
1369                 struct rq *rq = cpu_rq(cpu);
1370 
1371                 ns->nr_running += rq->nr_running;
1372                 ns->load += weighted_cpuload(cpu);
1373                 ns->compute_capacity += capacity_of(cpu);
1374 
1375                 cpus++;
1376         }
1377 
1378         /*
1379          * If we raced with hotplug and there are no CPUs left in our mask
1380          * the @ns structure is NULL'ed and task_numa_compare() will
1381          * not find this node attractive.
1382          *
1383          * We'll either bail at !has_free_capacity, or we'll detect a huge
1384          * imbalance and bail there.
1385          */
1386         if (!cpus)
1387                 return;
1388 
1389         /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1390         smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1391         capacity = cpus / smt; /* cores */
1392 
1393         ns->task_capacity = min_t(unsigned, capacity,
1394                 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
1395         ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1396 }
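
For example, on a node with 8 SMT siblings whose capacity_of() is 589 each (assumed SMT-2 figures, for illustration only): compute_capacity = 4712, smt = DIV_ROUND_UP(1024 * 8, 4712) = 2, capacity = 8 / 2 = 4 cores, and task_capacity = min(4, DIV_ROUND_CLOSEST(4712, 1024)) = min(4, 5) = 4, so the node is considered to have free capacity while fewer than 4 tasks are running there.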
1397 
1398 struct task_numa_env {
1399         struct task_struct *p;
1400 
1401         int src_cpu, src_nid;
1402         int dst_cpu, dst_nid;
1403 
1404         struct numa_stats src_stats, dst_stats;
1405 
1406         int imbalance_pct;
1407         int dist;
1408 
1409         struct task_struct *best_task;
1410         long best_imp;
1411         int best_cpu;
1412 };
1413 
1414 static void task_numa_assign(struct task_numa_env *env,
1415                              struct task_struct *p, long imp)
1416 {
1417         if (env->best_task)
1418                 put_task_struct(env->best_task);
1419         if (p)
1420                 get_task_struct(p);
1421 
1422         env->best_task = p;
1423         env->best_imp = imp;
1424         env->best_cpu = env->dst_cpu;
1425 }
1426 
1427 static bool load_too_imbalanced(long src_load, long dst_load,
1428                                 struct task_numa_env *env)
1429 {
1430         long imb, old_imb;
1431         long orig_src_load, orig_dst_load;
1432         long src_capacity, dst_capacity;
1433 
1434         /*
1435          * The load is corrected for the CPU capacity available on each node.
1436          *
1437          * src_load        dst_load
1438          * ------------ vs ---------
1439          * src_capacity    dst_capacity
1440          */
1441         src_capacity = env->src_stats.compute_capacity;
1442         dst_capacity = env->dst_stats.compute_capacity;
1443 
1444         /* We care about the slope of the imbalance, not the direction. */
1445         if (dst_load < src_load)
1446                 swap(dst_load, src_load);
1447 
1448         /* Is the difference below the threshold? */
1449         imb = dst_load * src_capacity * 100 -
1450               src_load * dst_capacity * env->imbalance_pct;
1451         if (imb <= 0)
1452                 return false;
1453 
1454         /*
1455          * The imbalance is above the allowed threshold.
1456          * Compare it with the old imbalance.
1457          */
1458         orig_src_load = env->src_stats.load;
1459         orig_dst_load = env->dst_stats.load;
1460 
1461         if (orig_dst_load < orig_src_load)
1462                 swap(orig_dst_load, orig_src_load);
1463 
1464         old_imb = orig_dst_load * src_capacity * 100 -
1465                   orig_src_load * dst_capacity * env->imbalance_pct;
1466 
1467         /* Would this change make things worse? */
1468         return (imb > old_imb);
1469 }
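
As a worked example with imbalance_pct = 112 (the value task_numa_migrate() uses) and both nodes at capacity 4096: suppose the move would leave loads of 1200 and 1000. Then imb = 1200 * 4096 * 100 - 1000 * 4096 * 112 > 0, so the result exceeds the 12% threshold; but if the loads before the move were 1300 and 900, old_imb = 1300 * 4096 * 100 - 900 * 4096 * 112 is larger still, so imb > old_imb is false and the move is allowed because it shrinks an imbalance that already existed.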
1470 
1471 /*
1472  * This checks if the overall compute and NUMA accesses of the system would
 1473  * be improved if the source task was migrated to the target dst_cpu, taking
 1474  * into account that it might be best if the task running on the dst_cpu is
 1475  * exchanged with the source task.
1476  */
1477 static void task_numa_compare(struct task_numa_env *env,
1478                               long taskimp, long groupimp)
1479 {
1480         struct rq *src_rq = cpu_rq(env->src_cpu);
1481         struct rq *dst_rq = cpu_rq(env->dst_cpu);
1482         struct task_struct *cur;
1483         long src_load, dst_load;
1484         long load;
1485         long imp = env->p->numa_group ? groupimp : taskimp;
1486         long moveimp = imp;
1487         int dist = env->dist;
1488 
1489         rcu_read_lock();
1490         cur = task_rcu_dereference(&dst_rq->curr);
1491         if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
1492                 cur = NULL;
1493 
1494         /*
1495          * Because we have preemption enabled we can get migrated around and
 1496          * end up trying to select ourselves (current == env->p) as a swap candidate.
1497          */
1498         if (cur == env->p)
1499                 goto unlock;
1500 
1501         /*
1502          * "imp" is the fault differential for the source task between the
1503          * source and destination node. Calculate the total differential for
1504          * the source task and potential destination task. The more negative
 1505          * the value is, the more remote accesses would be expected to
 1506          * be incurred if the tasks were swapped.
1507          */
1508         if (cur) {
1509                 /* Skip this swap candidate if cannot move to the source cpu */
1510                 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1511                         goto unlock;
1512 
1513                 /*
1514                  * If dst and source tasks are in the same NUMA group, or not
1515                  * in any group then look only at task weights.
1516                  */
1517                 if (cur->numa_group == env->p->numa_group) {
1518                         imp = taskimp + task_weight(cur, env->src_nid, dist) -
1519                               task_weight(cur, env->dst_nid, dist);
1520                         /*
1521                          * Add some hysteresis to prevent swapping the
1522                          * tasks within a group over tiny differences.
1523                          */
1524                         if (cur->numa_group)
1525                                 imp -= imp/16;
1526                 } else {
1527                         /*
1528                          * Compare the group weights. If a task is all by
1529                          * itself (not part of a group), use the task weight
1530                          * instead.
1531                          */
1532                         if (cur->numa_group)
1533                                 imp += group_weight(cur, env->src_nid, dist) -
1534                                        group_weight(cur, env->dst_nid, dist);
1535                         else
1536                                 imp += task_weight(cur, env->src_nid, dist) -
1537                                        task_weight(cur, env->dst_nid, dist);
1538                 }
1539         }
1540 
1541         if (imp <= env->best_imp && moveimp <= env->best_imp)
1542                 goto unlock;
1543 
1544         if (!cur) {
1545                 /* Is there capacity at our destination? */
1546                 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
1547                     !env->dst_stats.has_free_capacity)
1548                         goto unlock;
1549 
1550                 goto balance;
1551         }
1552 
1553         /* Balance doesn't matter much if we're running a task per cpu */
1554         if (imp > env->best_imp && src_rq->nr_running == 1 &&
1555                         dst_rq->nr_running == 1)
1556                 goto assign;
1557 
1558         /*
1559          * In the overloaded case, try and keep the load balanced.
1560          */
1561 balance:
1562         load = task_h_load(env->p);
1563         dst_load = env->dst_stats.load + load;
1564         src_load = env->src_stats.load - load;
1565 
1566         if (moveimp > imp && moveimp > env->best_imp) {
1567                 /*
1568                  * If the improvement from just moving env->p is
1569                  * better than swapping tasks around, check if a move is
1570                  * possible. Store a slightly smaller score than moveimp,
1571                  * so an actually idle CPU will win.
1572                  */
1573                 if (!load_too_imbalanced(src_load, dst_load, env)) {
1574                         imp = moveimp - 1;
1575                         cur = NULL;
1576                         goto assign;
1577                 }
1578         }
1579 
1580         if (imp <= env->best_imp)
1581                 goto unlock;
1582 
1583         if (cur) {
1584                 load = task_h_load(cur);
1585                 dst_load -= load;
1586                 src_load += load;
1587         }
1588 
1589         if (load_too_imbalanced(src_load, dst_load, env))
1590                 goto unlock;
1591 
1592         /*
1593          * One idle CPU per node is evaluated for a task numa move.
1594          * Call select_idle_sibling to maybe find a better one.
1595          */
1596         if (!cur) {
1597                 /*
1598                  * select_idle_sibling() uses a per-cpu cpumask that can
1599                  * also be used from IRQ context, so disable IRQs around it.
1600                  */
1601                 local_irq_disable();
1602                 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
1603                                                    env->dst_cpu);
1604                 local_irq_enable();
1605         }
1606 
1607 assign:
1608         task_numa_assign(env, cur, imp);
1609 unlock:
1610         rcu_read_unlock();
1611 }
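
The swap score computed above is easier to see in isolation: in the same-group case it is the source task's improvement (taskimp) plus what the candidate would gain by moving the other way, with 1/16th shaved off as hysteresis when the tasks share a numa_group. Below is a minimal userspace sketch of just that arithmetic; the names are hypothetical, not kernel API.

#include <stdio.h>

/* Hypothetical per-task NUMA weights on the two nodes involved. */
struct demo_task {
	long w_src;	/* task_weight() on the source node */
	long w_dst;	/* task_weight() on the destination node */
	int  in_group;	/* non-zero if the task belongs to a numa_group */
};

/* Swap score for the same-group branch of task_numa_compare(). */
static long demo_swap_imp(long taskimp, const struct demo_task *cur)
{
	/* cur would move dst -> src, so it gains w_src and loses w_dst. */
	long imp = taskimp + cur->w_src - cur->w_dst;

	/* Hysteresis: drop 1/16th so tiny differences don't cause swaps. */
	if (cur->in_group)
		imp -= imp / 16;
	return imp;
}

int main(void)
{
	struct demo_task cur = { .w_src = 400, .w_dst = 380, .in_group = 1 };
	long taskimp = 120;	/* p's own gain from moving src -> dst */

	printf("swap imp = %ld\n", demo_swap_imp(taskimp, &cur));	/* 132 */
	return 0;
}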
1612 
1613 static void task_numa_find_cpu(struct task_numa_env *env,
1614                                 long taskimp, long groupimp)
1615 {
1616         int cpu;
1617 
1618         for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1619                 /* Skip this CPU if the source task cannot migrate */
1620                 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1621                         continue;
1622 
1623                 env->dst_cpu = cpu;
1624                 task_numa_compare(env, taskimp, groupimp);
1625         }
1626 }
1627 
1628 /* Only move tasks to a NUMA node less busy than the current node. */
1629 static bool numa_has_capacity(struct task_numa_env *env)
1630 {
1631         struct numa_stats *src = &env->src_stats;
1632         struct numa_stats *dst = &env->dst_stats;
1633 
1634         if (src->has_free_capacity && !dst->has_free_capacity)
1635                 return false;
1636 
1637         /*
1638          * Only consider a task move if the source has a higher load
1639          * than the destination, corrected for CPU capacity on each node.
1640          *
1641          *      src->load                dst->load
1642          * --------------------- vs ---------------------
1643          * src->compute_capacity    dst->compute_capacity
1644          */
1645         if (src->load * dst->compute_capacity * env->imbalance_pct >
1646 
1647             dst->load * src->compute_capacity * 100)
1648                 return true;
1649 
1650         return false;
1651 }
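
The comparison above avoids divisions by cross-multiplying both sides and scaling the source side by imbalance_pct, so a move is only ruled out when the source node's capacity-normalized load drops clearly below (roughly 100/112 of) the destination's. A small userspace sketch with made-up numbers; the helper name is hypothetical, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Division-free "is the source busier?" check, as in numa_has_capacity(). */
static bool demo_src_busier(unsigned long src_load, unsigned long src_cap,
			    unsigned long dst_load, unsigned long dst_cap,
			    unsigned int imbalance_pct)
{
	/*
	 * src_load/src_cap > (dst_load/dst_cap) * 100/imbalance_pct,
	 * rewritten so no division is needed:
	 */
	return src_load * dst_cap * imbalance_pct >
	       dst_load * src_cap * 100;
}

int main(void)
{
	/* Equal capacities, source slightly busier: move allowed. */
	printf("%d\n", demo_src_busier(1000, 4096, 950, 4096, 112));	/* 1 */
	/* Source clearly less loaded than the destination: no move. */
	printf("%d\n", demo_src_busier(800, 4096, 1000, 4096, 112));	/* 0 */
	return 0;
}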
1652 
1653 static int task_numa_migrate(struct task_struct *p)
1654 {
1655         struct task_numa_env env = {
1656                 .p = p,
1657 
1658                 .src_cpu = task_cpu(p),
1659                 .src_nid = task_node(p),
1660 
1661                 .imbalance_pct = 112,
1662 
1663                 .best_task = NULL,
1664                 .best_imp = 0,
1665                 .best_cpu = -1,
1666         };
1667         struct sched_domain *sd;
1668         unsigned long taskweight, groupweight;
1669         int nid, ret, dist;
1670         long taskimp, groupimp;
1671 
1672         /*
1673          * Pick the lowest SD_NUMA domain, as that would have the smallest
1674          * imbalance and would be the first to start moving tasks about.
1675          *
1676          * And we want to avoid any moving of tasks about, as that would create
1677          * random movement of tasks -- countering the numa conditions we're trying
1678          * to satisfy here.
1679          */
1680         rcu_read_lock();
1681         sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
1682         if (sd)
1683                 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
1684         rcu_read_unlock();
1685 
1686         /*
1687          * Cpusets can break the scheduler domain tree into smaller
1688          * balance domains, some of which do not cross NUMA boundaries.
1689          * Tasks that are "trapped" in such domains cannot be migrated
1690          * elsewhere, so there is no point in (re)trying.
1691          */
1692         if (unlikely(!sd)) {
1693                 p->numa_preferred_nid = task_node(p);
1694                 return -EINVAL;
1695         }
1696 
1697         env.dst_nid = p->numa_preferred_nid;
1698         dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1699         taskweight = task_weight(p, env.src_nid, dist);
1700         groupweight = group_weight(p, env.src_nid, dist);
1701         update_numa_stats(&env.src_stats, env.src_nid);
1702         taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1703         groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
1704         update_numa_stats(&env.dst_stats, env.dst_nid);
1705 
1706         /* Try to find a spot on the preferred nid. */
1707         if (numa_has_capacity(&env))
1708                 task_numa_find_cpu(&env, taskimp, groupimp);
1709 
1710         /*
1711          * Look at other nodes in these cases:
1712          * - there is no space available on the preferred_nid
1713          * - the task is part of a numa_group that is interleaved across
1714          *   multiple NUMA nodes; in order to better consolidate the group,
1715          *   we need to check other locations.
1716          */
1717         if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
1718                 for_each_online_node(nid) {
1719                         if (nid == env.src_nid || nid == p->numa_preferred_nid)
1720                                 continue;
1721 
1722                         dist = node_distance(env.src_nid, env.dst_nid);
1723                         if (sched_numa_topology_type == NUMA_BACKPLANE &&
1724                                                 dist != env.dist) {
1725                                 taskweight = task_weight(p, env.src_nid, dist);
1726                                 groupweight = group_weight(p, env.src_nid, dist);
1727                         }
1728 
1729                         /* Only consider nodes where both the task and the group benefit */
1730                         taskimp = task_weight(p, nid, dist) - taskweight;
1731                         groupimp = group_weight(p, nid, dist) - groupweight;
1732                         if (taskimp < 0 && groupimp < 0)
1733                                 continue;
1734 
1735                         env.dist = dist;
1736                         env.dst_nid = nid;
1737                         update_numa_stats(&env.dst_stats, env.dst_nid);
1738                         if (numa_has_capacity(&env))
1739                                 task_numa_find_cpu(&env, taskimp, groupimp);
1740                 }
1741         }
1742 
1743         /*
1744          * If the task is part of a workload that spans multiple NUMA nodes,
1745          * and is migrating into one of the workload's active nodes, remember
1746          * this node as the task's preferred numa node, so the workload can
1747          * settle down.
1748          * A task that migrated to a second choice node will be better off
1749          * trying for a better one later. Do not set the preferred node here.
1750          */
1751         if (p->numa_group) {
1752                 struct numa_group *ng = p->numa_group;
1753 
1754                 if (env.best_cpu == -1)
1755                         nid = env.src_nid;
1756                 else
1757                         nid = env.dst_nid;
1758 
1759                 if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
1760                         sched_setnuma(p, env.dst_nid);
1761         }
1762 
1763         /* No better CPU than the current one was found. */
1764         if (env.best_cpu == -1)
1765                 return -EAGAIN;
1766 
1767         /*
1768          * Reset the scan period if the task is being rescheduled on an
1769          * alternative node to recheck if the task is now properly placed.
1770          */
1771         p->numa_scan_period = task_scan_min(p);
1772 
1773         if (env.best_task == NULL) {
1774                 ret = migrate_task_to(p, env.best_cpu);
1775                 if (ret != 0)
1776                         trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
1777                 return ret;
1778         }
1779 
1780         ret = migrate_swap(p, env.best_task);
1781         if (ret != 0)
1782                 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
1783         put_task_struct(env.best_task);
1784         return ret;
1785 }
1786 
1787 /* Attempt to migrate a task to a CPU on the preferred node. */
1788 static void numa_migrate_preferred(struct task_struct *p)
1789 {
1790         unsigned long interval = HZ;
1791 
1792         /* This task has no NUMA fault statistics yet */
1793         if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
1794                 return;
1795 
1796         /* Periodically retry migrating the task to the preferred node */
1797         interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1798         p->numa_migrate_retry = jiffies + interval;
1799 
1800         /* Success if task is already running on preferred CPU */
1801         if (task_node(p) == p->numa_preferred_nid)
1802                 return;
1803 
1804         /* Otherwise, try migrate to a CPU on the preferred node */
1805         task_numa_migrate(p);
1806 }
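
The retry cadence above works out to at most once per second, or once per 1/16th of the scan period when that is shorter. A tiny sketch of that calculation, assuming HZ=250 and a simplified msecs_to_jiffies(); both are assumptions for illustration only.

#include <stdio.h>

#define DEMO_HZ 250	/* assumed CONFIG_HZ for this sketch */

static unsigned long demo_msecs_to_jiffies(unsigned long ms)
{
	return ms * DEMO_HZ / 1000;	/* simplified conversion */
}

/* Retry interval: min(1 second, scan period / 16), in jiffies. */
static unsigned long demo_retry_interval(unsigned long scan_period_ms)
{
	unsigned long interval = DEMO_HZ;	/* one second */
	unsigned long scaled = demo_msecs_to_jiffies(scan_period_ms) / 16;

	return scaled < interval ? scaled : interval;
}

int main(void)
{
	printf("%lu\n", demo_retry_interval(1000));	/* 15 jiffies (~60ms) */
	printf("%lu\n", demo_retry_interval(60000));	/* capped at 250 (1s) */
	return 0;
}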
1807 
1808 /*
1809  * Find out how many nodes the workload is actively running on. Do this by
1810  * tracking the nodes from which NUMA hinting faults are triggered. This can
1811  * be different from the set of nodes where the workload's memory is currently
1812  * located.
1813  */
1814 static void numa_group_count_active_nodes(struct numa_group *numa_group)
1815 {
1816         unsigned long faults, max_faults = 0;
1817         int nid, active_nodes = 0;
1818 
1819         for_each_online_node(nid) {
1820                 faults = group_faults_cpu(numa_group, nid);
1821                 if (faults > max_faults)
1822                         max_faults = faults;
1823         }
1824 
1825         for_each_online_node(nid) {
1826                 faults = group_faults_cpu(numa_group, nid);
1827                 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1828                         active_nodes++;
1829         }
1830 
1831         numa_group->max_faults_cpu = max_faults;
1832         numa_group->active_nodes = active_nodes;
1833 }
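
In this era of the code ACTIVE_NODE_FRACTION is 3, so a node counts as active when its per-CPU fault count exceeds a third of the busiest node's. A minimal userspace sketch of the two-pass count above, with that value assumed and hypothetical names:

#include <stdio.h>

#define DEMO_ACTIVE_NODE_FRACTION 3	/* assumed value of ACTIVE_NODE_FRACTION */

/* Count nodes whose fault rate exceeds 1/3 of the busiest node's. */
static int demo_active_nodes(const unsigned long *faults, int nr_nodes)
{
	unsigned long max_faults = 0;
	int nid, active = 0;

	for (nid = 0; nid < nr_nodes; nid++)
		if (faults[nid] > max_faults)
			max_faults = faults[nid];

	for (nid = 0; nid < nr_nodes; nid++)
		if (faults[nid] * DEMO_ACTIVE_NODE_FRACTION > max_faults)
			active++;

	return active;
}

int main(void)
{
	unsigned long faults[4] = { 900, 350, 200, 0 };

	/* 900 is the max; 350*3=1050 > 900, 200*3=600 <= 900, 0 is out. */
	printf("active nodes: %d\n", demo_active_nodes(faults, 4));	/* 2 */
	return 0;
}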
1834 
1835 /*
1836  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1837  * increments. The more local the fault statistics are, the higher the scan
1838  * period will be for the next scan window. If local/(local+remote) ratio is
1839  * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1840  * the scan period will decrease. Aim for 70% local accesses.
1841  */
1842 #define NUMA_PERIOD_SLOTS 10
1843 #define NUMA_PERIOD_THRESHOLD 7
1844 
1845 /*
1846  * Increase the scan period (slow down scanning) if the majority of
1847  * our memory is already on our local node, or if the majority of
1848  * the page accesses are shared with other processes.
1849  * Otherwise, decrease the scan period.
1850  */
1851 static void update_task_scan_period(struct task_struct *p,
1852                         unsigned long shared, unsigned long private)
1853 {
1854         unsigned int period_slot;
1855         int ratio;
1856         int diff;
1857 
1858         unsigned long remote = p->numa_faults_locality[0];
1859         unsigned long local = p->numa_faults_locality[1];
1860 
1861         /*
1862          * If there were no recorded hinting faults then either the task is
1863          * completely idle or all activity is in areas that are not of interest
1864          * to automatic numa balancing. Related to that, if there were failed
1865          * migrations then it implies we are migrating too quickly or the local
1866          * node is overloaded. In either case, scan slower.
1867          */
1868         if (local + shared == 0 || p->numa_faults_locality[2]) {
1869                 p->numa_scan_period = min(p->numa_scan_period_max,
1870                         p->numa_scan_period << 1);
1871 
1872                 p->mm->numa_next_scan = jiffies +
1873                         msecs_to_jiffies(p->numa_scan_period);
1874 
1875                 return;
1876         }
1877 
1878         /*
1879          * Prepare to scale scan period relative to the current period.
1880          *       == NUMA_PERIOD_THRESHOLD scan period stays the same
1881          *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1882          *       >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1883          */
1884         period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1885         ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1886         if (ratio >= NUMA_PERIOD_THRESHOLD) {
1887                 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1888                 if (!slot)
1889                         slot = 1;
1890                 diff = slot * period_slot;
1891         } else {
1892                 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1893 
1894                 /*
1895                  * Scale scan rate increases based on sharing. There is an
1896                  * inverse relationship between the degree of sharing and
1897                  * the adjustment made to the scanning period. Broadly
1898                  * speaking the intent is that there is little point
1899                  * scanning faster if shared accesses dominate, as it may
1900                  * simply bounce migrations uselessly.
1901                  */
1902                 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1903                 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1904         }
1905 
1906         p->numa_scan_period = clamp(p->numa_scan_period + diff,
1907                         task_scan_min(p), task_scan_max(p));
1908         memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1909 }
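
Put concretely: with 10 slots and a threshold of 7, a task seeing 90% local faults lands in slot 9, two slots above the threshold, so its scan period grows by two period-slots; a task at 40% locality shrinks it by three slots, scaled down further by the private-fault fraction. A minimal userspace sketch of that arithmetic (the helper name is hypothetical, not kernel API):

#include <stdio.h>

#define DEMO_SLOTS     10	/* NUMA_PERIOD_SLOTS */
#define DEMO_THRESHOLD  7	/* NUMA_PERIOD_THRESHOLD */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Signed adjustment to the scan period, in the same units as the period. */
static int demo_period_diff(unsigned int scan_period,
			    unsigned long local, unsigned long remote,
			    unsigned long priv, unsigned long shared)
{
	unsigned int period_slot = DIV_ROUND_UP(scan_period, DEMO_SLOTS);
	int ratio = (local * DEMO_SLOTS) / (local + remote);
	int diff;

	if (ratio >= DEMO_THRESHOLD) {
		int slot = ratio - DEMO_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;		/* scan slower */
	} else {
		diff = -(DEMO_THRESHOLD - ratio) * period_slot;	/* faster */
		/* Damp the speed-up when shared faults dominate. */
		ratio = DIV_ROUND_UP(priv * DEMO_SLOTS, priv + shared + 1);
		diff = (diff * ratio) / DEMO_SLOTS;
	}
	return diff;
}

int main(void)
{
	/* 1000ms period, 90% local faults: grow by 200ms. */
	printf("%d\n", demo_period_diff(1000, 90, 10, 90, 10));	/* 200 */
	/* 1000ms period, 40% local, 80% private: shrink by 240ms. */
	printf("%d\n", demo_period_diff(1000, 40, 60, 80, 20));	/* -240 */
	return 0;
}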
1910 
1911 /*
1912  * Get the fraction of time the task has been running since the last
1913  * NUMA placement cycle. The scheduler keeps similar statistics, but
1914  * decays those on a 32ms period, which is orders of magnitude off
1915  * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1916  * stats only if the task is so new there are no NUMA statistics yet.
1917  */
1918 static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1919 {
1920         u64 runtime, delta, now;
1921         /* Use the start of this time slice to avoid calculations. */
1922         now = p->se.exec_start;
1923         runtime = p->se.sum_exec_runtime;
1924 
1925         if (p->last_task_numa_placement) {
1926                 delta = runtime - p->last_sum_exec_runtime;
1927                 *period = now - p->last_task_numa_placement;
1928         } else {
1929                 delta = p->se.avg.load_sum / p->se.load.weight;
1930                 *period = LOAD_AVG_MAX;
1931         }
1932 
1933         p->last_sum_exec_runtime = runtime;
1934         p->last_task_numa_placement = now;
1935 
1936         return delta;
1937 }
1938 
1939 /*
1940  * Determine the preferred nid for a task in a numa_group. This needs to
1941  * be done in a way that produces consistent results with group_weight,
1942  * otherwise workloads might not converge.
1943  */
1944 static int preferred_group_nid(struct task_struct *p, int nid)
1945 {
1946         nodemask_t nodes;
1947         int dist;
1948 
1949         /* Direct connections between all NUMA nodes. */
1950         if (sched_numa_topology_type == NUMA_DIRECT)
1951                 return nid;
1952 
1953         /*
1954          * On a system with glueless mesh NUMA topology, group_weight
1955          * scores nodes according to the number of NUMA hinting faults on
1956          * both the node itself, and on nearby nodes.
1957          */
1958         if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1959                 unsigned long score, max_score = 0;
1960                 int node, max_node = nid;
1961 
1962                 dist = sched_max_numa_distance;
1963 
1964                 for_each_online_node(node) {
1965                         score = group_weight(p, node, dist);
1966                         if (score > max_score) {
1967                                 max_score = score;
1968                                 max_node = node;
1969                         }
1970                 }
1971                 return max_node;
1972         }
1973 
1974         /*
1975          * Finding the preferred nid in a system with NUMA backplane
1976          * interconnect topology is more involved. The goal is to locate
1977          * tasks from numa_groups near each other in the system, and
1978          * untangle workloads from different sides of the system. This requires
1979          * searching down the hierarchy of node groups, recursively searching
1980          * inside the highest scoring group of nodes. The nodemask tricks
1981          * keep the complexity of the search down.
1982          */
1983         nodes = node_online_map;
1984         for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1985                 unsigned long max_faults = 0;
1986                 nodemask_t max_group = NODE_MASK_NONE;
1987                 int a, b;
1988 
1989                 /* Are there nodes at this distance from each other? */
1990                 if (!find_numa_distance(dist))
1991                         continue;
1992 
1993                 for_each_node_mask(a, nodes) {
1994                         unsigned long faults = 0;
1995                         nodemask_t this_group;
1996                         nodes_clear(this_group);
1997 
1998                         /* Sum group's NUMA faults; includes a==b case. */
1999                         for_each_node_mask(b, nodes) {
2000                                 if (node_distance(a, b) < dist) {
2001                                         faults += group_faults(p, b);
2002                                         node_set(b, this_group);
2003                                         node_clear(b, nodes);
2004                                 }
2005                         }
2006 
2007                         /* Remember the top group. */
2008                         if (faults > max_faults) {
2009                                 max_faults = faults;
2010                                 max_group = this_group;
2011                                 /*
2012                                  * subtle: at the smallest distance there is
2013                                  * just one node left in each "group", the
2014                                  * winner is the preferred nid.
2015                                  */
2016                                 nid = a;
2017                         }
2018                 }
2019                 /* Next round, evaluate the nodes within max_group. */
2020                 if (!max_faults)
2021                         break;
2022                 nodes = max_group;
2023         }
2024         return nid;
2025 }
2026 
2027 static void task_numa_placement(struct task_struct *p)
2028 {
2029         int seq, nid, max_nid = -1, max_group_nid = -1;
2030         unsigned long max_faults = 0, max_group_faults = 0;
2031         unsigned long fault_types[2] = { 0, 0 };
2032         unsigned long total_faults;
2033         u64 runtime, period;
2034         spinlock_t *group_lock = NULL;
2035 
2036         /*
2037          * The p->mm->numa_scan_seq field gets updated without
2038          * exclusive access. Use READ_ONCE() here to ensure
2039          * that the field is read in a single access:
2040          */
2041         seq = READ_ONCE(p->mm->numa_scan_seq);
2042         if (p->numa_scan_seq == seq)
2043                 return;
2044         p->numa_scan_seq = seq;
2045         p->numa_scan_period_max = task_scan_max(p);
2046 
2047         total_faults = p->numa_faults_locality[0] +
2048                        p->numa_faults_locality[1];
2049         runtime = numa_get_avg_runtime(p, &period);
2050 
2051         /* If the task is part of a group prevent parallel updates to group stats */
2052         if (p->numa_group) {
2053                 group_lock = &p->numa_group->lock;
2054                 spin_lock_irq(group_lock);
2055         }
2056 
2057         /* Find the node with the highest number of faults */
2058         for_each_online_node(nid) {
2059                 /* Keep track of the offsets in numa_faults array */
2060                 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
2061                 unsigned long faults = 0, group_faults = 0;
2062                 int priv;
2063 
2064                 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
2065                         long diff, f_diff, f_weight;
2066 
2067                         mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2068                         membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2069                         cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2070                         cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
2071 
2072                         /* Decay existing window, copy faults since last scan */
2073                         diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2074                         fault_types[priv] += p->numa_faults[membuf_idx];
2075                         p->numa_faults[membuf_idx] = 0;
2076 
2077                         /*
2078                          * Normalize the faults_from, so all tasks in a group
2079                          * count according to CPU use, instead of by the raw
2080                          * number of faults. Tasks with little runtime have
2081                          * little over-all impact on throughput, and thus their
2082                          * faults are less important.
2083                          */
2084                         f_weight = div64_u64(runtime << 16, period + 1);
2085                         f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
2086                                    (total_faults + 1);
2087                         f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2088                         p->numa_faults[cpubuf_idx] = 0;
2089 
2090                         p->numa_faults[mem_idx] += diff;
2091                         p->numa_faults[cpu_idx] += f_diff;
2092                         faults += p->numa_faults[mem_idx];
2093                         p->total_numa_faults += diff;
2094                         if (p->numa_group) {
2095                                 /*
2096                                  * safe because we can only change our own group
2097                                  *
2098                                  * mem_idx represents the offset for a given
2099                                  * nid and priv in a specific region because it
2100                                  * is at the beginning of the numa_faults array.
2101                                  */
2102                                 p->numa_group->faults[mem_idx] += diff;
2103                                 p->numa_group->faults_cpu[mem_idx] += f_diff;
2104                                 p->numa_group->total_faults += diff;
2105                                 group_faults += p->numa_group->faults[mem_idx];
2106                         }
2107                 }
2108 
2109                 if (faults > max_faults) {
2110                         max_faults = faults;
2111                         max_nid = nid;
2112                 }
2113 
2114                 if (group_faults > max_group_faults) {
2115                         max_group_faults = group_faults;
2116                         max_group_nid = nid;
2117                 }
2118         }
2119 
2120         update_task_scan_period(p, fault_types[0], fault_types[1]);
2121 
2122         if (p->numa_group) {
2123                 numa_group_count_active_nodes(p->numa_group);
2124                 spin_unlock_irq(group_lock);
2125                 max_nid = preferred_group_nid(p, max_group_nid);
2126         }
2127 
2128         if (max_faults) {
2129                 /* Set the new preferred node */
2130                 if (max_nid != p->numa_preferred_nid)
2131                         sched_setnuma(p, max_nid);
2132 
2133                 if (task_node(p) != p->numa_preferred_nid)
2134                         numa_migrate_preferred(p);
2135         }
2136 }
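
The membuf/mem bookkeeping above amounts to an exponentially decayed window: each placement pass halves the long-term count and folds in the faults recorded since the last scan (diff = new_window - old/2). A minimal sketch of just that decay; the helper is hypothetical, not kernel API.

#include <stdio.h>

/*
 * Decayed fault window: every pass halves the accumulated count and
 * adds the faults recorded since the last scan on top.
 */
static unsigned long demo_decay_faults(unsigned long accumulated,
				       unsigned long new_window)
{
	long diff = (long)new_window - (long)(accumulated / 2);

	return accumulated + diff;	/* == accumulated/2 + new_window */
}

int main(void)
{
	unsigned long acc = 0;
	unsigned long windows[] = { 100, 100, 100, 0, 0 };
	unsigned int i;

	/* Prints 100, 150, 175, 88, 44: ramps toward ~2x the steady window,
	 * then halves away once the faults stop. */
	for (i = 0; i < 5; i++) {
		acc = demo_decay_faults(acc, windows[i]);
		printf("after window %u: %lu\n", i, acc);
	}
	return 0;
}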
2137 
2138 static inline int get_numa_group(struct numa_group *grp)
2139 {
2140         return atomic_inc_not_zero(&grp->refcount);
2141 }
2142 
2143 static inline void put_numa_group(struct numa_group *grp)
2144 {
2145         if (atomic_dec_and_test(&grp->refcount))
2146                 kfree_rcu(grp, rcu);
2147 }
2148 
2149 static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2150                         int *priv)
2151 {
2152         struct numa_group *grp, *my_grp;
2153         struct task_struct *tsk;
2154         bool join = false;
2155         int cpu = cpupid_to_cpu(cpupid);
2156         int i;
2157 
2158         if (unlikely(!p->numa_group)) {
2159                 unsigned int size = sizeof(struct numa_group) +
2160                                     4*nr_node_ids*sizeof(unsigned long);
2161 
2162                 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2163                 if (!grp)
2164                         return;
2165 
2166                 atomic_set(&grp->refcount, 1);
2167                 grp->active_nodes = 1;
2168                 grp->max_faults_cpu = 0;
2169                 spin_lock_init(&grp->lock);
2170                 grp->gid = p->pid;
2171                 /* Second half of the array tracks nids where faults happen */
2172                 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2173                                                 nr_node_ids;
2174 
2175                 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2176                         grp->faults[i] = p->numa_faults[i];
2177 
2178                 grp->total_faults = p->total_numa_faults;
2179 
2180                 grp->nr_tasks++;
2181                 rcu_assign_pointer(p->numa_group, grp);
2182         }
2183 
2184         rcu_read_lock();
2185         tsk = READ_ONCE(cpu_rq(cpu)->curr);
2186 
2187         if (!cpupid_match_pid(tsk, cpupid))
2188                 goto no_join;
2189 
2190         grp = rcu_dereference(tsk->numa_group);
2191         if (!grp)
2192                 goto no_join;
2193 
2194         my_grp = p->numa_group;
2195         if (grp == my_grp)
2196                 goto no_join;
2197 
2198         /*
2199          * Only join the other group if it's bigger; if we're the bigger group,
2200          * the other task will join us.
2201          */
2202         if (my_grp->nr_tasks > grp->nr_tasks)
2203                 goto no_join;
2204 
2205         /*
2206          * Tie-break on the grp address.
2207          */
2208         if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
2209                 goto no_join;
2210 
2211         /* Always join threads in the same process. */
2212         if (tsk->mm == current->mm)
2213                 join = true;
2214 
2215         /* Simple filter to avoid false positives due to PID collisions */
2216         if (flags & TNF_SHARED)
2217                 join = true;
2218 
2219         /* Update priv based on whether false sharing was detected */
2220         *priv = !join;
2221 
2222         if (join && !get_numa_group(grp))
2223                 goto no_join;
2224 
2225         rcu_read_unlock();
2226 
2227         if (!join)
2228                 return;
2229 
2230         BUG_ON(irqs_disabled());
2231         double_lock_irq(&my_grp->lock, &grp->lock);
2232 
2233         for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
2234                 my_grp->faults[i] -= p->numa_faults[i];
2235                 grp->faults[i] += p->numa_faults[i];
2236         }
2237         my_grp->total_faults -= p->total_numa_faults;
2238         grp->total_faults += p->total_numa_faults;
2239 
2240         my_grp->nr_tasks--;
2241         grp->nr_tasks++;
2242 
2243         spin_unlock(&my_grp->lock);
2244         spin_unlock_irq(&grp->lock);
2245 
2246         rcu_assign_pointer(p->numa_group, grp);
2247 
2248         put_numa_group(my_grp);
2249         return;
2250 
2251 no_join:
2252         rcu_read_unlock();
2253         return;
2254 }
2255 
2256 void task_numa_free(struct task_struct *p)
2257 {
2258         struct numa_group *grp = p->numa_group;
2259         void *numa_faults = p->numa_faults;
2260         unsigned long flags;
2261         int i;
2262 
2263         if (grp) {
2264                 spin_lock_irqsave(&grp->lock, flags);
2265                 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
2266                         grp->faults[i] -= p->numa_faults[i];
2267                 grp->total_faults -= p->total_numa_faults;
2268 
2269                 grp->nr_tasks--;
2270                 spin_unlock_irqrestore(&grp->lock, flags);
2271                 RCU_INIT_POINTER(p->numa_group, NULL);
2272                 put_numa_group(grp);
2273         }
2274 
2275         p->numa_faults = NULL;
2276         kfree(numa_faults);
2277 }
2278 
2279 /*
2280  * Got a PROT_NONE fault for a page on @node.
2281  */
2282 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
2283 {
2284         struct task_struct *p = current;
2285         bool migrated = flags & TNF_MIGRATED;
2286         int cpu_node = task_node(current);
2287         int local = !!(flags & TNF_FAULT_LOCAL);
2288         struct numa_group *ng;
2289         int priv;
2290 
2291         if (!static_branch_likely(&sched_numa_balancing))
2292                 return;
2293 
2294         /* for example, ksmd faulting in a user's mm */
2295         if (!p->mm)
2296                 return;
2297 
2298         /* Allocate buffer to track faults on a per-node basis */
2299         if (unlikely(!p->numa_faults)) {
2300                 int size = sizeof(*p->numa_faults) *
2301                            NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
2302 
2303                 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2304                 if (!p->numa_faults)
2305                         return;
2306 
2307                 p->total_numa_faults = 0;
2308                 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
2309         }
2310 
2311         /*
2312          * First accesses are treated as private, otherwise consider accesses
2313          * to be private if the accessing pid has not changed
2314          */
2315         if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2316                 priv = 1;
2317         } else {
2318                 priv = cpupid_match_pid(p, last_cpupid);
2319                 if (!priv && !(flags & TNF_NO_GROUP))
2320                         task_numa_group(p, last_cpupid, flags, &priv);
2321         }
2322 
2323         /*
2324          * If a workload spans multiple NUMA nodes, a shared fault that
2325          * occurs wholly within the set of nodes that the workload is
2326          * actively using should be counted as local. This allows the
2327          * scan rate to slow down when a workload has settled down.
2328          */
2329         ng = p->numa_group;
2330         if (!priv && !local && ng && ng->active_nodes > 1 &&
2331                                 numa_is_active_node(cpu_node, ng) &&
2332                                 numa_is_active_node(mem_node, ng))
2333                 local = 1;
2334 
2335         task_numa_placement(p);
2336 
2337         /*
2338          * Retry task-to-preferred-node migration periodically, in case it
2339          * previously failed, or the scheduler moved us.
2340          */
2341         if (time_after(jiffies, p->numa_migrate_retry))
2342                 numa_migrate_preferred(p);
2343 
2344         if (migrated)
2345                 p->numa_pages_migrated += pages;
2346         if (flags & TNF_MIGRATE_FAIL)
2347                 p->numa_faults_locality[2] += pages;
2348 
2349         p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2350         p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
2351         p->numa_faults_locality[local] += pages;
2352 }
2353 
2354 static void reset_ptenuma_scan(struct task_struct *p)
2355 {
2356         /*
2357          * We only did a read acquisition of the mmap sem, so
2358          * p->mm->numa_scan_seq is written to without exclusive access
2359          * and the update is not guaranteed to be atomic. That's not
2360          * much of an issue though, since this is just used for
2361          * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2362          * expensive, to avoid any form of compiler optimizations:
2363          */
2364         WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
2365         p->mm->numa_scan_offset = 0;
2366 }
2367 
2368 /*
2369  * The expensive part of numa migration is done from task_work context.
2370  * Triggered from task_tick_numa().
2371  */
2372 void task_numa_work(struct callback_head *work)
2373 {
2374         unsigned long migrate, next_scan, now = jiffies;
2375         struct task_struct *p = current;
2376         struct mm_struct *mm = p->mm;
2377         u64 runtime = p->se.sum_exec_runtime;
2378         struct vm_area_struct *vma;
2379         unsigned long start, end;
2380         unsigned long nr_pte_updates = 0;
2381         long pages, virtpages;
2382 
2383         SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
2384 
2385         work->next = work; /* protect against double add */
2386         /*
2387          * Who cares about NUMA placement when they're dying.
2388          *
2389          * NOTE: make sure not to dereference p->mm before this check,
2390          * exit_task_work() happens _after_ exit_mm() so we could be called
2391          * without p->mm even though we still had it when we enqueued this
2392          * work.
2393          */
2394         if (p->flags & PF_EXITING)
2395                 return;
2396 
2397         if (!mm->numa_next_scan) {
2398                 mm->numa_next_scan = now +
2399                         msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
2400         }
2401 
2402         /*
2403          * Enforce maximal scan/migration frequency.
2404          */
2405         migrate = mm->numa_next_scan;
2406         if (time_before(now, migrate))
2407                 return;
2408 
2409         if (p->numa_scan_period == 0) {
2410                 p->numa_scan_period_max = task_scan_max(p);
2411                 p->numa_scan_period = task_scan_min(p);
2412         }
2413 
2414         next_scan = now + msecs_to_jiffies(p->numa_scan_period);
2415         if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2416                 return;
2417 
2418         /*
2419          * Delay this task enough that another task of this mm will likely win
2420          * the next time around.
2421          */
2422         p->node_stamp += 2 * TICK_NSEC;
2423 
2424         start = mm->numa_scan_offset;
2425         pages = sysctl_numa_balancing_scan_size;
2426         pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2427         virtpages = pages * 8;     /* Scan up to this much virtual space */
2428         if (!pages)
2429                 return;
2430 
2431 
2432         down_read(&mm->mmap_sem);
2433         vma = find_vma(mm, start);
2434         if (!vma) {
2435                 reset_ptenuma_scan(p);
2436                 start = 0;
2437                 vma = mm->mmap;
2438         }
2439         for (; vma; vma = vma->vm_next) {
2440                 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
2441                         is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
2442                         continue;
2443                 }
2444 
2445                 /*
2446                  * Shared library pages mapped by multiple processes are not
2447                  * migrated as it is expected they are cache replicated. Avoid
2448                  * hinting faults in read-only file-backed mappings or the vdso
2449                  * as migrating the pages will be of marginal benefit.
2450                  */
2451                 if (!vma->vm_mm ||
2452                     (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2453                         continue;
2454 
2455                 /*
2456                  * Skip inaccessible VMAs to avoid any confusion between
2457                  * PROT_NONE and NUMA hinting ptes
2458                  */
2459                 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2460                         continue;
2461 
2462                 do {
2463                         start = max(start, vma->vm_start);
2464                         end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2465                         end = min(end, vma->vm_end);
2466                         nr_pte_updates = change_prot_numa(vma, start, end);
2467 
2468                         /*
2469                          * Try to scan sysctl_numa_balancing_scan_size worth of
2470                          * hpages that have at least one present PTE that
2471                          * is not already pte-numa. If the VMA contains
2472                          * areas that are unused or already full of prot_numa
2473                          * PTEs, scan up to virtpages, to skip through those
2474                          * areas faster.
2475                          */
2476                         if (nr_pte_updates)
2477                                 pages -= (end - start) >> PAGE_SHIFT;
2478                         virtpages -= (end - start) >> PAGE_SHIFT;
2479 
2480                         start = end;
2481                         if (pages <= 0 || virtpages <= 0)
2482                                 goto out;
2483 
2484                         cond_resched();
2485                 } while (end != vma->vm_end);
2486         }
2487 
2488 out:
2489         /*
2490          * It is possible to reach the end of the VMA list but the last few
2491          * VMAs are not guaranteed to be migratable. If they are not, we
2492          * would find the !migratable VMA on the next scan but not reset the
2493          * scanner to the start so check it now.
2494          */
2495         if (vma)
2496                 mm->numa_scan_offset = start;
2497         else
2498                 reset_ptenuma_scan(p);
2499         up_read(&mm->mmap_sem);
2500 
2501         /*
2502          * Make sure tasks use at least 32x as much time to run other code
2503          * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2504          * Usually update_task_scan_period slows down scanning enough; on an
2505          * overloaded system we need to limit overhead on a per task basis.
2506          */
2507         if (unlikely(p->se.sum_exec_runtime != runtime)) {
2508                 u64 diff = p->se.sum_exec_runtime - runtime;
2509                 p->node_stamp += 32 * diff;
2510         }
2511 }
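
The final adjustment implements the 3% cap mentioned above: pushing node_stamp out by 32x the CPU time the scan consumed means at most about one part in 33 of the task's runtime can go to PTE scanning. A trivial sketch of that arithmetic (hypothetical helper, not kernel API):

#include <stdio.h>

/*
 * Push the next-scan stamp out by 32x the CPU time just consumed by the
 * scan, bounding PTE-scanning overhead to roughly 1/33 (~3%) of runtime.
 */
static unsigned long long demo_throttle(unsigned long long node_stamp,
					unsigned long long scan_ns)
{
	return node_stamp + 32 * scan_ns;
}

int main(void)
{
	/* A scan pass that burned 2ms of CPU defers the next one by 64ms
	 * of task runtime. */
	printf("next scan after +%llu ns\n", demo_throttle(0, 2000000ULL));
	return 0;
}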
2512 
2513 /*
2514  * Drive the periodic memory faults.
2515  */
2516 void task_tick_numa(struct rq *rq, struct task_struct *curr)
2517 {
2518         struct callback_head *work = &curr->numa_work;
2519         u64 period, now;
2520 
2521         /*
2522          * We don't care about NUMA placement if we don't have memory.
2523          */
2524         if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2525                 return;
2526 
2527         /*
2528          * Using runtime rather than walltime has the dual advantage that
2529          * we (mostly) drive the selection from busy threads and that the
2530          * task needs to have done some actual work before we bother with
2531          * NUMA placement.
2532          */
2533         now = curr->se.sum_exec_runtime;
2534         period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2535 
2536         if (now > curr->node_stamp + period) {
2537                 if (!curr->node_stamp)
2538                         curr->numa_scan_period = task_scan_min(curr);
2539                 curr->node_stamp += period;
2540 
2541                 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2542                         init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2543                         task_work_add(curr, work, true);
2544                 }
2545         }
2546 }
2547 #else
2548 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2549 {
2550 }
2551 
2552 static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2553 {
2554 }
2555 
2556 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2557 {
2558 }
2559 #endif /* CONFIG_NUMA_BALANCING */
2560 
2561 static void
2562 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2563 {
2564         update_load_add(&cfs_rq->load, se->load.weight);
2565         if (!parent_entity(se))
2566                 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
2567 #ifdef CONFIG_SMP
2568         if (entity_is_task(se)) {
2569                 struct rq *rq = rq_of(cfs_rq);
2570 
2571                 account_numa_enqueue(rq, task_of(se));
2572                 list_add(&se->group_node, &rq->cfs_tasks);
2573         }
2574 #endif
2575         cfs_rq->nr_running++;
2576 }
2577 
2578 static void
2579 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2580 {
2581         update_load_sub(&cfs_rq->load, se->load.weight);
2582         if (!parent_entity(se))
2583                 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
2584 #ifdef CONFIG_SMP
2585         if (entity_is_task(se)) {
2586                 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
2587                 list_del_init(&se->group_node);
2588         }
2589 #endif
2590         cfs_rq->nr_running--;
2591 }
2592 
2593 #ifdef CONFIG_FAIR_GROUP_SCHED
2594 # ifdef CONFIG_SMP
2595 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2596 {
2597         long tg_weight, load, shares;
2598 
2599         /*
2600          * This really should be: cfs_rq->avg.load_avg, but instead we use
2601          * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2602          * the shares for small weight interactive tasks.
2603          */
2604         load = scale_load_down(cfs_rq->load.weight);
2605 
2606         tg_weight = atomic_long_read(&tg->load_avg);
2607 
2608         /* Ensure tg_weight >= load */
2609         tg_weight -= cfs_rq->tg_load_avg_contrib;
2610         tg_weight += load;
2611 
2612         shares = (tg->shares * load);
2613         if (tg_weight)
2614                 shares /= tg_weight;
2615 
2616         if (shares < MIN_SHARES)
2617                 shares = MIN_SHARES;
2618         if (shares > tg->shares)
2619                 shares = tg->shares;
2620 
2621         return shares;
2622 }
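
In other words, each CPU's runqueue gets a slice of the group's configured weight proportional to its share of the group-wide load, never below MIN_SHARES and never above tg->shares. A small userspace sketch, assuming MIN_SHARES is 2 and using hypothetical names:

#include <stdio.h>

#define DEMO_MIN_SHARES 2	/* assumed MIN_SHARES */

/*
 * Per-CPU share of a task group's weight: proportional to this CPU's
 * contribution to the group-wide load, clamped to [MIN_SHARES, tg_shares].
 */
static long demo_cfs_shares(long tg_shares, long tg_load_avg,
			    long local_contrib, long local_weight)
{
	/* Replace our stale contribution with the current local weight. */
	long tg_weight = tg_load_avg - local_contrib + local_weight;
	long shares = tg_shares * local_weight;

	if (tg_weight)
		shares /= tg_weight;

	if (shares < DEMO_MIN_SHARES)
		shares = DEMO_MIN_SHARES;
	if (shares > tg_shares)
		shares = tg_shares;
	return shares;
}

int main(void)
{
	/* Group weight 1024, this CPU runs 1/4 of the group's load. */
	printf("%ld\n", demo_cfs_shares(1024, 4096, 1024, 1024));	/* 256 */
	/* Only runqueue in the group: it gets the full group weight. */
	printf("%ld\n", demo_cfs_shares(1024, 1024, 1024, 1024));	/* 1024 */
	return 0;
}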
2623 # else /* CONFIG_SMP */
2624 static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2625 {
2626         return tg->shares;
2627 }
2628 # endif /* CONFIG_SMP */
2629 
2630 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2631                             unsigned long weight)
2632 {
2633         if (se->on_rq) {
2634                 /* commit outstanding execution time */
2635                 if (cfs_rq->curr == se)
2636                         update_curr(cfs_rq);
2637                 account_entity_dequeue(cfs_rq, se);
2638         }
2639 
2640         update_load_set(&se->load, weight);
2641 
2642         if (se->on_rq)
2643                 account_entity_enqueue(cfs_rq, se);
2644 }
2645 
2646 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2647 
2648 static void update_cfs_shares(struct cfs_rq *cfs_rq)
2649 {
2650         struct task_group *tg;
2651         struct sched_entity *se;
2652         long shares;
2653 
2654         tg = cfs_rq->tg;
2655         se = tg->se[cpu_of(rq_of(cfs_rq))];
2656         if (!se || throttled_hierarchy(cfs_rq))
2657                 return;
2658 #ifndef CONFIG_SMP
2659         if (likely(se->load.weight == tg->shares))
2660                 return;
2661 #endif
2662         shares = calc_cfs_shares(cfs_rq, tg);
2663 
2664         reweight_entity(cfs_rq_of(se), se, shares);
2665 }
2666 #else /* CONFIG_FAIR_GROUP_SCHED */
2667 static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2668 {
2669 }
2670 #endif /* CONFIG_FAIR_GROUP_SCHED */
2671 
2672 #ifdef CONFIG_SMP
2673 /* Precomputed fixed inverse multiplies for multiplication by y^n */
2674 static const u32 runnable_avg_yN_inv[] = {
2675         0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2676         0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2677         0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2678         0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2679         0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2680         0x85aac367, 0x82cd8698,
2681 };
2682 
2683 /*
2684  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
2685  * over-estimates when re-combining.
2686  */
2687 static const u32 runnable_avg_yN_sum[] = {
2688             0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2689          9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2690         17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2691 };
2692 
2693 /*
2694  * Precomputed \Sum y^k { 1<=k<=n, where n%32==0 }. Values are rounded down
2695  * to lower integers. See Documentation/scheduler/sched-avg.txt for how these
2696  * were generated:
2697  */
2698 static const u32 __accumulated_sum_N32[] = {
2699             0, 23371, 35056, 40899, 43820, 45281,
2700         46011, 46376, 46559, 46650, 46696, 46719,
2701 };
2702 
2703 /*
2704  * Approximate:
2705  *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
2706  */
2707 static __always_inline u64 decay_load(u64 val, u64 n)
2708 {
2709         unsigned int local_n;
2710 
2711         if (!n)
2712                 return val;
2713         else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2714                 return 0;
2715 
2716         /* after bounds checking we can collapse to 32-bit */
2717         local_n = n;
2718 
2719         /*
2720          * As y^PERIOD = 1/2, we can combine
2721          *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2722          * With a look-up table which covers y^n (n<PERIOD)
2723          *
2724          * To achieve constant time decay_load.
2725          */
2726         if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2727                 val >>= local_n / LOAD_AVG_PERIOD;
2728                 local_n %= LOAD_AVG_PERIOD;
2729         }
2730 
2731         val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2732         return val;
2733 }
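
A floating-point model makes the intent of decay_load() easy to check: val is halved once per full 32-sample period and scaled by y^(n % 32) for the remainder, which the kernel does with a shift plus one fixed-point multiply from runnable_avg_yN_inv[]. The sketch below is illustration only (build with -lm, small n assumed); it is not the kernel's fixed-point math.

#include <math.h>
#include <stdio.h>

#define DEMO_PERIOD 32	/* LOAD_AVG_PERIOD: y^32 == 1/2 */

/* Floating-point model of decay_load(): val * y^n with y = 0.5^(1/32). */
static double demo_decay(double val, unsigned int n)
{
	double y = pow(0.5, 1.0 / DEMO_PERIOD);

	val /= (double)(1u << (n / DEMO_PERIOD));	/* whole half-lives */
	return val * pow(y, n % DEMO_PERIOD);		/* the remainder */
}

int main(void)
{
	printf("%.1f\n", demo_decay(1024.0, 32));	/* one period: 512.0 */
	printf("%.1f\n", demo_decay(1024.0, 64));	/* two periods: 256.0 */
	printf("%.1f\n", demo_decay(1024.0, 16));	/* half period: ~724.1 */
	return 0;
}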
2734 
2735 /*
2736  * For updates fully spanning n periods, the contribution to runnable
2737  * average will be: \Sum 1024*y^n
2738  *
2739  * We can compute this reasonably efficiently by combining:
2740  *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for  n <PERIOD}
2741  */
2742 static u32 __compute_runnable_contrib(u64 n)
2743 {
2744         u32 contrib = 0;
2745 
2746         if (likely(n <= LOAD_AVG_PERIOD))
2747                 return runnable_avg_yN_sum[n];
2748         else if (unlikely(n >= LOAD_AVG_MAX_N))
2749                 return LOAD_AVG_MAX;
2750 
2751         /* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
2752         contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
2753         n %= LOAD_AVG_PERIOD;
2754         contrib = decay_load(contrib, n);
2755         return contrib + runnable_avg_yN_sum[n];
2756 }
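
The same floating-point modelling shows where the runnable_avg_yN_sum[] entries come from: they are partial sums of the geometric series 1024*y^k. The exact real-valued sums sit slightly above the table entries because the kernel floors each fixed-point step. Names and the float math below are for illustration only (build with -lm).

#include <math.h>
#include <stdio.h>

/* Floating-point model of __compute_runnable_contrib(): 1024*(y + ... + y^n). */
static double demo_contrib(unsigned int n)
{
	double y = pow(0.5, 1.0 / 32.0);
	double sum = 0.0;
	unsigned int k;

	for (k = 1; k <= n; k++)
		sum += 1024.0 * pow(y, k);
	return sum;
}

int main(void)
{
	printf("%.0f\n", demo_contrib(1));	/* ~1002, cf. table entry 1002 */
	printf("%.0f\n", demo_contrib(2));	/* ~1983, cf. table entry 1982 */
	printf("%.0f\n", demo_contrib(32));	/* ~23382; table floors to 23371 */
	return 0;
}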
2757 
2758 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
2759 
2760 /*
2761  * We can represent the historical contribution to runnable average as the
2762  * coefficients of a geometric series.  To do this we sub-divide our runnable
2763  * history into segments of approximately 1ms (1024us); label the segment that
2764  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2765  *
2766  * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2767  *      p0            p1           p2
2768  *     (now)       (~1ms ago)  (~2ms ago)
2769  *
2770  * Let u_i denote the fraction of p_i that the entity was runnable.
2771  *
2772  * We then designate the fractions u_i as our co-efficients, yielding the
2773  * following representation of historical load:
2774  *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2775  *
2776  * We choose y based on the width of a reasonable scheduling period, fixing:
2777  *   y^32 = 0.5
2778  *
2779  * This means that the contribution to load ~32ms ago (u_32) will be weighted
2780  * approximately half as much as the contribution to load within the last ms
2781  * (u_0).
2782  *
2783  * When a period "rolls over" and we have new u_0`, multiplying the previous
2784  * sum again by y is sufficient to update:
2785  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2786  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2787  */
2788 static __always_inline int
2789 __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
2790                   unsigned long weight, int running, struct cfs_rq *cfs_rq)
2791 {
2792         u64 delta, scaled_delta, periods;
2793         u32 contrib;
2794         unsigned int delta_w, scaled_delta_w, decayed = 0;
2795         unsigned long scale_freq, scale_cpu;
2796 
2797         delta = now - sa->last_update_time;
2798         /*
2799          * This should only happen when time goes backwards, which it
2800          * unfortunately does during sched clock init when we swap over to TSC.
2801          */
2802         if ((s64)delta < 0) {
2803                 sa->last_update_time = now;
2804                 return 0;
2805         }
2806 
2807         /*
2808          * Use 1024ns as the unit of measurement since it's a reasonable
2809          * approximation of 1us and fast to compute.
2810          */
2811         delta >>= 10;
2812         if (!delta)
2813                 return 0;
2814         sa->last_update_time = now;
2815 
2816         scale_freq = arch_scale_freq_capacity(NULL, cpu);
2817         scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2818 
2819         /* delta_w is the amount already accumulated against our next period */
2820         delta_w = sa->period_contrib;
2821         if (delta + delta_w >= 1024) {
2822                 decayed = 1;
2823 
2824                 /* how much is left over for the next period is recomputed below */
2825                 sa->period_contrib = 0;
2826 
2827                 /*
2828                  * Now that we know we're crossing a period boundary, figure
2829                  * out how much from delta we need to complete the current
2830                  * period and accrue it.
2831                  */
2832                 delta_w = 1024 - delta_w;
2833                 scaled_delta_w = cap_scale(delta_w, scale_freq);
2834                 if (weight) {
2835                         sa->load_sum += weight * scaled_delta_w;
2836                         if (cfs_rq) {
2837                                 cfs_rq->runnable_load_sum +=
2838                                                 weight * scaled_delta_w;
2839                         }
2840                 }
2841                 if (running)
2842                         sa->util_sum += scaled_delta_w * scale_cpu;
2843 
2844                 delta -= delta_w;
2845 
2846                 /* Figure out how many additional periods this update spans */
2847                 periods = delta / 1024;
2848                 delta %= 1024;
2849 
2850                 sa->load_sum = decay_load(sa->load_sum, periods + 1);
2851                 if (cfs_rq) {
2852                         cfs_rq->runnable_load_sum =
2853                                 decay_load(cfs_rq->runnable_load_sum, periods + 1);
2854                 }
2855                 sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
2856 
2857                 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2858                 contrib = __compute_runnable_contrib(periods);
2859                 contrib = cap_scale(contrib, scale_freq);
2860                 if (weight) {
2861                         sa->load_sum += weight * contrib;
2862                         if (cfs_rq)
2863                                 cfs_rq->runnable_load_sum += weight * contrib;
2864                 }
2865                 if (running)
2866                         sa->util_sum += contrib * scale_cpu;
2867         }
2868 
2869         /* Remainder of delta accrued against u_0` */
2870         scaled_delta = cap_scale(delta, scale_freq);
2871         if (weight) {
2872                 sa->load_sum += weight * scaled_delta;
2873                 if (cfs_rq)
2874                         cfs_rq->runnable_load_sum += weight * scaled_delta;
2875         }
2876         if (running)
2877                 sa->util_sum += scaled_delta * scale_cpu;
2878 
2879         sa->period_contrib += delta;
2880 
2881         if (decayed) {
2882                 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
2883                 if (cfs_rq) {
2884                         cfs_rq->runnable_load_avg =
2885                                 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2886                 }
2887                 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
2888         }
2889 
2890         return decayed;
2891 }
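
A toy model of the accumulation above: each ~1ms period contributes up to 1024 and the whole history decays by y per period, so a task runnable a fixed fraction of the time converges to that fraction of the maximum possible sum. The snippet is a floating-point illustration only (build with -lm), not the kernel's fixed-point path.

#include <math.h>
#include <stdio.h>

int main(void)
{
	double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 1/2 */
	double max_sum = 1024.0 / (1.0 - y);	/* every period fully runnable */
	double sum = 0.0;
	int period;

	for (period = 0; period < 1000; period++) {
		sum *= y;			/* decay the history */
		if (period % 2 == 0)		/* runnable every other period */
			sum += 1024.0;
	}
	printf("ratio = %.3f\n", sum / max_sum);	/* roughly 0.5 */
	return 0;
}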
2892 
2893 #ifdef CONFIG_FAIR_GROUP_SCHED
2894 /**
2895  * update_tg_load_avg - update the tg's load avg
2896  * @cfs_rq: the cfs_rq whose avg changed
2897  * @force: update regardless of how small the difference
2898  *
2899  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
2900  * However, because tg->load_avg is a global value there are performance
2901  * considerations.
2902  *
2903  * In order to avoid having to look at the other cfs_rq's, we use a
2904  * differential update where we store the last value we propagated. This in
2905  * turn allows skipping updates if the differential is 'small'.
2906  *
2907  * Updating tg's load_avg is necessary before update_cfs_shares() (which is
2908  * done) and effective_load() (which is not done because it is too costly).
2909  */
2910 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
2911 {
2912         long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
2913 
2914         /*
2915          * No need to update load_avg for root_task_group as it is not used.
2916          */
2917         if (cfs_rq->tg == &root_task_group)
2918                 return;
2919 
2920         if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2921                 atomic_long_add(delta, &cfs_rq->tg->load_avg);
2922                 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
2923         }
2924 }
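
The differential update is small enough to model directly: the local contribution is only folded into the global tg->load_avg once it has drifted by more than 1/64th of the last propagated value (the force case is omitted here). A minimal sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Differential propagation, as in update_tg_load_avg() without 'force'. */
struct demo_cfs_rq {
	long load_avg;		/* current local average */
	long contrib;		/* last value propagated to the group */
};

static void demo_update_tg(long *tg_load_avg, struct demo_cfs_rq *cfs_rq)
{
	long delta = cfs_rq->load_avg - cfs_rq->contrib;

	/* Skip tiny drifts to avoid hammering the shared counter. */
	if (labs(delta) > cfs_rq->contrib / 64) {
		*tg_load_avg += delta;
		cfs_rq->contrib = cfs_rq->load_avg;
	}
}

int main(void)
{
	long tg_load = 2048;
	struct demo_cfs_rq rq = { .load_avg = 1030, .contrib = 1024 };

	demo_update_tg(&tg_load, &rq);		/* |6| <= 16: skipped */
	printf("%ld %ld\n", tg_load, rq.contrib);	/* 2048 1024 */

	rq.load_avg = 1100;
	demo_update_tg(&tg_load, &rq);		/* |76| > 16: propagated */
	printf("%ld %ld\n", tg_load, rq.contrib);	/* 2124 1100 */
	return 0;
}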
2925 
2926 /*
2927  * Called within set_task_rq() right before setting a task's cpu. The
2928  * caller only guarantees p->pi_lock is held; no other assumptions,
2929  * including the state of rq->lock, should be made.
2930  */
2931 void set_task_rq_fair(struct sched_entity *se,
2932                       struct cfs_rq *prev, struct cfs_rq *next)
2933 {
2934         if (!sched_feat(ATTACH_AGE_LOAD))
2935                 return;
2936 
2937         /*
2938          * We are supposed to update the task to "current" time, so it is up to
2939          * date and ready to go to the new CPU/cfs_rq. But it is hard to get at
2940          * the current time here, so simply throw away the out-of-date time.
2941          * This will result in the wakee task being less decayed, but giving
2942          * the wakee more load does not sound bad.
2943          */
2944         if (se->avg.last_update_time && prev) {
2945                 u64 p_last_update_time;
2946                 u64 n_last_update_time;
2947 
2948 #ifndef CONFIG_64BIT
2949                 u64 p_last_update_time_copy;
2950                 u64 n_last_update_time_copy;
2951 
2952                 do {
2953                         p_last_update_time_copy = prev->load_last_update_time_copy;
2954                         n_last_update_time_copy = next->load_last_update_time_copy;
2955 
2956                         smp_rmb();
2957 
2958                         p_last_update_time = prev->avg.last_update_time;
2959                         n_last_update_time = next->avg.last_update_time;
2960 
2961                 } while (p_last_update_time != p_last_update_time_copy ||
2962                          n_last_update_time != n_last_update_time_copy);
2963 #else
2964                 p_last_update_time = prev->avg.last_update_time;
2965                 n_last_update_time = next->avg.last_update_time;
2966 #endif
2967                 __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
2968                                   &se->avg, 0, 0, NULL);
2969                 se->avg.last_update_time = n_last_update_time;
2970         }
2971 }
2972 #else /* CONFIG_FAIR_GROUP_SCHED */
2973 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
2974 #endif /* CONFIG_FAIR_GROUP_SCHED */
2975 
2976 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
2977 {
2978         if (&this_rq()->cfs == cfs_rq) {
2979                 /*
2980                  * There are a few boundary cases this might miss but it should
2981                  * get called often enough that this should (hopefully) not be
2982                  * a real problem -- in addition, it is only called for the local
2983                  * CPU, so if we enqueue remotely we'll miss an update, but
2984                  * the next tick/schedule should update.
2985                  *
2986                  * It will not get called when we go idle, because the idle
2987                  * thread is a different class (!fair), nor will the utilization
2988                  * number include things like RT tasks.
2989                  *
2990                  * As is, the util number is not freq-invariant (we'd have to
2991                  * implement arch_scale_freq_capacity() for that).
2992                  *
2993                  * See cpu_util().
2994                  */
2995                 cpufreq_update_util(rq_of(cfs_rq), 0);
2996         }
2997 }
2998 
2999 /*
3000  * Unsigned subtract and clamp on underflow.
3001  *
3002  * Explicitly do a load-store to ensure the intermediate value never hits
3003  * memory. This allows lockless observations without ever seeing the negative
3004  * values.
3005  */
3006 #define sub_positive(_ptr, _val) do {                           \
3007         typeof(_ptr) ptr = (_ptr);                              \
3008         typeof(*ptr) val = (_val);                              \
3009         typeof(*ptr) res, var = READ_ONCE(*ptr);                \
3010         res = var - val;                                        \
3011         if (res > var)                                          \
3012                 res = 0;                                        \
3013         WRITE_ONCE(*ptr, res);                                  \
3014 } while (0)
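A minimal user-space demonstration (editorial, not part of fair.c) of why the clamp is needed: an unsigned subtraction that would go negative wraps to a huge value, which the res > var test detects so 0 is stored instead. The READ_ONCE()/WRITE_ONCE() pair in the real macro additionally keeps lockless readers from ever observing such a wrapped intermediate value.

#include <stdio.h>

#define sub_positive_demo(_ptr, _val) do {                      \
        unsigned long *__p = (_ptr);                            \
        unsigned long __v = (_val);                             \
        unsigned long __r = *__p - __v;                         \
        if (__r > *__p)         /* wrapped below zero: clamp */ \
                __r = 0;                                        \
        *__p = __r;                                             \
} while (0)

int main(void)
{
        unsigned long load = 100;

        sub_positive_demo(&load, 40);   /* 100 - 40 = 60 */
        sub_positive_demo(&load, 90);   /* would be -30, clamped to 0 */
        printf("%lu\n", load);          /* prints 0 */
        return 0;
}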
3015 
3016 /**
3017  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3018  * @now: current time, as per cfs_rq_clock_task()
3019  * @cfs_rq: cfs_rq to update
3020  * @update_freq: should we call cfs_rq_util_change() or will the caller do so
3021  *
3022  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3023  * avg. The immediate corollary is that all (fair) tasks must be attached, see
3024  * post_init_entity_util_avg().
3025  *
3026  * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3027  *
3028  * Returns true if the load decayed or we removed load.
3029  *
3030  * Since both these conditions indicate a changed cfs_rq->avg.load we should
3031  * call update_tg_load_avg() when this function returns true.
3032  */
3033 static inline int
3034 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3035 {
3036         struct sched_avg *sa = &cfs_rq->avg;
3037         int decayed, removed_load = 0, removed_util = 0;
3038 
3039         if (atomic_long_read(&cfs_rq->removed_load_avg)) {
3040                 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
3041                 sub_positive(&sa->load_avg, r);
3042                 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
3043                 removed_load = 1;
3044         }
3045 
3046         if (atomic_long_read(&cfs_rq->removed_util_avg)) {
3047                 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
3048                 sub_positive(&sa->util_avg, r);
3049                 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
3050                 removed_util = 1;
3051         }
3052 
3053         decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
3054                 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
3055 
3056 #ifndef CONFIG_64BIT
3057         smp_wmb();
3058         cfs_rq->load_last_update_time_copy = sa->last_update_time;
3059 #endif
3060 
3061         if (update_freq && (decayed || removed_util))
3062                 cfs_rq_util_change(cfs_rq);
3063 
3064         return decayed || removed_load;
3065 }
3066 
3067 /* Update task and its cfs_rq load average */
3068 static inline void update_load_avg(struct sched_entity *se, int update_tg)
3069 {
3070         struct cfs_rq *cfs_rq = cfs_rq_of(se);
3071         u64 now = cfs_rq_clock_task(cfs_rq);
3072         struct rq *rq = rq_of(cfs_rq);
3073         int cpu = cpu_of(rq);
3074 
3075         /*
3076          * Track task load average for carrying it to the new CPU after it is
3077          * migrated, and the group sched_entity load average for task_h_load calc in migration
3078          */
3079         __update_load_avg(now, cpu, &se->avg,
3080                           se->on_rq * scale_load_down(se->load.weight),
3081                           cfs_rq->curr == se, NULL);
3082 
3083         if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
3084                 update_tg_load_avg(cfs_rq, 0);
3085 }
3086 
3087 /**
3088  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3089  * @cfs_rq: cfs_rq to attach to
3090  * @se: sched_entity to attach
3091  *
3092  * Must call update_cfs_rq_load_avg() before this, since we rely on
3093  * cfs_rq->avg.last_update_time being current.
3094  */
3095 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3096 {
3097         if (!sched_feat(ATTACH_AGE_LOAD))
3098                 goto skip_aging;
3099 
3100         /*
3101          * If we got migrated (either between CPUs or between cgroups) we'll
3102          * have aged the average right before clearing @last_update_time.
3103          *
3104          * Or we're fresh through post_init_entity_util_avg().
3105          */
3106         if (se->avg.last_update_time) {
3107                 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3108                                   &se->avg, 0, 0, NULL);
3109 
3110                 /*
3111                  * XXX: we could have just aged the entire load away if we've been
3112                  * absent from the fair class for too long.
3113                  */
3114         }
3115 
3116 skip_aging:
3117         se->avg.last_update_time = cfs_rq->avg.last_update_time;
3118         cfs_rq->avg.load_avg += se->avg.load_avg;
3119         cfs_rq->avg.load_sum += se->avg.load_sum;
3120         cfs_rq->avg.util_avg += se->avg.util_avg;
3121         cfs_rq->avg.util_sum += se->avg.util_sum;
3122 
3123         cfs_rq_util_change(cfs_rq);
3124 }
3125 
3126 /**
3127  * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3128  * @cfs_rq: cfs_rq to detach from
3129  * @se: sched_entity to detach
3130  *
3131  * Must call update_cfs_rq_load_avg() before this, since we rely on
3132  * cfs_rq->avg.last_update_time being current.
3133  */
3134 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3135 {
3136         __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3137                           &se->avg, se->on_rq * scale_load_down(se->load.weight),
3138                           cfs_rq->curr == se, NULL);
3139 
3140         sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3141         sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3142         sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3143         sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3144 
3145         cfs_rq_util_change(cfs_rq);
3146 }
3147 
3148 /* Add the load generated by se into cfs_rq's load average */
3149 static inline void
3150 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3151 {
3152         struct sched_avg *sa = &se->avg;
3153         u64 now = cfs_rq_clock_task(cfs_rq);
3154         int migrated, decayed;
3155 
3156         migrated = !sa->last_update_time;
3157         if (!migrated) {
3158                 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
3159                         se->on_rq * scale_load_down(se->load.weight),
3160                         cfs_rq->curr == se, NULL);
3161         }
3162 
3163         decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
3164 
3165         cfs_rq->runnable_load_avg += sa->load_avg;
3166         cfs_rq->runnable_load_sum += sa->load_sum;
3167 
3168         if (migrated)
3169                 attach_entity_load_avg(cfs_rq, se);
3170 
3171         if (decayed || migrated)
3172                 update_tg_load_avg(cfs_rq, 0);
3173 }
3174 
3175 /* Remove the runnable load generated by se from cfs_rq's runnable load average */
3176 static inline void
3177 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3178 {
3179         update_load_avg(se, 1);
3180 
3181         cfs_rq->runnable_load_avg =
3182                 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3183         cfs_rq->runnable_load_sum =
3184                 max_t(s64,  cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
3185 }
3186 
3187 #ifndef CONFIG_64BIT
3188 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3189 {
3190         u64 last_update_time_copy;
3191         u64 last_update_time;
3192 
3193         do {
3194                 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3195                 smp_rmb();
3196                 last_update_time = cfs_rq->avg.last_update_time;
3197         } while (last_update_time != last_update_time_copy);
3198 
3199         return last_update_time;
3200 }
3201 #else
3202 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3203 {
3204         return cfs_rq->avg.last_update_time;
3205 }
3206 #endif
3207 
3208 /*
3209  * Task first catches up with cfs_rq, and then subtracts
3210  * itself from the cfs_rq (task must be off the queue now).
3211  */
3212 void remove_entity_load_avg(struct sched_entity *se)
3213 {
3214         struct cfs_rq *cfs_rq = cfs_rq_of(se);
3215         u64 last_update_time;
3216 
3217         /*
3218          * tasks cannot exit without having gone through wake_up_new_task() ->
3219          * post_init_entity_util_avg() which will have added things to the
3220          * cfs_rq, so we can remove unconditionally.
3221          *
3222          * Similarly for groups, they will have passed through
3223          * post_init_entity_util_avg() before unregister_sched_fair_group()
3224          * calls this.
3225          */
3226 
3227         last_update_time = cfs_rq_last_update_time(cfs_rq);
3228 
3229         __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
3230         atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3231         atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
3232 }
3233 
3234 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3235 {
3236         return cfs_rq->runnable_load_avg;
3237 }
3238 
3239 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3240 {
3241         return cfs_rq->avg.load_avg;
3242 }
3243 
3244 static int idle_balance(struct rq *this_rq);
3245 
3246 #else /* CONFIG_SMP */
3247 
3248 static inline int
3249 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3250 {
3251         return 0;
3252 }
3253 
3254 static inline void update_load_avg(struct sched_entity *se, int not_used)
3255 {
3256         cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
3257 }
3258 
3259 static inline void
3260 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3261 static inline void
3262 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3263 static inline void remove_entity_load_avg(struct sched_entity *se) {}
3264 
3265 static inline void
3266 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3267 static inline void
3268 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3269 
3270 static inline int idle_balance(struct rq *rq)
3271 {
3272         return 0;
3273 }
3274 
3275 #endif /* CONFIG_SMP */
3276 
3277 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3278 {
3279 #ifdef CONFIG_SCHED_DEBUG
3280         s64 d = se->vruntime - cfs_rq->min_vruntime;
3281 
3282         if (d < 0)
3283                 d = -d;
3284 
3285         if (d > 3*sysctl_sched_latency)
3286                 schedstat_inc(cfs_rq->nr_spread_over);
3287 #endif
3288 }
3289 
3290 static void
3291 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3292 {
3293         u64 vruntime = cfs_rq->min_vruntime;
3294 
3295         /*
3296          * The 'current' period is already promised to the current tasks,
3297          * however the extra weight of the new task will slow them down a
3298          * little, place the new task so that it fits in the slot that
3299          * stays open at the end.
3300          */
3301         if (initial && sched_feat(START_DEBIT))
3302                 vruntime += sched_vslice(cfs_rq, se);
3303 
3304         /* sleeps up to a single latency don't count. */
3305         if (!initial) {
3306                 unsigned long thresh = sysctl_sched_latency;
3307 
3308                 /*
3309                  * Halve their sleep time's effect, to allow
3310                  * for a gentler effect of sleepers:
3311                  */
3312                 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3313                         thresh >>= 1;
3314 
3315                 vruntime -= thresh;
3316         }
3317 
3318         /* ensure we never gain time by being placed backwards. */
3319         se->vruntime = max_vruntime(se->vruntime, vruntime);
3320 }
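Editorial sketch of the placement policy above, in nanoseconds of vruntime and assuming the default 6ms sysctl_sched_latency with both START_DEBIT and GENTLE_FAIR_SLEEPERS enabled (a hypothetical stand-alone mirror, not the kernel function): a newly forked entity starts one vslice behind the queue, a waking sleeper gets at most half a latency period of credit, and neither may move its vruntime backwards.

static unsigned long long place_vruntime(unsigned long long se_vruntime,
                                         unsigned long long min_vruntime,
                                         unsigned long long vslice,
                                         int initial)
{
        unsigned long long vruntime = min_vruntime;
        unsigned long long thresh = 6000000ULL;   /* sysctl_sched_latency */

        if (initial)
                vruntime += vslice;               /* START_DEBIT: debit one slice */
        else
                vruntime -= thresh >> 1;          /* GENTLE_FAIR_SLEEPERS: half credit */

        /* never gain time by being placed backwards */
        return se_vruntime > vruntime ? se_vruntime : vruntime;
}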
3321 
3322 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3323 
3324 static inline void check_schedstat_required(void)
3325 {
3326 #ifdef CONFIG_SCHEDSTATS
3327         if (schedstat_enabled())
3328                 return;
3329 
3330         /* Force schedstat enabled if a dependent tracepoint is active */
3331         if (trace_sched_stat_wait_enabled()    ||
3332                         trace_sched_stat_sleep_enabled()   ||
3333                         trace_sched_stat_iowait_enabled()  ||
3334                         trace_sched_stat_blocked_enabled() ||
3335                         trace_sched_stat_runtime_enabled())  {
3336                 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3337                              "stat_blocked and stat_runtime require the "
3338                              "kernel parameter schedstats=enabled or "
3339                              "kernel.sched_schedstats=1\n");
3340         }
3341 #endif
3342 }
3343 
3344 
3345 /*
3346  * MIGRATION
3347  *
3348  *      dequeue
3349  *        update_curr()
3350  *          update_min_vruntime()
3351  *        vruntime -= min_vruntime
3352  *
3353  *      enqueue
3354  *        update_curr()
3355  *          update_min_vruntime()
3356  *        vruntime += min_vruntime
3357  *
3358  * this way the vruntime transition between RQs is done when both
3359  * min_vruntime are up-to-date.
3360  *
3361  * WAKEUP (remote)
3362  *
3363  *      ->migrate_task_rq_fair() (p->state == TASK_WAKING)
3364  *        vruntime -= min_vruntime
3365  *
3366  *      enqueue
3367  *        update_curr()
3368  *          update_min_vruntime()
3369  *        vruntime += min_vruntime
3370  *
3371  * this way we don't need the most up-to-date min_vruntime on the originating
3372  * CPU, while we do have an up-to-date min_vruntime on the destination CPU.
3373  */
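A tiny worked example (editorial) of the renormalisation described above: while a task is in flight between runqueues, its vruntime is kept relative to the source cfs_rq's min_vruntime and is made absolute again against the destination's, so the task's relative lag or lead is preserved.

#include <stdio.h>

int main(void)
{
        unsigned long long src_min = 1000000ULL;   /* source min_vruntime */
        unsigned long long dst_min = 5000000ULL;   /* destination min_vruntime */
        unsigned long long vruntime = 1200000ULL;  /* 200us ahead of src_min */

        vruntime -= src_min;    /* dequeue / migrate_task_rq_fair(): 200000 */
        vruntime += dst_min;    /* enqueue on the destination:      5200000 */
        printf("%llu\n", vruntime);     /* still 200us ahead of dst_min */
        return 0;
}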
3374 
3375 static void
3376 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3377 {
3378         bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3379         bool curr = cfs_rq->curr == se;
3380 
3381         /*
3382          * If we're the current task, we must renormalise before calling
3383          * update_curr().
3384          */
3385         if (renorm && curr)
3386                 se->vruntime += cfs_rq->min_vruntime;
3387 
3388         update_curr(cfs_rq);
3389 
3390         /*
3391          * Otherwise, renormalise after, such that we're placed at the current
3392          * moment in time, instead of some random moment in the past. Being
3393          * placed in the past could significantly boost this task to the
3394          * fairness detriment of existing tasks.
3395          */
3396         if (renorm && !curr)
3397                 se->vruntime += cfs_rq->min_vruntime;
3398 
3399         enqueue_entity_load_avg(cfs_rq, se);
3400         account_entity_enqueue(cfs_rq, se);
3401         update_cfs_shares(cfs_rq);
3402 
3403         if (flags & ENQUEUE_WAKEUP)
3404                 place_entity(cfs_rq, se, 0);
3405 
3406         check_schedstat_required();
3407         update_stats_enqueue(cfs_rq, se, flags);
3408         check_spread(cfs_rq, se);
3409         if (!curr)
3410                 __enqueue_entity(cfs_rq, se);
3411         se->on_rq = 1;
3412 
3413         if (cfs_rq->nr_running == 1) {
3414                 list_add_leaf_cfs_rq(cfs_rq);
3415                 check_enqueue_throttle(cfs_rq);
3416         }
3417 }
3418 
3419 static void __clear_buddies_last(struct sched_entity *se)
3420 {
3421         for_each_sched_entity(se) {
3422                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3423                 if (cfs_rq->last != se)
3424                         break;
3425 
3426                 cfs_rq->last = NULL;
3427         }
3428 }
3429 
3430 static void __clear_buddies_next(struct sched_entity *se)
3431 {
3432         for_each_sched_entity(se) {
3433                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3434                 if (cfs_rq->next != se)
3435                         break;
3436 
3437                 cfs_rq->next = NULL;
3438         }
3439 }
3440 
3441 static void __clear_buddies_skip(struct sched_entity *se)
3442 {
3443         for_each_sched_entity(se) {
3444                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3445                 if (cfs_rq->skip != se)
3446                         break;
3447 
3448                 cfs_rq->skip = NULL;
3449         }
3450 }
3451 
3452 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3453 {
3454         if (cfs_rq->last == se)
3455                 __clear_buddies_last(se);
3456 
3457         if (cfs_rq->next == se)
3458                 __clear_buddies_next(se);
3459 
3460         if (cfs_rq->skip == se)
3461                 __clear_buddies_skip(se);
3462 }
3463 
3464 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3465 
3466 static void
3467 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3468 {
3469         /*
3470          * Update run-time statistics of the 'current'.
3471          */
3472         update_curr(cfs_rq);
3473         dequeue_entity_load_avg(cfs_rq, se);
3474 
3475         update_stats_dequeue(cfs_rq, se, flags);
3476 
3477         clear_buddies(cfs_rq, se);
3478 
3479         if (se != cfs_rq->curr)
3480                 __dequeue_entity(cfs_rq, se);
3481         se->on_rq = 0;
3482         account_entity_dequeue(cfs_rq, se);
3483 
3484         /*
3485          * Normalize after update_curr(); which will also have moved
3486          * min_vruntime if @se is the one holding it back. But before doing
3487          * update_min_vruntime() again, which will discount @se's position and
3488          * can move min_vruntime forward still more.
3489          */
3490         if (!(flags & DEQUEUE_SLEEP))
3491                 se->vruntime -= cfs_rq->min_vruntime;
3492 
3493         /* return excess runtime on last dequeue */
3494         return_cfs_rq_runtime(cfs_rq);
3495 
3496         update_cfs_shares(cfs_rq);
3497 
3498         /*
3499          * Now advance min_vruntime if @se was the entity holding it back,
3500          * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3501          * put back on, and if we advance min_vruntime, we'll be placed back
3502          * further than we started -- ie. we'll be penalized.
3503          */
3504         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3505                 update_min_vruntime(cfs_rq);
3506 }
3507 
3508 /*
3509  * Preempt the current task with a newly woken task if needed:
3510  */
3511 static void
3512 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3513 {
3514         unsigned long ideal_runtime, delta_exec;
3515         struct sched_entity *se;
3516         s64 delta;
3517 
3518         ideal_runtime = sched_slice(cfs_rq, curr);
3519         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
3520         if (delta_exec > ideal_runtime) {
3521                 resched_curr(rq_of(cfs_rq));
3522                 /*
3523                  * The current task ran long enough, ensure it doesn't get
3524                  * re-elected due to buddy favours.
3525                  */
3526                 clear_buddies(cfs_rq, curr);
3527                 return;
3528         }
3529 
3530         /*
3531          * Ensure that a task that missed wakeup preemption by a
3532          * narrow margin doesn't have to wait for a full slice.
3533          * This also mitigates buddy induced latencies under load.
3534          */
3535         if (delta_exec < sysctl_sched_min_granularity)
3536                 return;
3537 
3538         se = __pick_first_entity(cfs_rq);
3539         delta = curr->vruntime - se->vruntime;
3540 
3541         if (delta < 0)
3542                 return;
3543 
3544         if (delta > ideal_runtime)
3545                 resched_curr(rq_of(cfs_rq));
3546 }
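Editorial restatement of the decision above as a pure function (hypothetical helper, arguments in nanoseconds): preempt when the running entity has used up its slice, or when it has run at least the minimum granularity and its vruntime has pulled more than a full slice ahead of the leftmost entity.

/* vruntime_delta is curr->vruntime - leftmost->vruntime, may be negative */
static int tick_should_preempt(unsigned long long delta_exec,
                               unsigned long long ideal_runtime,
                               long long vruntime_delta,
                               unsigned long long min_granularity)
{
        if (delta_exec > ideal_runtime)
                return 1;                       /* slice exhausted */
        if (delta_exec < min_granularity)
                return 0;                       /* ran too briefly to matter */
        return vruntime_delta > (long long)ideal_runtime;
}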
3547 
3548 static void
3549 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
3550 {
3551         /* 'current' is not kept within the tree. */
3552         if (se->on_rq) {
3553                 /*
3554                  * Any task has to be enqueued before it gets to execute on
3555                  * a CPU. So account for the time it spent waiting on the
3556                  * runqueue.
3557                  */
3558                 update_stats_wait_end(cfs_rq, se);
3559                 __dequeue_entity(cfs_rq, se);
3560                 update_load_avg(se, 1);
3561         }
3562 
3563         update_stats_curr_start(cfs_rq, se);
3564         cfs_rq->curr = se;
3565 
3566         /*
3567          * Track our maximum slice length, if the CPU's load is at
3568          * least twice that of our own weight (i.e. don't track it
3569          * when there are only lesser-weight tasks around):
3570          */
3571         if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
3572                 schedstat_set(se->statistics.slice_max,
3573                         max((u64)schedstat_val(se->statistics.slice_max),
3574                             se->sum_exec_runtime - se->prev_sum_exec_runtime));
3575         }
3576 
3577         se->prev_sum_exec_runtime = se->sum_exec_runtime;
3578 }
3579 
3580 static int
3581 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3582 
3583 /*
3584  * Pick the next process, keeping these things in mind, in this order:
3585  * 1) keep things fair between processes/task groups
3586  * 2) pick the "next" process, since someone really wants that to run
3587  * 3) pick the "last" process, for cache locality
3588  * 4) do not run the "skip" process, if something else is available
3589  */
3590 static struct sched_entity *
3591 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
3592 {
3593         struct sched_entity *left = __pick_first_entity(cfs_rq);
3594         struct sched_entity *se;
3595 
3596         /*
3597          * If curr is set we have to see if it's left of the leftmost entity
3598          * still in the tree, provided there was anything in the tree at all.
3599          */
3600         if (!left || (curr && entity_before(curr, left)))
3601                 left = curr;
3602 
3603         se = left; /* ideally we run the leftmost entity */
3604 
3605         /*
3606          * Avoid running the skip buddy, if running something else can
3607          * be done without getting too unfair.
3608          */
3609         if (cfs_rq->skip == se) {
3610                 struct sched_entity *second;
3611 
3612                 if (se == curr) {
3613                         second = __pick_first_entity(cfs_rq);
3614                 } else {
3615                         second = __pick_next_entity(se);
3616                         if (!second || (curr && entity_before(curr, second)))
3617                                 second = curr;
3618                 }
3619 
3620                 if (second && wakeup_preempt_entity(second, left) < 1)
3621                         se = second;
3622         }
3623 
3624         /*
3625          * Prefer last buddy, try to return the CPU to a preempted task.
3626          */
3627         if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3628                 se = cfs_rq->last;
3629 
3630         /*
3631          * Someone really wants this to run. If it's not unfair, run it.
3632          */
3633         if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3634                 se = cfs_rq->next;
3635 
3636         clear_buddies(cfs_rq, se);
3637 
3638         return se;
3639 }
3640 
3641 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3642 
3643 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
3644 {
3645         /*
3646          * If still on the runqueue then deactivate_task()
3647          * was not called and update_curr() has to be done:
3648          */
3649         if (prev->on_rq)
3650                 update_curr(cfs_rq);
3651 
3652         /* throttle cfs_rqs exceeding runtime */
3653         check_cfs_rq_runtime(cfs_rq);
3654 
3655         check_spread(cfs_rq, prev);
3656 
3657         if (prev->on_rq) {
3658                 update_stats_wait_start(cfs_rq, prev);
3659                 /* Put 'current' back into the tree. */
3660                 __enqueue_entity(cfs_rq, prev);
3661                 /* in !on_rq case, update occurred at dequeue */
3662                 update_load_avg(prev, 0);
3663         }
3664         cfs_rq->curr = NULL;
3665 }
3666 
3667 static void
3668 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
3669 {
3670         /*
3671          * Update run-time statistics of the 'current'.
3672          */
3673         update_curr(cfs_rq);
3674 
3675         /*
3676          * Ensure that runnable average is periodically updated.
3677          */
3678         update_load_avg(curr, 1);
3679         update_cfs_shares(cfs_rq);
3680 
3681 #ifdef CONFIG_SCHED_HRTICK
3682         /*
3683          * queued ticks are scheduled to match the slice, so don't bother
3684          * validating it and just reschedule.
3685          */
3686         if (queued) {
3687                 resched_curr(rq_of(cfs_rq));
3688                 return;
3689         }
3690         /*
3691          * don't let the period tick interfere with the hrtick preemption
3692          */
3693         if (!sched_feat(DOUBLE_TICK) &&
3694                         hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3695                 return;
3696 #endif
3697 
3698         if (cfs_rq->nr_running > 1)
3699                 check_preempt_tick(cfs_rq, curr);
3700 }
3701 
3702 
3703 /**************************************************
3704  * CFS bandwidth control machinery
3705  */
3706 
3707 #ifdef CONFIG_CFS_BANDWIDTH
3708 
3709 #ifdef HAVE_JUMP_LABEL
3710 static struct static_key __cfs_bandwidth_used;
3711 
3712 static inline bool cfs_bandwidth_used(void)
3713 {
3714         return static_key_false(&__cfs_bandwidth_used);
3715 }
3716 
3717 void cfs_bandwidth_usage_inc(void)
3718 {
3719         static_key_slow_inc(&__cfs_bandwidth_used);
3720 }
3721 
3722 void cfs_bandwidth_usage_dec(void)
3723 {
3724         static_key_slow_dec(&__cfs_bandwidth_used);
3725 }
3726 #else /* HAVE_JUMP_LABEL */
3727 static bool cfs_bandwidth_used(void)
3728 {
3729         return true;
3730 }
3731 
3732 void cfs_bandwidth_usage_inc(void) {}
3733 void cfs_bandwidth_usage_dec(void) {}
3734 #endif /* HAVE_JUMP_LABEL */
3735 
3736 /*
3737  * default period for cfs group bandwidth.
3738  * default: 0.1s, units: nanoseconds
3739  */
3740 static inline u64 default_cfs_period(void)
3741 {
3742         return 100000000ULL;
3743 }
3744 
3745 static inline u64 sched_cfs_bandwidth_slice(void)
3746 {
3747         return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3748 }
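Editorial example of how these defaults combine: a group's CPU entitlement per period is quota/period, and runtime is handed to individual cfs_rqs in sysctl_sched_cfs_bandwidth_slice chunks (5ms by default). The 50ms quota below is an illustrative value, not a kernel default.

#include <stdio.h>

int main(void)
{
        unsigned long long period_ns = 100000000ULL;  /* default_cfs_period() */
        unsigned long long quota_ns  =  50000000ULL;  /* example quota: 50ms */
        unsigned long long slice_ns  =   5000000ULL;  /* default bandwidth slice */

        printf("cpu share: %.2f CPUs\n", (double)quota_ns / period_ns);      /* 0.50 */
        printf("refills  : %llu slices per period\n", quota_ns / slice_ns);  /* 10 */
        return 0;
}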
3749 
3750 /*
3751  * Replenish runtime according to assigned quota and update expiration time.
3752  * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3753  * additional synchronization around rq->lock.
3754  *
3755  * requires cfs_b->lock
3756  */
3757 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
3758 {
3759         u64 now;
3760 
3761         if (cfs_b->quota == RUNTIME_INF)
3762                 return;
3763 
3764         now = sched_clock_cpu(smp_processor_id());
3765         cfs_b->runtime = cfs_b->quota;
3766         cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3767 }
3768 
3769 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3770 {
3771         return &tg->cfs_bandwidth;
3772 }
3773 
3774 /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3775 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3776 {
3777         if (unlikely(cfs_rq->throttle_count))
3778                 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
3779 
3780         return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
3781 }
3782 
3783 /* returns 0 on failure to allocate runtime */
3784 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3785 {
3786         struct task_group *tg = cfs_rq->tg;
3787         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
3788         u64 amount = 0, min_amount, expires;
3789 
3790         /* note: this is a positive sum as runtime_remaining <= 0 */
3791         min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3792 
3793         raw_spin_lock(&cfs_b->lock);
3794         if (cfs_b->quota == RUNTIME_INF)
3795                 amount = min_amount;
3796         else {
3797                 start_cfs_bandwidth(cfs_b);
3798 
3799                 if (cfs_b->runtime > 0) {
3800                         amount = min(cfs_b->runtime, min_amount);
3801                         cfs_b->runtime -= amount;
3802                         cfs_b->idle = 0;
3803                 }
3804         }
3805         expires = cfs_b->runtime_expires;
3806         raw_spin_unlock(&cfs_b->lock);
3807 
3808         cfs_rq->runtime_remaining += amount;
3809         /*
3810          * we may have advanced our local expiration to account for allowed
3811          * spread between our sched_clock and the one on which runtime was
3812          * issued.
3813          */
3814         if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3815                 cfs_rq->runtime_expires = expires;
3816 
3817         return cfs_rq->runtime_remaining > 0;
3818 }
3819 
3820 /*
3821  * Note: This depends on the synchronization provided by sched_clock and the
3822  * fact that rq->clock snapshots this value.
3823  */
3824 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3825 {
3826         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3827 
3828         /* if the deadline is ahead of our clock, nothing to do */
3829         if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
3830                 return;
3831 
3832         if (cfs_rq->runtime_remaining < 0)
3833                 return;
3834 
3835         /*
3836          * If the local deadline has passed we have to consider the
3837          * possibility that our sched_clock is 'fast' and the global deadline
3838          * has not truly expired.
3839          *
3840          * Fortunately we can determine whether this is the case by checking
3841          * whether the global deadline has advanced. It is valid to compare
3842          * cfs_b->runtime_expires without any locks since we only care about
3843          * exact equality, so a partial write will still work.
3844          */
3845 
3846         if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
3847                 /* extend local deadline, drift is bounded above by 2 ticks */
3848                 cfs_rq->runtime_expires += TICK_NSEC;
3849         } else {
3850                 /* global deadline is ahead, expiration has passed */
3851                 cfs_rq->runtime_remaining = 0;
3852         }
3853 }
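Editorial sketch of the branch above (a hypothetical helper): once the local deadline has passed, the global deadline decides whether this was mere sched_clock drift between CPUs (extend the local deadline by one tick) or a genuine expiry (drop the remaining runtime).

static void expire_demo(long long *runtime_remaining,
                        unsigned long long *local_expires,
                        unsigned long long global_expires,
                        unsigned long long tick_ns)
{
        if (*local_expires != global_expires)
                *local_expires += tick_ns;      /* our clock was fast: extend */
        else
                *runtime_remaining = 0;         /* quota has really expired */
}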
3854 
3855 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3856 {
3857         /* dock delta_exec before expiring quota (as it could span periods) */
3858         cfs_rq->runtime_remaining -= delta_exec;
3859         expire_cfs_rq_runtime(cfs_rq);
3860 
3861         if (likely(cfs_rq->runtime_remaining > 0))
3862                 return;
3863 
3864         /*
3865          * if we're unable to extend our runtime we resched so that the active
3866          * hierarchy can be throttled
3867          */
3868         if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3869                 resched_curr(rq_of(cfs_rq));
3870 }
3871 
3872 static __always_inline
3873 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
3874 {
3875         if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
3876                 return;
3877 
3878         __account_cfs_rq_runtime(cfs_rq, delta_exec);
3879 }
3880 
3881 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3882 {
3883         return cfs_bandwidth_used() && cfs_rq->throttled;
3884 }
3885 
3886 /* check whether cfs_rq, or any parent, is throttled */
3887 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3888 {
3889         return cfs_bandwidth_used() && cfs_rq->throttle_count;
3890 }
3891 
3892 /*
3893  * Ensure that neither of the group entities corresponding to src_cpu or
3894  * dest_cpu are members of a throttled hierarchy when performing group
3895  * load-balance operations.
3896  */
3897 static inline int throttled_lb_pair(struct task_group *tg,
3898                                     int src_cpu, int dest_cpu)
3899 {
3900         struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3901 
3902         src_cfs_rq = tg->cfs_rq[src_cpu];
3903         dest_cfs_rq = tg->cfs_rq[dest_cpu];
3904 
3905         return throttled_hierarchy(src_cfs_rq) ||
3906                throttled_hierarchy(dest_cfs_rq);
3907 }
3908 
3909 /* updated child weight may affect parent so we have to do this bottom up */
3910 static int tg_unthrottle_up(struct task_group *tg, void *data)
3911 {
3912         struct rq *rq = data;
3913         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3914 
3915         cfs_rq->throttle_count--;
3916         if (!cfs_rq->throttle_count) {
3917                 /* adjust cfs_rq_clock_task() */
3918                 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
3919                                              cfs_rq->throttled_clock_task;
3920         }
3921 
3922         return 0;
3923 }
3924 
3925 static int tg_throttle_down(struct task_group *tg, void *data)
3926 {
3927         struct rq *rq = data;
3928         struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3929 
3930         /* group is entering throttled state, stop time */
3931         if (!cfs_rq->throttle_count)
3932                 cfs_rq->throttled_clock_task = rq_clock_task(rq);
3933         cfs_rq->throttle_count++;
3934 
3935         return 0;
3936 }
3937 
3938 static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3939 {
3940         struct rq *rq = rq_of(cfs_rq);
3941         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3942         struct sched_entity *se;
3943         long task_delta, dequeue = 1;
3944         bool empty;
3945 
3946         se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3947 
3948         /* freeze hierarchy runnable averages while throttled */
3949         rcu_read_lock();
3950         walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3951         rcu_read_unlock();
3952 
3953         task_delta = cfs_rq->h_nr_running;
3954         for_each_sched_entity(se) {
3955                 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3956                 /* throttled entity or throttle-on-deactivate */
3957                 if (!se->on_rq)
3958                         break;
3959 
3960                 if (dequeue)
3961                         dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3962                 qcfs_rq->h_nr_running -= task_delta;
3963 
3964                 if (qcfs_rq->load.weight)
3965                         dequeue = 0;
3966         }
3967 
3968         if (!se)
3969                 sub_nr_running(rq, task_delta);
3970 
3971         cfs_rq->throttled = 1;
3972         cfs_rq->throttled_clock = rq_clock(rq);
3973         raw_spin_lock(&cfs_b->lock);
3974         empty = list_empty(&cfs_b->throttled_cfs_rq);
3975 
3976         /*
3977          * Add to the _head_ of the list, so that an already-started
3978          * distribute_cfs_runtime will not see us
3979          */
3980         list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
3981 
3982         /*
3983          * If we're the first throttled task, make sure the bandwidth
3984          * timer is running.
3985          */
3986         if (empty)
3987                 start_cfs_bandwidth(cfs_b);
3988 
3989         raw_spin_unlock(&cfs_b->lock);
3990 }
3991 
3992 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
3993 {
3994         struct rq *rq = rq_of(cfs_rq);
3995         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3996         struct sched_entity *se;
3997         int enqueue = 1;
3998         long task_delta;
3999 
4000         se = cfs_rq->tg->se[cpu_of(rq)];
4001 
4002         cfs_rq->throttled = 0;
4003 
4004         update_rq_clock(rq);
4005 
4006         raw_spin_lock(&cfs_b->lock);
4007         cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
4008         list_del_rcu(&cfs_rq->throttled_list);
4009         raw_spin_unlock(&cfs_b->lock);
4010 
4011         /* update hierarchical throttle state */
4012         walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4013 
4014         if (!cfs_rq->load.weight)
4015                 return;
4016 
4017         task_delta = cfs_rq->h_nr_running;
4018         for_each_sched_entity(se) {
4019                 if (se->on_rq)
4020                         enqueue = 0;
4021 
4022                 cfs_rq = cfs_rq_of(se);
4023                 if (enqueue)
4024                         enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4025                 cfs_rq->h_nr_running += task_delta;
4026 
4027                 if (cfs_rq_throttled(cfs_rq))
4028                         break;
4029         }
4030 
4031         if (!se)
4032                 add_nr_running(rq, task_delta);
4033 
4034         /* determine whether we need to wake up potentially idle cpu */
4035         if (rq->curr == rq->idle && rq->cfs.nr_running)
4036                 resched_curr(rq);
4037 }
4038 
4039 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4040                 u64 remaining, u64 expires)
4041 {
4042         struct cfs_rq *cfs_rq;
4043         u64 runtime;
4044         u64 starting_runtime = remaining;
4045 
4046         rcu_read_lock();
4047         list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4048                                 throttled_list) {
4049                 struct rq *rq = rq_of(cfs_rq);
4050 
4051                 raw_spin_lock(&rq->lock);
4052                 if (!cfs_rq_throttled(cfs_rq))
4053                         goto next;
4054 
4055                 runtime = -cfs_rq->runtime_remaining + 1;
4056                 if (runtime > remaining)
4057                         runtime = remaining;
4058                 remaining -= runtime;
4059 
4060                 cfs_rq->runtime_remaining += runtime;
4061                 cfs_rq->runtime_expires = expires;
4062 
4063                 /* we check whether we're throttled above */
4064                 if (cfs_rq->runtime_remaining > 0)
4065                         unthrottle_cfs_rq(cfs_rq);
4066 
4067 next:
4068                 raw_spin_unlock(&rq->lock);
4069 
4070                 if (!remaining)
4071                         break;
4072         }
4073         rcu_read_unlock();
4074 
4075         return starting_runtime - remaining;
4076 }
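Editorial sketch of the same distribution over a plain array (hypothetical; locking, RCU and the expires bookkeeping are omitted): each throttled cfs_rq is topped up to just past zero so that it can unthrottle, until the pool runs out, and the amount actually handed out is returned.

static unsigned long long distribute_demo(long long *rq_remaining, int nr,
                                          unsigned long long pool)
{
        unsigned long long start = pool;
        int i;

        for (i = 0; i < nr && pool; i++) {
                unsigned long long runtime;

                if (rq_remaining[i] > 0)        /* not throttled, skip */
                        continue;
                runtime = (unsigned long long)(-rq_remaining[i]) + 1;
                if (runtime > pool)
                        runtime = pool;
                rq_remaining[i] += runtime;     /* may now unthrottle */
                pool -= runtime;
        }
        return start - pool;                    /* runtime distributed */
}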
4077 
4078 /*
4079  * Responsible for refilling a task_group's bandwidth and unthrottling its
4080  * cfs_rqs as appropriate. If there has been no activity within the last
4081  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4082  * used to track this state.
4083  */
4084 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4085 {
4086         u64 runtime, runtime_expires;
4087         int throttled;
4088 
4089         /* no need to continue the timer with no bandwidth constraint */
4090         if (cfs_b->quota == RUNTIME_INF)
4091                 goto out_deactivate;
4092 
4093         throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4094         cfs_b->nr_periods += overrun;
4095 
4096         /*
4097          * idle depends on !throttled (for the case of a large deficit), and if
4098          * we're going inactive then everything else can be deferred
4099          */
4100         if (cfs_b->idle && !throttled)
4101                 goto out_deactivate;
4102 
4103         __refill_cfs_bandwidth_runtime(cfs_b);
4104 
4105         if (!throttled) {
4106                 /* mark as potentially idle for the upcoming period */
4107                 cfs_b->idle = 1;
4108                 return 0;
4109         }
4110 
4111         /* account preceding periods in which throttling occurred */
4112         cfs_b->nr_throttled += overrun;
4113 
4114         runtime_expires = cfs_b->runtime_expires;
4115 
4116         /*
4117          * This check is repeated as we are holding onto the new bandwidth while
4118          * we unthrottle. This can potentially race with an unthrottled group
4119          * trying to acquire new bandwidth from the global pool. This can result
4120          * in us over-using our runtime if it is all used during this loop, but
4121          * only by limited amounts in that extreme case.
4122          */
4123         while (throttled && cfs_b->runtime > 0) {
4124                 runtime = cfs_b->runtime;
4125                 raw_spin_unlock(&cfs_b->lock);
4126                 /* we can't nest cfs_b->lock while distributing bandwidth */
4127                 runtime = distribute_cfs_runtime(cfs_b, runtime,
4128                                                  runtime_expires);
4129                 raw_spin_lock(&cfs_b->lock);
4130 
4131                 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
4132 
4133                 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4134         }
4135 
4136         /*
4137          * While we are ensured activity in the period following an
4138          * unthrottle, this also covers the case in which the new bandwidth is
4139          * insufficient to cover the existing bandwidth deficit.  (Forcing the
4140          * timer to remain active while there are any throttled entities.)
4141          */
4142         cfs_b->idle = 0;
4143 
4144         return 0;
4145 
4146 out_deactivate:
4147         return 1;
4148 }
4149 
4150 /* a cfs_rq won't donate quota below this amount */
4151 static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4152 /* minimum remaining period time to redistribute slack quota */
4153 static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4154 /* how long we wait to gather additional slack before distributing */
4155 static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4156 
4157 /*
4158  * Are we near the end of the current quota period?
4159  *
4160  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4161  * hrtimer base being cleared by hrtimer_start. In the case of
4162  * migrate_hrtimers, base is never cleared, so we are fine.
4163  */
4164 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4165 {
4166         struct hrtimer *refresh_timer = &cfs_b->period_timer;
4167         u64 remaining;
4168 
4169         /* if the call-back is running a quota refresh is already occurring */
4170         if (hrtimer_callback_running(refresh_timer))
4171                 return 1;
4172 
4173         /* is a quota refresh about to occur? */
4174         remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4175         if (remaining < min_expire)
4176                 return 1;
4177 
4178         return 0;
4179 }
4180 
4181 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4182 {
4183         u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4184 
4185         /* if there's a quota refresh soon don't bother with slack */
4186         if (runtime_refresh_within(cfs_b, min_left))
4187                 return;
4188 
4189         hrtimer_start(&cfs_b->slack_timer,
4190                         ns_to_ktime(cfs_bandwidth_slack_period),
4191                         HRTIMER_MODE_REL);
4192 }
4193 
4194 /* we know any runtime found here is valid as update_curr() precedes return */
4195 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4196 {
4197         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4198         s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4199 
4200         if (slack_runtime <= 0)
4201                 return;
4202 
4203         raw_spin_lock(&cfs_b->lock);
4204         if (cfs_b->quota != RUNTIME_INF &&
4205             cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4206                 cfs_b->runtime += slack_runtime;
4207 
4208                 /* we are under rq->lock, defer unthrottling using a timer */
4209                 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4210                     !list_empty(&cfs_b->throttled_cfs_rq))
4211                         start_cfs_slack_bandwidth(cfs_b);
4212         }
4213         raw_spin_unlock(&cfs_b->lock);
4214 
4215         /* even if it's not valid for return we don't want to try again */
4216         cfs_rq->runtime_remaining -= slack_runtime;
4217 }
4218 
4219 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4220 {
4221         if (!cfs_bandwidth_used())
4222                 return;
4223 
4224         if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
4225                 return;
4226 
4227         __return_cfs_rq_runtime(cfs_rq);
4228 }
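Editorial sketch of the slack arithmetic above (hypothetical helper): on its final dequeue a cfs_rq keeps min_cfs_rq_runtime (1ms) so a quick re-wakeup does not have to go back to the global pool, and only the excess above that is returned.

/* Mirrors the slack_runtime calculation in __return_cfs_rq_runtime() */
static long long slack_to_return_ns(long long runtime_remaining)
{
        long long slack = runtime_remaining - 1000000LL;  /* min_cfs_rq_runtime */

        return slack > 0 ? slack : 0;
}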
4229 
4230 /*
4231  * This is done with a timer (instead of inline with bandwidth return) since
4232  * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4233  */
4234 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4235 {
4236         u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4237         u64 expires;
4238 
4239         /* confirm we're still not at a refresh boundary */
4240         raw_spin_lock(&cfs_b->lock);
4241         if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4242                 raw_spin_unlock(&cfs_b->lock);
4243                 return;
4244         }
4245 
4246         if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
4247                 runtime = cfs_b->runtime;
4248 
4249         expires = cfs_b->runtime_expires;
4250         raw_spin_unlock(&cfs_b->lock);
4251 
4252         if (!runtime)
4253                 return;
4254 
4255         runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4256 
4257         raw_spin_lock(&cfs_b->lock);
4258         if (expires == cfs_b->runtime_expires)
4259                 cfs_b->runtime -= min(runtime, cfs_b->runtime);
4260         raw_spin_unlock(&cfs_b->lock);
4261 }
4262 
4263 /*
4264  * When a group wakes up we want to make sure that its quota is not already
4265  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
4266  * runtime as update_curr() throttling cannot trigger until it's on-rq.
4267  */
4268 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4269 {
4270         if (!cfs_bandwidth_used())
4271                 return;
4272 
4273         /* an active group must be handled by the update_curr()->put() path */
4274         if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4275                 return;
4276 
4277         /* ensure the group is not already throttled */
4278         if (cfs_rq_throttled(cfs_rq))
4279                 return;
4280 
4281         /* update runtime allocation */
4282         account_cfs_rq_runtime(cfs_rq, 0);
4283         if (cfs_rq->runtime_remaining <= 0)
4284                 throttle_cfs_rq(cfs_rq);
4285 }
4286 
4287 static void sync_throttle(struct task_group *tg, int cpu)
4288 {
4289         struct cfs_rq *pcfs_rq, *cfs_rq;
4290 
4291         if (!cfs_bandwidth_used())
4292                 return;
4293 
4294         if (!tg->parent)
4295                 return;
4296 
4297         cfs_rq = tg->cfs_rq[cpu];
4298         pcfs_rq = tg->parent->cfs_rq[cpu];
4299 
4300         cfs_rq->throttle_count = pcfs_rq->throttle_count;
4301         cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
4302 }
4303 
4304 /* conditionally throttle active cfs_rq's from put_prev_entity() */
4305 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4306 {
4307         if (!cfs_bandwidth_used())
4308                 return false;
4309 
4310         if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
4311                 return false;
4312 
4313         /*
4314          * it's possible for a throttled entity to be forced into a running
4315          * state (e.g. set_curr_task), in this case we're finished.
4316          */
4317         if (cfs_rq_throttled(cfs_rq))
4318                 return true;
4319 
4320         throttle_cfs_rq(cfs_rq);
4321         return true;
4322 }
4323 
4324 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4325 {
4326         struct cfs_bandwidth *cfs_b =
4327                 container_of(timer, struct cfs_bandwidth, slack_timer);
4328 
4329         do_sched_cfs_slack_timer(cfs_b);
4330 
4331         return HRTIMER_NORESTART;
4332 }
4333 
4334 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4335 {
4336         struct cfs_bandwidth *cfs_b =
4337                 container_of(timer, struct cfs_bandwidth, period_timer);
4338         int overrun;
4339         int idle = 0;
4340 
4341         raw_spin_lock(&cfs_b->lock);
4342         for (;;) {
4343                 overrun = hrtimer_forward_now(timer, cfs_b->period);
4344                 if (!overrun)
4345                         break;
4346 
4347                 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4348         }
4349         if (idle)
4350                 cfs_b->period_active = 0;
4351         raw_spin_unlock(&cfs_b->lock);
4352 
4353         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4354 }
4355 
4356 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4357 {
4358         raw_spin_lock_init(&cfs_b->lock);
4359         cfs_b->runtime = 0;
4360         cfs_b->quota = RUNTIME_INF;
4361         cfs_b->period = ns_to_ktime(default_cfs_period());
4362 
4363         INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4364         hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
4365         cfs_b->period_timer.function = sched_cfs_period_timer;
4366         hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4367         cfs_b->slack_timer.function = sched_cfs_slack_timer;
4368 }
4369 
4370 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4371 {
4372         cfs_rq->runtime_enabled = 0;
4373         INIT_LIST_HEAD(&cfs_rq->throttled_list);
4374 }
4375 
4376 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4377 {
4378         lockdep_assert_held(&cfs_b->lock);
4379 
4380         if (!cfs_b->period_active) {
4381                 cfs_b->period_active = 1;
4382                 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4383                 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4384         }
4385 }
4386 
4387 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4388 {
4389         /* init_cfs_bandwidth() was not called */
4390         if (!cfs_b->throttled_cfs_rq.next)
4391                 return;
4392 
4393         hrtimer_cancel(&cfs_b->period_timer);
4394         hrtimer_cancel(&cfs_b->slack_timer);
4395 }
4396 
4397 static void __maybe_unused update_runtime_enabled(struct rq *rq)
4398 {
4399         struct cfs_rq *cfs_rq;
4400 
4401         for_each_leaf_cfs_rq(rq, cfs_rq) {
4402                 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4403 
4404                 raw_spin_lock(&cfs_b->lock);
4405                 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4406                 raw_spin_unlock(&cfs_b->lock);
4407         }
4408 }
4409 
4410 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
4411 {
4412         struct cfs_rq *cfs_rq;
4413 
4414         for_each_leaf_cfs_rq(rq, cfs_rq) {
4415                 if (!cfs_rq->runtime_enabled)
4416                         continue;
4417 
4418                 /*
4419                  * clock_task is not advancing so we just need to make sure
4420                  * there's some valid quota amount
4421                  */
4422                 cfs_rq->runtime_remaining = 1;
4423                 /*
4424                  * Offline rq is schedulable till cpu is completely disabled
4425                  * in take_cpu_down(), so we prevent new cfs throttling here.
4426                  */
4427                 cfs_rq->runtime_enabled = 0;
4428 
4429                 if (cfs_rq_throttled(cfs_rq))
4430                         unthrottle_cfs_rq(cfs_rq);
4431         }
4432 }
4433 
4434 #else /* CONFIG_CFS_BANDWIDTH */
4435 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4436 {
4437         return rq_clock_task(rq_of(cfs_rq));
4438 }
4439 
4440 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
4441 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
4442 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
4443 static inline void sync_throttle(struct task_group *tg, int cpu) {}
4444 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4445 
4446 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4447 {
4448         return 0;
4449 }
4450 
4451 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4452 {
4453         return 0;
4454 }
4455 
4456 static inline int throttled_lb_pair(struct task_group *tg,
4457                                     int src_cpu, int dest_cpu)
4458 {
4459         return 0;
4460 }
4461 
4462 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4463 
4464 #ifdef CONFIG_FAIR_GROUP_SCHED
4465 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
4466 #endif
4467 
4468 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4469 {
4470         return NULL;
4471 }
4472 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4473 static inline void update_runtime_enabled(struct rq *rq) {}
4474 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
4475 
4476 #endif /* CONFIG_CFS_BANDWIDTH */
4477 
4478 /**************************************************
4479  * CFS operations on tasks:
4480  */
4481 
4482 #ifdef CONFIG_SCHED_HRTICK
4483 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4484 {
4485         struct sched_entity *se = &p->se;
4486         struct cfs_rq *cfs_rq = cfs_rq_of(se);
4487 
4488         SCHED_WARN_ON(task_rq(p) != rq);
4489 
4490         if (rq->cfs.h_nr_running > 1) {
4491                 u64 slice = sched_slice(cfs_rq, se);
4492                 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4493                 s64 delta = slice - ran;
4494 
4495                 if (delta < 0) {
4496                         if (rq->curr == p)
4497                                 resched_curr(rq);
4498                         return;
4499                 }
4500                 hrtick_start(rq, delta);
4501         }
4502 }
4503 
4504 /*
4505  * called from enqueue/dequeue and updates the hrtick when the
4506  * current task is from our class and nr_running is low enough
4507  * to matter.
4508  */
4509 static void hrtick_update(struct rq *rq)
4510 {
4511         struct task_struct *curr = rq->curr;
4512 
4513         if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
4514                 return;
4515 
4516         if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4517                 hrtick_start_fair(rq, curr);
4518 }
4519 #else /* !CONFIG_SCHED_HRTICK */
4520 static inline void
4521 hrtick_start_fair(struct rq *rq, struct task_struct *p)
4522 {
4523 }
4524 
4525 static inline void hrtick_update(struct rq *rq)
4526 {
4527 }
4528 #endif
4529 
4530 /*
4531  * The enqueue_task method is called before nr_running is
4532  * increased. Here we update the fair scheduling stats and
4533  * then put the task into the rbtree:
4534  */
4535 static void
4536 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4537 {
4538         struct cfs_rq *cfs_rq;
4539         struct sched_entity *se = &p->se;
4540 
4541         /*
4542          * If in_iowait is set, the code below may not trigger any cpufreq
4543          * utilization updates, so do it here explicitly with the IOWAIT flag
4544          * passed.
4545          */
4546         if (p->in_iowait)
4547                 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
4548 
4549         for_each_sched_entity(se) {
4550                 if (se->on_rq)
4551                         break;
4552                 cfs_rq = cfs_rq_of(se);
4553                 enqueue_entity(cfs_rq, se, flags);
4554 
4555                 /*
4556                  * end evaluation on encountering a throttled cfs_rq
4557                  *
4558                  * note: in the case of encountering a throttled cfs_rq we will
4559                  * post the final h_nr_running increment below.
4560                  */
4561                 if (cfs_rq_throttled(cfs_rq))
4562                         break;
4563                 cfs_rq->h_nr_running++;
4564 
4565                 flags = ENQUEUE_WAKEUP;
4566         }
4567 
4568         for_each_sched_entity(se) {
4569                 cfs_rq = cfs_rq_of(se);
4570                 cfs_rq->h_nr_running++;
4571 
4572                 if (cfs_rq_throttled(cfs_rq))
4573                         break;
4574 
4575                 update_load_avg(se, 1);
4576                 update_cfs_shares(cfs_rq);
4577         }
4578 
4579         if (!se)
4580                 add_nr_running(rq, 1);
4581 
4582         hrtick_update(rq);
4583 }
4584 
4585 static void set_next_buddy(struct sched_entity *se);
4586 
4587 /*
4588  * The dequeue_task method is called before nr_running is
4589  * decreased. We remove the task from the rbtree and
4590  * update the fair scheduling stats:
4591  */
4592 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4593 {
4594         struct cfs_rq *cfs_rq;
4595         struct sched_entity *se = &p->se;
4596         int task_sleep = flags & DEQUEUE_SLEEP;
4597 
4598         for_each_sched_entity(se) {
4599                 cfs_rq = cfs_rq_of(se);
4600                 dequeue_entity(cfs_rq, se, flags);
4601 
4602                 /*
4603                  * end evaluation on encountering a throttled cfs_rq
4604                  *
4605                  * note: in the case of encountering a throttled cfs_rq we will
4606                  * post the final h_nr_running decrement below.
4607                 */
4608                 if (cfs_rq_throttled(cfs_rq))
4609                         break;
4610                 cfs_rq->h_nr_running--;
4611 
4612                 /* Don't dequeue parent if it has other entities besides us */
4613                 if (cfs_rq->load.weight) {
4614                         /* Avoid re-evaluating load for this entity: */
4615                         se = parent_entity(se);
4616                         /*
4617                          * Bias pick_next to pick a task from this cfs_rq, as
4618                          * p is sleeping when it is within its sched_slice.
4619                          */
4620                         if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4621                                 set_next_buddy(se);
4622                         break;
4623                 }
4624                 flags |= DEQUEUE_SLEEP;
4625         }
4626 
4627         for_each_sched_entity(se) {
4628                 cfs_rq = cfs_rq_of(se);
4629                 cfs_rq->h_nr_running--;
4630 
4631                 if (cfs_rq_throttled(cfs_rq))
4632                         break;
4633 
4634                 update_load_avg(se, 1);
4635                 update_cfs_shares(cfs_rq);
4636         }
4637 
4638         if (!se)
4639                 sub_nr_running(rq, 1);
4640 
4641         hrtick_update(rq);
4642 }
4643 
4644 #ifdef CONFIG_SMP
4645 
4646 /* Working cpumask for: load_balance, load_balance_newidle. */
4647 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4648 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
4649 
4650 #ifdef CONFIG_NO_HZ_COMMON
4651 /*
4652  * per rq 'load' array crap; XXX kill this.
4653  */
4654 
4655 /*
4656  * The exact cpuload calculated at every tick would be:
4657  *
4658  *   load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4659  *
4660  * If a cpu misses updates for n ticks (as it was idle) and update gets
4661  * called on the n+1-th tick when cpu may be busy, then we have:
4662  *
4663  *   load_n   = (1 - 1/2^i)^n * load_0
4664  *   load_n+1 = (1 - 1/2^i)   * load_n + (1/2^i) * cur_load
4665  *
4666  * decay_load_missed() below does efficient calculation of
4667  *
4668  *   load' = (1 - 1/2^i)^n * load
4669  *
4670  * Because x^(n+m) := x^n * x^m we can decompose any x^n into power-of-2 factors.
4671  * This allows us to precompute the above in said factors, thereby allowing the
4672  * reduction of an arbitrary n in O(log_2 n) steps. (See also
4673  * fixed_power_int())
4674  *
4675  * The calculation is approximated on a 128 point scale.
4676  */
4677 #define DEGRADE_SHIFT           7
4678 
4679 static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4680 static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4681         {   0,   0,  0,  0,  0,  0, 0, 0 },
4682         {  64,  32,  8,  0,  0,  0, 0, 0 },
4683         {  96,  72, 40, 12,  1,  0, 0, 0 },
4684         { 112,  98, 75, 43, 15,  1, 0, 0 },
4685         { 120, 112, 98, 76, 45, 16, 2, 0 }
4686 };
4687 
4688 /*
4689  * Update cpu_load for any missed ticks due to tickless idle. The backlog
4690  * case is when the CPU was idle, so we just decay the old load without
4691  * adding any new load.
4692  */
4693 static unsigned long
4694 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4695 {
4696         int j = 0;
4697 
4698         if (!missed_updates)
4699                 return load;
4700 
4701         if (missed_updates >= degrade_zero_ticks[idx])
4702                 return 0;
4703 
4704         if (idx == 1)
4705                 return load >> missed_updates;
4706 
4707         while (missed_updates) {
4708                 if (missed_updates % 2)
4709                         load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4710 
4711                 missed_updates >>= 1;
4712                 j++;
4713         }
4714         return load;
4715 }
4716 #endif /* CONFIG_NO_HZ_COMMON */
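
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the binary
 * decomposition used by decay_load_missed() above.  Because
 * x^n = x^(b0*1) * x^(b1*2) * x^(b2*4) * ... for the binary digits b of n,
 * precomputing x^(2^j) on a 128-point scale lets the kernel decay a load
 * value over n missed ticks in O(log2 n) multiplies.  The table below is
 * copied from the code above; the naive per-tick loop is only a reference,
 * and its results differ slightly because the table factors are truncated
 * to 7 bits.
 */
#include <stdio.h>

#define DEMO_SHIFT 7                            /* 128-point scale */

static const unsigned char demo_factor[5][8] = {
        {   0,   0,  0,  0,  0,  0, 0, 0 },
        {  64,  32,  8,  0,  0,  0, 0, 0 },
        {  96,  72, 40, 12,  1,  0, 0, 0 },
        { 112,  98, 75, 43, 15,  1, 0, 0 },
        { 120, 112, 98, 76, 45, 16, 2, 0 },
};

/* O(log2 n): multiply by precomputed (1 - 1/2^idx)^(2^j) factors */
static unsigned long demo_decay_fast(unsigned long load, unsigned long n, int idx)
{
        int j = 0;

        while (n) {
                if (n & 1)
                        load = (load * demo_factor[idx][j]) >> DEMO_SHIFT;
                n >>= 1;
                j++;
        }
        return load;
}

/* O(n) reference: decay one tick at a time, load' = load - load/2^idx */
static unsigned long demo_decay_slow(unsigned long load, unsigned long n, int idx)
{
        while (n--)
                load = load - (load >> idx);
        return load;
}

int main(void)
{
        unsigned long n;

        for (n = 1; n <= 16; n <<= 1)
                printf("n=%2lu fast=%4lu slow=%4lu\n", n,
                       demo_decay_fast(1024, n, 2), demo_decay_slow(1024, n, 2));
        return 0;
}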
4717 
4718 /**
4719  * cpu_load_update - update the rq->cpu_load[] statistics
4720  * @this_rq: The rq to update statistics for
4721  * @this_load: The current load
4722  * @pending_updates: The number of missed updates
4723  *
4724  * Update rq->cpu_load[] statistics. This function is usually called every
4725  * scheduler tick (TICK_NSEC).
4726  *
4727  * This function computes a decaying average:
4728  *
4729  *   load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
4730  *
4731  * Because of NOHZ it might not get called on every tick which gives need for
4732  * the @pending_updates argument.
4733  *
4734  *   load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
4735  *             = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
4736  *             = A * (A * load[i]_n-2 + B) + B
4737  *             = A * (A * (A * load[i]_n-3 + B) + B) + B
4738  *             = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
4739  *             = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
4740  *             = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
4741  *             = (1 - 1/2^i)^n * (load[i]_0 - load) + load
4742  *
4743  * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
4744  * any change in load would have resulted in the tick being turned back on.
4745  *
4746  * For regular NOHZ, this reduces to:
4747  *
4748  *   load[i]_n = (1 - 1/2^i)^n * load[i]_0
4749  *
4750  * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
4751  * term.
4752  */
4753 static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4754                             unsigned long pending_updates)
4755 {
4756         unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
4757         int i, scale;
4758 
4759         this_rq->nr_load_updates++;
4760 
4761         /* Update our load: */
4762         this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
4763         for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4764                 unsigned long old_load, new_load;
4765 
4766                 /* scale is effectively 1 << i now, and >> i divides by scale */
4767 
4768                 old_load = this_rq->cpu_load[i];
4769 #ifdef CONFIG_NO_HZ_COMMON
4770                 old_load = decay_load_missed(old_load, pending_updates - 1, i);
4771                 if (tickless_load) {
4772                         old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
4773                         /*
4774                          * old_load can never be a negative value because a
4775                          * decayed tickless_load cannot be greater than the
4776                          * original tickless_load.
4777                          */
4778                         old_load += tickless_load;
4779                 }
4780 #endif
4781                 new_load = this_load;
4782                 /*
4783                  * Round up the averaging division if load is increasing. This
4784                  * prevents us from getting stuck on 9 if the load is 10, for
4785                  * example.
4786                  */
4787                 if (new_load > old_load)
4788                         new_load += scale - 1;
4789 
4790                 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4791         }
4792 
4793         sched_avg_update(this_rq);
4794 }
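
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * per-index decaying average computed by cpu_load_update() above,
 * load[i]' = (load[i] * (2^i - 1) + new_load) >> i, with the round-up
 * applied when the load is increasing.  With a constant input of 10 and
 * i = 1 the plain average converges to 9 and sticks there; the
 * "new_load += scale - 1" adjustment lets it reach 10.
 */
#include <stdio.h>

static unsigned long ewma_step(unsigned long old, unsigned long new,
                               int i, int round_up)
{
        unsigned long scale = 1UL << i;

        if (round_up && new > old)
                new += scale - 1;
        return (old * (scale - 1) + new) >> i;
}

int main(void)
{
        unsigned long plain = 0, rounded = 0;
        int tick;

        for (tick = 0; tick < 20; tick++) {
                plain = ewma_step(plain, 10, 1, 0);     /* sticks at 9  */
                rounded = ewma_step(rounded, 10, 1, 1); /* reaches 10   */
        }
        printf("plain=%lu rounded=%lu\n", plain, rounded);
        return 0;
}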
4795 
4796 /* Used instead of source_load when we know the type == 0 */
4797 static unsigned long weighted_cpuload(const int cpu)
4798 {
4799         return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4800 }
4801 
4802 #ifdef CONFIG_NO_HZ_COMMON
4803 /*
4804  * There is no sane way to deal with nohz on smp when using jiffies because the
4805  * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
4806  * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
4807  *
4808  * Therefore we need to avoid the delta approach from the regular tick when
4809  * possible since that would seriously skew the load calculation. This is why we
4810  * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
4811  * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
4812  * loop exit, nohz_idle_balance, nohz full exit...)
4813  *
4814  * This means we might still be one tick off for nohz periods.
4815  */
4816 
4817 static void cpu_load_update_nohz(struct rq *this_rq,
4818                                  unsigned long curr_jiffies,
4819                                  unsigned long load)
4820 {
4821         unsigned long pending_updates;
4822 
4823         pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4824         if (pending_updates) {
4825                 this_rq->last_load_update_tick = curr_jiffies;
4826                 /*
4827                  * In the regular NOHZ case, we were idle, this means load 0.
4828                  * In the NOHZ_FULL case, we were non-idle, we should consider
4829                  * its weighted load.
4830                  */
4831                 cpu_load_update(this_rq, load, pending_updates);
4832         }
4833 }
4834 
4835 /*
4836  * Called from nohz_idle_balance() to update the load ratings before doing the
4837  * idle balance.
4838  */
4839 static void cpu_load_update_idle(struct rq *this_rq)
4840 {
4841         /*
4842          * bail if there's load or we're actually up-to-date.
4843          */
4844         if (weighted_cpuload(cpu_of(this_rq)))
4845                 return;
4846 
4847         cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
4848 }
4849 
4850 /*
4851  * Record CPU load on nohz entry so we know the tickless load to account
4852  * on nohz exit. cpu_load[0] then happens to be updated more frequently
4853  * than the other cpu_load[idx] entries, but that should be fine as cpu_load
4854  * readers shouldn't rely on synchronized cpu_load[*] updates.
4855  */
4856 void cpu_load_update_nohz_start(void)
4857 {
4858         struct rq *this_rq = this_rq();
4859 
4860         /*
4861          * This is all lockless but should be fine. If weighted_cpuload changes
4862          * concurrently we'll exit nohz. And the cpu_load write can race with
4863          * cpu_load_update_idle() but both updaters would be writing the same value.
4864          */
4865         this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
4866 }
4867 
4868 /*
4869  * Account the tickless load in the end of a nohz frame.
4870  */
4871 void cpu_load_update_nohz_stop(void)
4872 {
4873         unsigned long curr_jiffies = READ_ONCE(jiffies);
4874         struct rq *this_rq = this_rq();
4875         unsigned long load;
4876 
4877         if (curr_jiffies == this_rq->last_load_update_tick)
4878                 return;
4879 
4880         load = weighted_cpuload(cpu_of(this_rq));
4881         raw_spin_lock(&this_rq->lock);
4882         update_rq_clock(this_rq);
4883         cpu_load_update_nohz(this_rq, curr_jiffies, load);
4884         raw_spin_unlock(&this_rq->lock);
4885 }
4886 #else /* !CONFIG_NO_HZ_COMMON */
4887 static inline void cpu_load_update_nohz(struct rq *this_rq,
4888                                         unsigned long curr_jiffies,
4889                                         unsigned long load) { }
4890 #endif /* CONFIG_NO_HZ_COMMON */
4891 
4892 static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
4893 {
4894 #ifdef CONFIG_NO_HZ_COMMON
4895         /* See the mess around cpu_load_update_nohz(). */
4896         this_rq->last_load_update_tick = READ_ONCE(jiffies);
4897 #endif
4898         cpu_load_update(this_rq, load, 1);
4899 }
4900 
4901 /*
4902  * Called from scheduler_tick()
4903  */
4904 void cpu_load_update_active(struct rq *this_rq)
4905 {
4906         unsigned long load = weighted_cpuload(cpu_of(this_rq));
4907 
4908         if (tick_nohz_tick_stopped())
4909                 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
4910         else
4911                 cpu_load_update_periodic(this_rq, load);
4912 }
4913 
4914 /*
4915  * Return a low guess at the load of a migration-source cpu weighted
4916  * according to the scheduling class and "nice" value.
4917  *
4918  * We want to under-estimate the load of migration sources, to
4919  * balance conservatively.
4920  */
4921 static unsigned long source_load(int cpu, int type)
4922 {
4923         struct rq *rq = cpu_rq(cpu);
4924         unsigned long total = weighted_cpuload(cpu);
4925 
4926         if (type == 0 || !sched_feat(LB_BIAS))
4927                 return total;
4928 
4929         return min(rq->cpu_load[type-1], total);
4930 }
4931 
4932 /*
4933  * Return a high guess at the load of a migration-target cpu weighted
4934  * according to the scheduling class and "nice" value.
4935  */
4936 static unsigned long target_load(int cpu, int type)
4937 {
4938         struct rq *rq = cpu_rq(cpu);
4939         unsigned long total = weighted_cpuload(cpu);
4940 
4941         if (type == 0 || !sched_feat(LB_BIAS))
4942                 return total;
4943 
4944         return max(rq->cpu_load[type-1], total);
4945 }
4946 
4947 static unsigned long capacity_of(int cpu)
4948 {
4949         return cpu_rq(cpu)->cpu_capacity;
4950 }
4951 
4952 static unsigned long capacity_orig_of(int cpu)
4953 {
4954         return cpu_rq(cpu)->cpu_capacity_orig;
4955 }
4956 
4957 static unsigned long cpu_avg_load_per_task(int cpu)
4958 {
4959         struct rq *rq = cpu_rq(cpu);
4960         unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
4961         unsigned long load_avg = weighted_cpuload(cpu);
4962 
4963         if (nr_running)
4964                 return load_avg / nr_running;
4965 
4966         return 0;
4967 }
4968 
4969 #ifdef CONFIG_FAIR_GROUP_SCHED
4970 /*
4971  * effective_load() calculates the load change as seen from the root_task_group
4972  *
4973  * Adding load to a group doesn't make a group heavier, but can cause movement
4974  * of group shares between cpus. Assuming the shares were perfectly aligned one
4975  * can calculate the shift in shares.
4976  *
4977  * Calculate the effective load difference if @wl is added (subtracted) to @tg
4978  * on this @cpu and results in a total addition (subtraction) of @wg to the
4979  * total group weight.
4980  *
4981  * Given a runqueue weight distribution (rw_i) we can compute a shares
4982  * distribution (s_i) using:
4983  *
4984  *   s_i = rw_i / \Sum rw_j                                             (1)
4985  *
4986  * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4987  * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4988  * shares distribution (s_i):
4989  *
4990  *   rw_i = {   2,   4,   1,   0 }
4991  *   s_i  = { 2/7, 4/7, 1/7,   0 }
4992  *
4993  * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4994  * task used to run on and the CPU the waker is running on), we need to
4995  * compute the effect of waking a task on either CPU and, in case of a sync
4996  * wakeup, compute the effect of the current task going to sleep.
4997  *
4998  * So for a change of @wl to the local @cpu with an overall group weight change
4999  * of @wl we can compute the new shares distribution (s'_i) using:
5000  *
5001  *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                            (2)
5002  *
5003  * Suppose we're interested in CPUs 0 and 1, and want to compute the load
5004  * differences in waking a task to CPU 0. The additional task changes the
5005  * weight and shares distributions like:
5006  *
5007  *   rw'_i = {   3,   4,   1,   0 }
5008  *   s'_i  = { 3/8, 4/8, 1/8,   0 }
5009  *
5010  * We can then compute the difference in effective weight by using:
5011  *
5012  *   dw_i = S * (s'_i - s_i)                                            (3)
5013  *
5014  * Where 'S' is the group weight as seen by its parent.
5015  *
5016  * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
5017  * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
5018  * 4/7) times the weight of the group.
5019  */
5020 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5021 {
5022         struct sched_entity *se = tg->se[cpu];
5023 
5024         if (!tg->parent)        /* the trivial, non-cgroup case */
5025                 return wl;
5026 
5027         for_each_sched_entity(se) {
5028                 struct cfs_rq *cfs_rq = se->my_q;
5029                 long W, w = cfs_rq_load_avg(cfs_rq);
5030 
5031                 tg = cfs_rq->tg;
5032 
5033                 /*
5034                  * W = @wg + \Sum rw_j
5035                  */
5036                 W = wg + atomic_long_read(&tg->load_avg);
5037 
5038                 /* Ensure \Sum rw_j >= rw_i */
5039                 W -= cfs_rq->tg_load_avg_contrib;
5040                 W += w;
5041 
5042                 /*
5043                  * w = rw_i + @wl
5044                  */
5045                 w += wl;
5046 
5047                 /*
5048                  * wl = S * s'_i; see (2)
5049                  */
5050                 if (W > 0 && w < W)
5051                         wl = (w * (long)scale_load_down(tg->shares)) / W;
5052                 else
5053                         wl = scale_load_down(tg->shares);
5054 
5055                 /*
5056                  * Per the above, wl is the new se->load.weight value; since
5057                  * those are clipped to [MIN_SHARES, ...) do so now. See
5058                  * calc_cfs_shares().
5059                  */
5060                 if (wl < MIN_SHARES)
5061                         wl = MIN_SHARES;
5062 
5063                 /*
5064                  * wl = dw_i = S * (s'_i - s_i); see (3)
5065                  */
5066                 wl -= se->avg.load_avg;
5067 
5068                 /*
5069                  * Recursively apply this logic to all parent groups to compute
5070                  * the final effective load change on the root group. Since
5071                  * only the @tg group gets extra weight, all parent groups can
5072                  * only redistribute existing shares. @wl is the shift in shares
5073                  * resulting from this level per the above.
5074                  */
5075                 wg = 0;
5076         }
5077 
5078         return wl;
5079 }
5080 #else
5081 
5082 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
5083 {
5084         return wl;
5085 }
5086 
5087 #endif
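
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the worked
 * example from the effective_load() comment above.  A group of weight S (as
 * seen by its parent) has per-cpu runqueue weights rw = {2, 4, 1, 0}; waking
 * one more equal-weight task on CPU 0 shifts the shares from s_i = rw_i/7 to
 * s'_i = rw'_i/8, and dw_i = S * (s'_i - s_i).  S = 1024 is just an example
 * value here.
 */
#include <stdio.h>

int main(void)
{
        double rw[4]  = { 2, 4, 1, 0 };         /* before the wakeup      */
        double rw2[4] = { 3, 4, 1, 0 };         /* after waking on CPU 0  */
        double S = 1024.0;                      /* group weight (example) */
        int i;

        for (i = 0; i < 4; i++) {
                double dw = S * (rw2[i] / 8.0 - rw[i] / 7.0);

                printf("cpu%d: s=%5.3f s'=%5.3f dw=%+7.2f\n",
                       i, rw[i] / 7.0, rw2[i] / 8.0, dw);
        }
        /* cpu0 gains 5/56 of S, cpu1 loses 4/56 of S, as the comment says */
        return 0;
}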
5088 
5089 static void record_wakee(struct task_struct *p)
5090 {
5091         /*
5092          * Only decay a single time; tasks that have less than 1 wakeup per
5093          * jiffy will not have built up many flips.
5094          */
5095         if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5096                 current->wakee_flips >>= 1;
5097                 current->wakee_flip_decay_ts = jiffies;
5098         }
5099 
5100         if (current->last_wakee != p) {
5101                 current->last_wakee = p;
5102                 current->wakee_flips++;
5103         }
5104 }
5105 
5106 /*
5107  * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
5108  *
5109  * A waker of many should wake a different task than the one last awakened
5110  * at a frequency roughly N times higher than one of its wakees.
5111  *
5112  * In order to determine whether we should let the load spread vs consolidating
5113  * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5114  * partner, and a factor of llc_size higher frequency in the other.
5115  *
5116  * With both conditions met, we can be relatively sure that the relationship is
5117  * non-monogamous, with partner count exceeding socket size.
5118  *
5119  * Waker/wakee being client/server, worker/dispatcher, interrupt source or
5120  * whatever is irrelevant; the spread criterion is that the apparent partner
5121  * count exceeds the socket size.
5122  */
5123 static int wake_wide(struct task_struct *p)
5124 {
5125         unsigned int master = current->wakee_flips;
5126         unsigned int slave = p->wakee_flips;
5127         int factor = this_cpu_read(sd_llc_size);
5128 
5129         if (master < slave)
5130                 swap(master, slave);
5131         if (slave < factor || master < slave * factor)
5132                 return 0;
5133         return 1;
5134 }
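
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * wake_wide() decision above on example numbers, assuming an LLC size of 8.
 * A dispatcher that flips between many wakees (high flip count) waking a
 * worker with few flips trips the "wake wide" path; a 1:1 producer/consumer
 * pair stays on the affine path.
 */
#include <stdio.h>

static int demo_wake_wide(unsigned int master, unsigned int slave, unsigned int factor)
{
        if (master < slave) {                   /* same swap as wake_wide() */
                unsigned int tmp = master;

                master = slave;
                slave = tmp;
        }
        if (slave < factor || master < slave * factor)
                return 0;                       /* stay affine */
        return 1;                               /* spread over the LLC */
}

int main(void)
{
        unsigned int llc = 8;                   /* assumed sd_llc_size */

        printf("dispatcher(200 flips) -> worker(10 flips): %d\n",
               demo_wake_wide(200, 10, llc));   /* 1: wake wide */
        printf("producer(3 flips) -> consumer(2 flips): %d\n",
               demo_wake_wide(3, 2, llc));      /* 0: stay affine */
        return 0;
}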
5135 
5136 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5137                        int prev_cpu, int sync)
5138 {
5139         s64 this_load, load;
5140         s64 this_eff_load, prev_eff_load;
5141         int idx, this_cpu;
5142         struct task_group *tg;
5143         unsigned long weight;
5144         int balanced;
5145 
5146         idx       = sd->wake_idx;
5147         this_cpu  = smp_processor_id();
5148         load      = source_load(prev_cpu, idx);
5149         this_load = target_load(this_cpu, idx);
5150 
5151         /*
5152          * If sync wakeup then subtract the (maximum possible)
5153          * effect of the currently running task from the load
5154          * of the current CPU:
5155          */
5156         if (sync) {
5157                 tg = task_group(current);
5158                 weight = current->se.avg.load_avg;
5159 
5160                 this_load += effective_load(tg, this_cpu, -weight, -weight);
5161                 load += effective_load(tg, prev_cpu, 0, -weight);
5162         }
5163 
5164         tg = task_group(p);
5165         weight = p->se.avg.load_avg;
5166 
5167         /*
5168          * In low-load situations, where prev_cpu is idle and this_cpu is idle
5169          * due to the sync cause above having dropped this_load to 0, we'll
5170          * always have an imbalance, but there's really nothing you can do
5171          * about that, so that's good too.
5172          *
5173          * Otherwise check if either cpu is near enough in load to allow this
5174          * task to be woken on this_cpu.
5175          */
5176         this_eff_load = 100;
5177         this_eff_load *= capacity_of(prev_cpu);
5178 
5179         prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5180         prev_eff_load *= capacity_of(this_cpu);
5181 
5182         if (this_load > 0) {
5183                 this_eff_load *= this_load +
5184                         effective_load(tg, this_cpu, weight, weight);
5185 
5186                 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
5187         }
5188 
5189         balanced = this_eff_load <= prev_eff_load;
5190 
5191         schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5192 
5193         if (!balanced)
5194                 return 0;
5195 
5196         schedstat_inc(sd->ttwu_move_affine);
5197         schedstat_inc(p->se.statistics.nr_wakeups_affine);
5198 
5199         return 1;
5200 }
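
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * cross-multiplied comparison at the end of wake_affine() above.  Rather
 * than dividing each side's load by its capacity, each side is scaled by
 * the other cpu's capacity, and the previous cpu is given the benefit of
 * half the domain's imbalance_pct.  All numbers below are made up;
 * imbalance_pct = 125 is only an example value.
 */
#include <stdio.h>

static int demo_wake_affine(long this_load, long prev_load,
                            long this_cap, long prev_cap, long imbalance_pct)
{
        long this_eff = 100 * prev_cap * this_load;
        long prev_eff = (100 + (imbalance_pct - 100) / 2) * this_cap * prev_load;

        return this_eff <= prev_eff;            /* 1: pull the wakee here */
}

int main(void)
{
        /* equal capacities, waking cpu only slightly busier: still affine */
        printf("%d\n", demo_wake_affine(110, 100, 1024, 1024, 125));
        /* waking cpu much busier: leave the task near its previous cpu */
        printf("%d\n", demo_wake_affine(300, 100, 1024, 1024, 125));
        return 0;
}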
5201 
5202 /*
5203  * find_idlest_group finds and returns the least busy CPU group within the
5204  * domain.
5205  */
5206 static struct sched_group *
5207 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5208                   int this_cpu, int sd_flag)
5209 {
5210         struct sched_group *idlest = NULL, *group = sd->groups;
5211         unsigned long min_load = ULONG_MAX, this_load = 0;
5212         int load_idx = sd->forkexec_idx;
5213         int imbalance = 100 + (sd->imbalance_pct-100)/2;
5214 
5215         if (sd_flag & SD_BALANCE_WAKE)
5216                 load_idx = sd->wake_idx;
5217 
5218         do {
5219                 unsigned long load, avg_load;
5220                 int local_group;
5221                 int i;
5222 
5223                 /* Skip over this group if it has no CPUs allowed */
5224                 if (!cpumask_intersects(sched_group_cpus(group),
5225                                         tsk_cpus_allowed(p)))
5226                         continue;
5227 
5228                 local_group = cpumask_test_cpu(this_cpu,
5229                                                sched_group_cpus(group));
5230 
5231                 /* Tally up the load of all CPUs in the group */
5232                 avg_load = 0;
5233 
5234                 for_each_cpu(i, sched_group_cpus(group)) {
5235                         /* Bias balancing toward cpus of our domain */
5236                         if (local_group)
5237                                 load = source_load(i, load_idx);
5238                         else
5239                                 load = target_load(i, load_idx);
5240 
5241                         avg_load += load;
5242                 }
5243 
5244                 /* Adjust by relative CPU capacity of the group */
5245                 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
5246 
5247                 if (local_group) {
5248                         this_load = avg_load;
5249                 } else if (avg_load < min_load) {
5250                         min_load = avg_load;
5251                         idlest = group;
5252                 }
5253         } while (group = group->next, group != sd->groups);
5254 
5255         if (!idlest || 100*this_load < imbalance*min_load)
5256                 return NULL;
5257         return idlest;
5258 }
5259 
5260 /*
5261  * find_idlest_cpu - find the idlest cpu among the cpus in group.
5262  */
5263 static int
5264 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5265 {
5266         unsigned long load, min_load = ULONG_MAX;
5267         unsigned int min_exit_latency = UINT_MAX;
5268         u64 latest_idle_timestamp = 0;
5269         int least_loaded_cpu = this_cpu;
5270         int shallowest_idle_cpu = -1;
5271         int i;
5272 
5273         /* Check if we have any choice: */
5274         if (group->group_weight == 1)
5275                 return cpumask_first(sched_group_cpus(group));
5276 
5277         /* Traverse only the allowed CPUs */
5278         for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
5279                 if (idle_cpu(i)) {
5280                         struct rq *rq = cpu_rq(i);
5281                         struct cpuidle_state *idle = idle_get_state(rq);
5282                         if (idle && idle->exit_latency < min_exit_latency) {
5283                                 /*
5284                                  * We give priority to a CPU whose idle state
5285                                  * has the smallest exit latency irrespective
5286                                  * of any idle timestamp.
5287                                  */
5288                                 min_exit_latency = idle->exit_latency;
5289                                 latest_idle_timestamp = rq->idle_stamp;
5290                                 shallowest_idle_cpu = i;
5291                         } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5292                                    rq->idle_stamp > latest_idle_timestamp) {
5293                                 /*
5294                                  * If equal or no active idle state, then
5295                                  * the most recently idled CPU might have
5296                                  * a warmer cache.
5297                                  */
5298                                 latest_idle_timestamp = rq->idle_stamp;
5299                                 shallowest_idle_cpu = i;
5300                         }
5301                 } else if (shallowest_idle_cpu == -1) {
5302                         load = weighted_cpuload(i);
5303                         if (load < min_load || (load == min_load && i == this_cpu)) {
5304                                 min_load = load;
5305                                 least_loaded_cpu = i;
5306                         }
5307                 }
5308         }
5309 
5310         return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
5311 }
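
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * selection order used by find_idlest_cpu() above, on made-up data.  Idle
 * cpus win, ranked first by shallowest idle-state exit latency and then by
 * most recent idle timestamp; load only decides when nothing is idle.
 */
#include <stdio.h>

struct demo_cpu {
        int idle;
        unsigned int exit_latency;              /* valid when idle */
        unsigned long long idle_stamp;          /* valid when idle */
        unsigned long load;                     /* used when nothing is idle */
};

static int demo_pick(const struct demo_cpu *c, int n)
{
        unsigned int min_lat = ~0u;
        unsigned long long latest = 0;
        unsigned long min_load = ~0ul;
        int best_idle = -1, least_loaded = 0, i;

        for (i = 0; i < n; i++) {
                if (c[i].idle) {
                        if (c[i].exit_latency < min_lat ||
                            (c[i].exit_latency == min_lat &&
                             c[i].idle_stamp > latest)) {
                                min_lat = c[i].exit_latency;
                                latest = c[i].idle_stamp;
                                best_idle = i;
                        }
                } else if (best_idle == -1 && c[i].load < min_load) {
                        min_load = c[i].load;
                        least_loaded = i;
                }
        }
        return best_idle != -1 ? best_idle : least_loaded;
}

int main(void)
{
        struct demo_cpu cpus[] = {
                { .idle = 0, .load = 300 },
                { .idle = 1, .exit_latency = 200, .idle_stamp = 1000 },
                { .idle = 1, .exit_latency =  50, .idle_stamp =  500 },
                { .idle = 1, .exit_latency =  50, .idle_stamp =  900 },
        };

        printf("picked cpu %d\n", demo_pick(cpus, 4));  /* cpu 3 */
        return 0;
}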
5312 
5313 /*
5314  * Implement a for_each_cpu() variant that starts the scan at a given cpu
5315  * (@start), and wraps around.
5316  *
5317  * This is used to scan for idle CPUs; such that not all CPUs looking for an
5318  * idle CPU find the same CPU. The down-side is that tasks tend to cycle
5319  * through the LLC domain.
5320  *
5321  * Especially tbench is found sensitive to this.
5322  */
5323 
5324 static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
5325 {
5326         int next;
5327 
5328 again:
5329         next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
5330 
5331         if (*wrapped) {
5332                 if (next >= start)
5333                         return nr_cpumask_bits;
5334         } else {
5335                 if (next >= nr_cpumask_bits) {
5336                         *wrapped = 1;
5337                         n = -1;
5338                         goto again;
5339                 }
5340         }
5341 
5342         return next;
5343 }
5344 
5345 #define for_each_cpu_wrap(cpu, mask, start, wrap)                               \
5346         for ((wrap) = 0, (cpu) = (start)-1;                                     \
5347                 (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),     \
5348                 (cpu) < nr_cpumask_bits; )
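
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): a wrapping
 * scan over a small bitmask, equivalent in spirit to for_each_cpu_wrap()
 * above.  Starting the scan at a per-caller offset (typically the target
 * cpu) makes concurrent searchers walk the mask in different orders instead
 * of all piling onto the first set bit.
 */
#include <stdio.h>

#define DEMO_NBITS 16

static void demo_scan_wrap(unsigned int mask, int start)
{
        int i;

        for (i = 0; i < DEMO_NBITS; i++) {
                int cpu = (start + i) % DEMO_NBITS;     /* wrap around */

                if (mask & (1u << cpu))
                        printf("%d ", cpu);
        }
        printf("\n");
}

int main(void)
{
        unsigned int mask = 0x00f0;             /* cpus 4..7 set */

        demo_scan_wrap(mask, 6);                /* visits 6 7 4 5 */
        demo_scan_wrap(mask, 2);                /* visits 4 5 6 7 */
        return 0;
}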
5349 
5350 #ifdef CONFIG_SCHED_SMT
5351 
5352 static inline void set_idle_cores(int cpu, int val)
5353 {
5354         struct sched_domain_shared *sds;
5355 
5356         sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5357         if (sds)
5358                 WRITE_ONCE(sds->has_idle_cores, val);
5359 }
5360 
5361 static inline bool test_idle_cores(int cpu, bool def)
5362 {
5363         struct sched_domain_shared *sds;
5364 
5365         sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5366         if (sds)
5367                 return READ_ONCE(sds->has_idle_cores);
5368 
5369         return def;
5370 }
5371 
5372 /*
5373  * Scans the local SMT mask to see if the entire core is idle, and records this
5374  * information in sd_llc_shared->has_idle_cores.
5375  *
5376  * Since SMT siblings share all cache levels, inspecting this limited remote
5377  * state should be fairly cheap.
5378  */
5379 void __update_idle_core(struct rq *rq)
5380 {
5381         int core = cpu_of(rq);
5382         int cpu;
5383 
5384         rcu_read_lock();
5385         if (test_idle_cores(core, true))
5386                 goto unlock;
5387 
5388         for_each_cpu(cpu, cpu_smt_mask(core)) {
5389                 if (cpu == core)
5390                         continue;
5391 
5392                 if (!idle_cpu(cpu))
5393                         goto unlock;
5394         }
5395 
5396         set_idle_cores(core, 1);
5397 unlock:
5398         rcu_read_unlock();
5399 }
5400 
5401 /*
5402  * Scan the entire LLC domain for idle cores; this dynamically switches off if
5403  * there are no idle cores left in the system; tracked through
5404  * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5405  */
5406 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5407 {
5408         struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5409         int core, cpu, wrap;
5410 
5411         if (!static_branch_likely(&sched_smt_present))
5412                 return -1;
5413 
5414         if (!test_idle_cores(target, false))
5415                 return -1;
5416 
5417         cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
5418 
5419         for_each_cpu_wrap(core, cpus, target, wrap) {
5420                 bool idle = true;
5421 
5422                 for_each_cpu(cpu, cpu_smt_mask(core)) {
5423                         cpumask_clear_cpu(cpu, cpus);
5424                         if (!idle_cpu(cpu))
5425                                 idle = false;
5426                 }
5427 
5428                 if (idle)
5429                         return core;
5430         }
5431 
5432         /*
5433          * Failed to find an idle core; stop looking for one.
5434          */
5435         set_idle_cores(target, 0);
5436 
5437         return -1;
5438 }
5439 
5440 /*
5441  * Scan the local SMT mask for idle CPUs.
5442  */
5443 static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5444 {
5445         int cpu;
5446 
5447         if (!static_branch_likely(&sched_smt_present))
5448                 return -1;
5449 
5450         for_each_cpu(cpu, cpu_smt_mask(target)) {
5451                 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5452                         continue;
5453                 if (idle_cpu(cpu))
5454                         return cpu;
5455         }
5456 
5457         return -1;
5458 }
5459 
5460 #else /* CONFIG_SCHED_SMT */
5461 
5462 static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5463 {
5464         return -1;
5465 }
5466 
5467 static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5468 {
5469         return -1;
5470 }
5471 
5472 #endif /* CONFIG_SCHED_SMT */
5473 
5474 /*
5475  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
5476  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
5477  * average idle time for this rq (as found in rq->avg_idle).
5478  */
5479 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5480 {
5481         struct sched_domain *this_sd;
5482         u64 avg_cost, avg_idle = this_rq()->avg_idle;
5483         u64 time, cost;
5484         s64 delta;
5485         int cpu, wrap;
5486 
5487         this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5488         if (!this_sd)
5489                 return -1;
5490 
5491         avg_cost = this_sd->avg_scan_cost;
5492 
5493         /*
5494          * Due to large variance we need a large fuzz factor; hackbench in
5495          * particular is sensitive here.
5496          */
5497         if ((avg_idle / 512) < avg_cost)
5498                 return -1;
5499 
5500         time = local_clock();
5501 
5502         for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
5503                 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5504                         continue;
5505                 if (idle_cpu(cpu))
5506                         break;
5507         }
5508 
5509         time = local_clock() - time;
5510         cost = this_sd->avg_scan_cost;
5511         delta = (s64)(time - cost) / 8;
5512         this_sd->avg_scan_cost += delta;
5513 
5514         return cpu;
5515 }
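
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * running average kept in sd->avg_scan_cost above, avg += (sample - avg) / 8,
 * i.e. an exponential moving average with a 1/8 gain.  The sample values are
 * made up; they only show how the average tracks a series of scan times, and
 * select_idle_cpu() skips the scan entirely when avg_idle / 512 < avg.
 */
#include <stdio.h>

static long long demo_ewma8(long long avg, long long sample)
{
        return avg + (sample - avg) / 8;
}

int main(void)
{
        long long samples[] = { 4000, 4200, 3900, 12000, 4100, 4000 }; /* ns */
        long long avg = 0;
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                avg = demo_ewma8(avg, samples[i]);
                printf("sample=%6lld avg=%6lld\n", samples[i], avg);
        }
        return 0;
}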
5516 
5517 /*
5518  * Try and locate an idle core/thread in the LLC cache domain.
5519  */
5520 static int select_idle_sibling(struct task_struct *p, int prev, int target)
5521 {
5522         struct sched_domain *sd;
5523         int i;
5524 
5525         if (idle_cpu(target))
5526                 return target;
5527 
5528         /*
5529          * If the previous cpu is cache affine and idle, don't be stupid.
5530          */
5531         if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
5532                 return prev;
5533 
5534         sd = rcu_dereference(per_cpu(sd_llc, target));
5535         if (!sd)
5536                 return target;
5537 
5538         i = select_idle_core(p, sd, target);
5539         if ((unsigned)i < nr_cpumask_bits)
5540                 return i;
5541 
5542         i = select_idle_cpu(p, sd, target);
5543         if ((unsigned)i < nr_cpumask_bits)
5544                 return i;
5545 
5546         i = select_idle_smt(p, sd, target);
5547         if ((unsigned)i < nr_cpumask_bits)
5548                 return i;
5549 
5550         return target;
5551 }
5552 
5553 /*
5554  * cpu_util returns the amount of capacity of a CPU that is used by CFS
5555  * tasks. The unit of the return value must be the one of capacity so we can
5556  * compare the utilization with the capacity of the CPU that is available for
5557  * CFS task (ie cpu_capacity).
5558  *
5559  * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5560  * recent utilization of currently non-runnable tasks on a CPU. It represents
5561  * the amount of utilization of a CPU in the range [0..capacity_orig] where
5562  * capacity_orig is the cpu_capacity available at the highest frequency
5563  * (arch_scale_freq_capacity()).
5564  * The utilization of a CPU converges towards a sum equal to or less than the
5565  * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5566  * the running time on this CPU scaled by capacity_curr.
5567  *
5568  * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5569  * higher than capacity_orig because of unfortunate rounding in
5570  * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5571  * the average stabilizes with the new running time. We need to check that the
5572  * utilization stays within the range of [0..capacity_orig] and cap it if
5573  * necessary. Without utilization capping, a group could be seen as overloaded
5574  * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5575  * available capacity. We allow utilization to overshoot capacity_curr (but not
5576  * capacity_orig) as it is useful for predicting the capacity required after task
5577  * migrations (scheduler-driven DVFS).
5578  */
5579 static int cpu_util(int cpu)
5580 {
5581         unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
5582         unsigned long capacity = capacity_orig_of(cpu);
5583 
5584         return (util >= capacity) ? capacity : util;
5585 }
5586 
5587 static inline int task_util(struct task_struct *p)
5588 {
5589         return p->se.avg.util_avg;
5590 }
5591 
5592 /*
5593  * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
5594  * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
5595  *
5596  * In that case WAKE_AFFINE doesn't make sense and we'll let
5597  * BALANCE_WAKE sort things out.
5598  */
5599 static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
5600 {
5601         long min_cap, max_cap;
5602 
5603         min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
5604         max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
5605 
5606         /* Minimum capacity is close to max, no need to abort wake_affine */
5607         if (max_cap - min_cap < max_cap >> 3)
5608                 return 0;
5609 
5610         return min_cap * 1024 < task_util(p) * capacity_margin;
5611 }
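
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * capacity-fit test in wake_cap() above.  Capacities and utilization are on
 * the usual 1024 scale; the margin of 1280 (~1.25x) is only an example value
 * here.  wake_affine is abandoned only when the two capacities differ by
 * more than 1/8 of the larger one and the task does not fit, with margin,
 * into the smaller capacity.
 */
#include <stdio.h>

static int demo_wake_cap(long cap_prev, long cap_this, long util, long margin)
{
        long min_cap = cap_prev < cap_this ? cap_prev : cap_this;
        long max_cap = cap_prev < cap_this ? cap_this : cap_prev;

        /* capacities are close enough: keep considering wake_affine */
        if (max_cap - min_cap < max_cap >> 3)
                return 0;

        return min_cap * 1024 < util * margin;
}

int main(void)
{
        /* big.LITTLE-style example: little cpu 430, big cpu 1024 */
        printf("light task (util 100): %d\n", demo_wake_cap(430, 1024, 100, 1280));
        printf("heavy task (util 600): %d\n", demo_wake_cap(430, 1024, 600, 1280));
        return 0;
}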
5612 
5613 /*
5614  * select_task_rq_fair: Select target runqueue for the waking task in domains
5615  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5616  * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
5617  *
5618  * Balances load by selecting the idlest cpu in the idlest group, or under
5619  * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
5620  *
5621  * Returns the target cpu number.
5622  *
5623  * preempt must be disabled.
5624  */
5625 static int
5626 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
5627 {
5628         struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
5629         int cpu = smp_processor_id();
5630         int new_cpu = prev_cpu;
5631         int want_affine = 0;
5632         int sync = wake_flags & WF_SYNC;
5633 
5634         if (sd_flag & SD_BALANCE_WAKE) {
5635                 record_wakee(p);
5636                 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
5637                               && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
5638         }
5639 
5640         rcu_read_lock();
5641         for_each_domain(cpu, tmp) {
5642                 if (!(tmp->flags & SD_LOAD_BALANCE))
5643                         break;
5644 
5645                 /*
5646                  * If both cpu and prev_cpu are part of this domain,
5647                  * cpu is a valid SD_WAKE_AFFINE target.
5648                  */
5649                 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5650                     cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5651                         affine_sd = tmp;
5652                         break;
5653                 }
5654 
5655                 if (tmp->flags & sd_flag)
5656                         sd = tmp;
5657                 else if (!want_affine)
5658                         break;
5659         }
5660 
5661         if (affine_sd) {
5662                 sd = NULL; /* Prefer wake_affine over balance flags */
5663                 if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
5664                         new_cpu = cpu;
5665         }
5666 
5667         if (!sd) {
5668                 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
5669                         new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
5670 
5671         } else while (sd) {
5672                 struct sched_group *group;
5673                 int weight;
5674 
5675                 if (!(sd->flags & sd_flag)) {
5676                         sd = sd->child;
5677                         continue;
5678                 }
5679 
5680                 group = find_idlest_group(sd, p, cpu, sd_flag);
5681                 if (!group) {
5682                         sd = sd->child;
5683                         continue;
5684                 }
5685 
5686                 new_cpu = find_idlest_cpu(group, p, cpu);
5687                 if (new_cpu == -1 || new_cpu == cpu) {
5688                         /* Now try balancing at a lower domain level of cpu */
5689                         sd = sd->child;
5690                         continue;
5691                 }
5692 
5693                 /* Now try balancing at a lower domain level of new_cpu */
5694                 cpu = new_cpu;
5695                 weight = sd->span_weight;
5696                 sd = NULL;
5697                 for_each_domain(cpu, tmp) {
5698                         if (weight <= tmp->span_weight)
5699                                 break;
5700                         if (tmp->flags & sd_flag)
5701                                 sd = tmp;
5702                 }
5703                 /* while loop will break here if sd == NULL */
5704         }
5705         rcu_read_unlock();
5706 
5707         return new_cpu;
5708 }
5709 
5710 /*
5711  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
5712  * cfs_rq_of(p) references at time of call are still valid and identify the
5713  * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
5714  */
5715 static void migrate_task_rq_fair(struct task_struct *p)
5716 {
5717         /*
5718          * As blocked tasks retain absolute vruntime the migration needs to
5719          * deal with this by subtracting the old and adding the new
5720          * min_vruntime -- the latter is done by enqueue_entity() when placing
5721          * the task on the new runqueue.
5722          */
5723         if (p->state == TASK_WAKING) {
5724                 struct sched_entity *se = &p->se;
5725                 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5726                 u64 min_vruntime;
5727 
5728 #ifndef CONFIG_64BIT
5729                 u64 min_vruntime_copy;
5730 
5731                 do {
5732                         min_vruntime_copy = cfs_rq->min_vruntime_copy;
5733                         smp_rmb();
5734                         min_vruntime = cfs_rq->min_vruntime;
5735                 } while (min_vruntime != min_vruntime_copy);
5736 #else
5737                 min_vruntime = cfs_rq->min_vruntime;
5738 #endif
5739 
5740                 se->vruntime -= min_vruntime;
5741         }
5742 
5743         /*
5744          * We are supposed to update the task to "current" time, so it is up to date
5745          * and ready to go to the new CPU/cfs_rq. But we have difficulty in getting
5746          * what the current time is, so simply throw away the out-of-date time. This
5747          * will result in the wakee task being less decayed, but giving the wakee more
5748          * load is not a bad thing.
5749          */
5750         remove_entity_load_avg(&p->se);
5751 
5752         /* Tell new CPU we are migrated */
5753         p->se.avg.last_update_time = 0;
5754 
5755         /* We have migrated, no longer consider this task hot */
5756         p->se.exec_start = 0;
5757 }
5758 
5759 static void task_dead_fair(struct task_struct *p)
5760 {
5761         remove_entity_load_avg(&p->se);
5762 }
5763 #endif /* CONFIG_SMP */
5764 
5765 static unsigned long
5766 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
5767 {
5768         unsigned long gran = sysctl_sched_wakeup_granularity;
5769 
5770         /*
5771          * Since it's curr that is running now, convert the gran from real-time
5772          * to virtual-time in its units.
5773          *
5774          * By using 'se' instead of 'curr' we penalize light tasks, so
5775          * they get preempted easier. That is, if 'se' < 'curr' then
5776          * the resulting gran will be larger, therefore penalizing the
5777          * lighter, if otoh 'se' > 'curr' then the resulting gran will
5778          * be smaller, again penalizing the lighter task.
5779          *
5780          * This is especially important for buddies when the leftmost
5781          * task is higher priority than the buddy.
5782          */
5783         return calc_delta_fair(gran, se);
5784 }
5785 
5786 /*
5787  * Should 'se' preempt 'curr'.
5788  *
5789  *             |s1
5790  *        |s2
5791  *   |s3
5792  *         g
5793  *      |<--->|c
5794  *
5795  *  w(c, s1) = -1
5796  *  w(c, s2) =  0
5797  *  w(c, s3) =  1
5798  *
5799  */
5800 static int
5801 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5802 {
5803         s64 gran, vdiff = curr->vruntime - se->vruntime;
5804 
5805         if (vdiff <= 0)
5806                 return -1;
5807 
5808         gran = wakeup_gran(curr, se);
5809         if (vdiff > gran)
5810                 return 1;
5811 
5812         return 0;
5813 }
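
/*
 * Illustrative, standalone userspace sketch (not part of fair.c): the
 * three-way decision made by wakeup_preempt_entity() above.  vdiff is
 * curr->vruntime - se->vruntime; the wakee only preempts when it is ahead
 * of curr by more than the wakeup granularity (already converted to virtual
 * time by calc_delta_fair() in the real code).  The 1ms gran is an example.
 */
#include <stdio.h>

static int demo_preempt(long long curr_vruntime, long long se_vruntime,
                        long long gran)
{
        long long vdiff = curr_vruntime - se_vruntime;

        if (vdiff <= 0)
                return -1;      /* wakee is behind curr: no preemption */
        if (vdiff > gran)
                return 1;       /* far enough ahead: preempt */
        return 0;               /* within the granularity: leave curr alone */
}

int main(void)
{
        long long gran = 1000000;               /* 1ms of virtual time */

        printf("%d %d %d\n",
               demo_preempt(5000000, 6000000, gran),    /* -1 */
               demo_preempt(5000000, 4500000, gran),    /*  0 */
               demo_preempt(5000000, 3000000, gran));   /*  1 */
        return 0;
}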
5814 
5815 static void set_last_buddy(struct sched_entity *se)
5816 {
5817         if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5818                 return;
5819 
5820         for_each_sched_entity(se)
5821                 cfs_rq_of(se)->last = se;
5822 }
5823 
5824 static void set_next_buddy(struct sched_entity *se)
5825 {
5826         if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5827                 return;
5828 
5829         for_each_sched_entity(se)
5830                 cfs_rq_of(se)->next = se;
5831 }
5832 
5833 static void set_skip_buddy(struct sched_entity *se)
5834 {
5835         for_each_sched_entity(se)
5836                 cfs_rq_of(se)->skip = se;
5837 }
5838 
5839 /*
5840  * Preempt the current task with a newly woken task if needed:
5841  */
5842 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
5843 {
5844         struct task_struct *curr = rq->curr;
5845         struct sched_entity *se = &curr->se, *pse = &p->se;
5846         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5847         int scale = cfs_rq->nr_running >= sched_nr_latency;
5848         int next_buddy_marked = 0;
5849 
5850         if (unlikely(se == pse))
5851                 return;
5852 
5853         /*
5854          * This is possible from callers such as attach_tasks(), in which we
5855          * unconditionally check_preempt_curr() after an enqueue (which may have
5856          * led to a throttle).  This both saves work and prevents false
5857          * next-buddy nomination below.
5858          */
5859         if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5860                 return;
5861 
5862         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
5863                 set_next_buddy(pse);
5864                 next_buddy_marked = 1;
5865         }
5866 
5867         /*
5868          * We can come here with TIF_NEED_RESCHED already set from new task
5869          * wake up path.
5870          *
5871          * Note: this also catches the edge-case of curr being in a throttled
5872          * group (e.g. via set_curr_task), since update_curr() (in the
5873          * enqueue of curr) will have resulted in resched being set.  This
5874          * prevents us from potentially nominating it as a false LAST_BUDDY
5875          * below.
5876          */
5877         if (test_tsk_need_resched(curr))
5878                 return;
5879 
5880         /* Idle tasks are by definition preempted by non-idle tasks. */
5881         if (unlikely(curr->policy == SCHED_IDLE) &&
5882             likely(p->policy != SCHED_IDLE))
5883                 goto preempt;
5884 
5885         /*
5886          * Batch and idle tasks do not preempt non-idle tasks (their preemption
5887          * is driven by the tick):
5888          */
5889         if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
5890                 return;
5891 
5892         find_matching_se(&se, &pse);
5893         update_curr(cfs_rq_of(se));
5894         BUG_ON(!pse);
5895         if (wakeup_preempt_entity(se, pse) == 1) {
5896                 /*
5897                  * Bias pick_next to pick the sched entity that is
5898                  * triggering this preemption.
5899                  */
5900                 if (!next_buddy_marked)
5901                         set_next_buddy(pse);
5902                 goto preempt;
5903         }
5904 
5905         return;
5906 
5907 preempt:
5908         resched_curr(rq);
5909         /*
5910          * Only set the backward buddy when the current task is still
5911          * on the rq. This can happen when a wakeup gets interleaved
5912          * with schedule on the ->pre_schedule() or idle_balance()
5913          * point, either of which can drop the rq lock.
5914          *
5915          * Also, during early boot the idle thread is in the fair class,
5916          * for obvious reasons it's a bad idea to schedule back to it.
5917          */
5918         if (unlikely(!se->on_rq || curr == rq->idle))
5919                 return;
5920 
5921         if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5922                 set_last_buddy(se);
5923 }
5924 
5925 static struct task_struct *
5926 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
5927 {
5928         struct cfs_rq *cfs_rq = &rq->cfs;
5929         struct sched_entity *se;
5930         struct task_struct *p;
5931         int new_tasks;
5932 
5933 again:
5934 #ifdef CONFIG_FAIR_GROUP_SCHED
5935         if (!cfs_rq->nr_running)
5936                 goto idle;
5937 
5938         if (prev->sched_class != &fair_sched_class)
5939                 goto simple;
5940 
5941         /*
5942          * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5943          * likely that a next task is from the same cgroup as the current.
5944          *
5945          * Therefore attempt to avoid putting and setting the entire cgroup
5946          * hierarchy, only change the part that actually changes.
5947          */
5948 
5949         do {
5950                 struct sched_entity *curr = cfs_rq->curr;
5951 
5952                 /*
5953                  * Since we got here without doing put_prev_entity() we also
5954                  * have to consider cfs_rq->curr. If it is still a runnable
5955                  * entity, update_curr() will update its vruntime, otherwise
5956                  * forget we've ever seen it.
5957                  */
5958                 if (curr) {
5959                         if (curr->on_rq)
5960                                 update_curr(cfs_rq);
5961                         else
5962                                 curr = NULL;
5963 
5964                         /*
5965                          * This call to check_cfs_rq_runtime() will do the
5966                          * throttle and dequeue its entity in the parent(s).
5967                          * Therefore the 'simple' nr_running test will indeed
5968                          * be correct.
5969                          */
5970                         if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5971                                 goto simple;
5972                 }
5973 
5974                 se = pick_next_entity(cfs_rq, curr);
5975                 cfs_rq = group_cfs_rq(se);
5976         } while (cfs_rq);
5977 
5978         p = task_of(se);
5979 
5980         /*
5981          * Since we haven't yet done put_prev_entity(), and the selected task
5982          * may be a different task than the one we started out with, try to
5983          * touch as few cfs_rqs as possible.
5984          */
5985         if (prev != p) {
5986                 struct sched_entity *pse = &prev->se;
5987 
5988                 while (!(cfs_rq = is_same_group(se, pse))) {
5989                         int se_depth = se->depth;
5990                         int pse_depth = pse->depth;
5991 
5992                         if (se_depth <= pse_depth) {
5993                                 put_prev_entity(cfs_rq_of(pse), pse);
5994                                 pse = parent_entity(pse);
5995                         }
5996                         if (se_depth >= pse_depth) {
5997                                 set_next_entity(cfs_rq_of(se), se);
5998                                 se = parent_entity(se);
5999                         }
6000                 }
6001 
6002                 put_prev_entity(cfs_rq, pse);
6003                 set_next_entity(cfs_rq, se);
6004         }
6005 
6006         if (hrtick_enabled(rq))
6007                 hrtick_start_fair(rq, p);
6008 
6009         return p;
6010 simple:
6011         cfs_rq = &rq->cfs;
6012 #endif
6013 
6014         if (!cfs_rq->nr_running)
6015                 goto idle;
6016 
6017         put_prev_task(rq, prev);
6018 
6019         do {
6020                 se = pick_next_entity(cfs_rq, NULL);
6021                 set_next_entity(cfs_rq, se);
6022                 cfs_rq = group_cfs_rq(se);
6023         } while (cfs_rq);
6024 
6025         p = task_of(se);
6026 
6027         if (hrtick_enabled(rq))
6028                 hrtick_start_fair(rq, p);
6029 
6030         return p;
6031 
6032 idle:
6033         /*
6034          * This is OK, because current is on_cpu, which avoids it being picked
6035          * for load-balance, and preemption/IRQs are still disabled, avoiding
6036          * further scheduler activity on it; we're also being very careful to
6037          * re-start the picking loop.
6038          */
6039         lockdep_unpin_lock(&rq->lock, cookie);
6040         new_tasks = idle_balance(rq);
6041         lockdep_repin_lock(&rq->lock, cookie);
6042         /*
6043          * Because idle_balance() releases (and re-acquires) rq->lock, it is
6044          * possible for any higher priority task to appear. In that case we
6045          * must re-start the pick_next_entity() loop.
6046          */
6047         if (new_tasks < 0)
6048                 return RETRY_TASK;
6049 
6050         if (new_tasks > 0)
6051                 goto again;
6052 
6053         return NULL;
6054 }
6055 
6056 /*
6057  * Account for a descheduled task:
6058  */
6059 static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
6060 {
6061         struct sched_entity *se = &prev->se;
6062         struct cfs_rq *cfs_rq;
6063 
6064         for_each_sched_entity(se) {
6065                 cfs_rq = cfs_rq_of(se);
6066                 put_prev_entity(cfs_rq, se);
6067         }
6068 }
6069 
6070 /*
6071  * sched_yield() is very simple
6072  *
6073  * The magic of dealing with the ->skip buddy is in pick_next_entity.
6074  */
6075 static void yield_task_fair(struct rq *rq)
6076 {
6077         struct task_struct *curr = rq->curr;
6078         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6079         struct sched_entity *se = &curr->se;
6080 
6081         /*
6082          * Are we the only task in the tree?
6083          */
6084         if (unlikely(rq->nr_running == 1))
6085                 return;
6086 
6087         clear_buddies(cfs_rq, se);
6088 
6089         if (curr->policy != SCHED_BATCH) {
6090                 update_rq_clock(rq);
6091                 /*
6092                  * Update run-time statistics of the 'current'.
6093                  */
6094                 update_curr(cfs_rq);
6095                 /*
6096                  * Tell update_rq_clock() that we've just updated,
6097                  * so we don't do microscopic update in schedule()
6098                  * and double the fastpath cost.
6099                  */
6100                 rq_clock_skip_update(rq, true);
6101         }
6102 
6103         set_skip_buddy(se);
6104 }
6105 
6106 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6107 {
6108         struct sched_entity *se = &p->se;
6109 
6110         /* throttled hierarchies are not runnable */
6111         if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
6112                 return false;
6113 
6114         /* Tell the scheduler that we'd really like pse to run next. */
6115         set_next_buddy(se);
6116 
6117         yield_task_fair(rq);
6118 
6119         return true;
6120 }
6121 
6122 #ifdef CONFIG_SMP
6123 /**************************************************
6124  * Fair scheduling class load-balancing methods.
6125  *
6126  * BASICS
6127  *
6128  * The purpose of load-balancing is to achieve the same basic fairness the
6129  * per-cpu scheduler provides, namely provide a proportional amount of compute
6130  * time to each task. This is expressed in the following equation:
6131  *
6132  *   W_i,n/P_i == W_j,n/P_j for all i,j                               (1)
6133  *
6134  * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
6135  * W_i,0 is defined as:
6136  *
6137  *   W_i,0 = \Sum_j w_i,j                                             (2)
6138  *
6139  * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
6140  * is derived from the nice value as per sched_prio_to_weight[].
6141  *
6142  * The weight average is an exponential decay average of the instantaneous
6143  * weight:
6144  *
6145  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
6146  *
6147  * C_i is the compute capacity of cpu i; typically it is the
6148  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6149  * can also include other factors [XXX].
6150  *
6151  * To achieve this balance we define a measure of imbalance which follows
6152  * directly from (1):
6153  *
6154  *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
6155  *
6156  * We then move tasks around to minimize the imbalance (worked example below).
6157  * In the continuous function space it is obvious this converges; in the
6158  * discrete case we get a few fun cases generally called infeasible weight scenarios.
6159  *
6160  * [XXX expand on:
6161  *     - infeasible weights;
6162  *     - local vs global optima in the discrete case. ]
6163  *
6164  *
6165  * SCHED DOMAINS
6166  *
6167  * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
6168  * for all i,j solution, we create a tree of cpus that follows the hardware
6169  * topology where each level pairs two lower groups (or better). This results
6170  * in O(log n) layers. Furthermore we reduce the number of cpus going up the
6171  * tree to only the first of the previous level and we decrease the frequency
6172  * of load-balance at each level inv. proportional to the number of cpus in
6173  * the groups.
6174  *
6175  * This yields:
6176  *
6177  *     log_2 n     1     n
6178  *   \Sum       { --- * --- * 2^i } = O(n)                            (5)
6179  *     i = 0      2^i   2^i
6180  *                               `- size of each group
6181  *         |         |     `- number of cpus doing load-balance
6182  *         |         `- freq
6183  *         `- sum over all levels
6184  *
6185  * Coupled with a limit on how many tasks we can migrate every balance pass,
6186  * this makes (5) the runtime complexity of the balancer.
6187  *
6188  * An important property here is that each CPU is still (indirectly) connected
6189  * to every other cpu in at most O(log n) steps:
6190  *
6191  * The adjacency matrix of the resulting graph is given by:
6192  *
6193  *             log_2 n
6194  *   A_i,j = \Union     (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
6195  *             k = 0
6196  *
6197  * And you'll find that:
6198  *
6199  *   A^(log_2 n)_i,j != 0  for all i,j                                (7)
6200  *
6201  * Showing there's indeed a path between every cpu in at most O(log n) steps.
6202  * The task movement gives a factor of O(m), giving a convergence complexity
6203  * of:
6204  *
6205  *   O(nm log n),  n := nr_cpus, m := nr_tasks                        (8)
6206  *
6207  *
6208  * WORK CONSERVING
6209  *
6210  * In order to avoid CPUs going idle while there's still work to do, new idle
6211  * balancing is more aggressive and has the newly idle cpu iterate up the domain
6212  * tree itself instead of relying on other CPUs to bring it work.
6213  *
6214  * This adds some complexity to both (5) and (8) but it reduces the total idle
6215  * time.
6216  *
6217  * [XXX more?]
6218  *
6219  *
6220  * CGROUPS
6221  *
6222  * Cgroups make a horror show out of (2), instead of a simple sum we get:
6223  *
6224  *                                s_k,i
6225  *   W_i,0 = \Sum_j \Prod_k w_k * -----                               (9)
6226  *                                 S_k
6227  *
6228  * Where
6229  *
6230  *   s_k,i = \Sum_j w_i,j,k  and  S_k = \Sum_i s_k,i                 (10)
6231  *
6232  * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
6233  *
6234  * The big problem is S_k: it's a global sum needed to compute a local (W_i)
6235  * property.
6236  *
6237  * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6238  *      rewrite all of this once again.]
6239  */
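As a concrete instance of equations (2) and (4) above, consider two equal-capacity cpus where one runs three nice-0 tasks and the other runs one. The following stand-alone sketch (an editor's illustration, not part of fair.c; a nice-0 task has weight 1024) computes the per-cpu weight sums and the resulting imbalance:

#include <stdio.h>

#define NICE_0_WEIGHT	1024UL	/* weight of a nice-0 task */

int main(void)
{
	/* Equation (2): instantaneous weight sums, three vs. one nice-0 task. */
	unsigned long w[2] = { 3 * NICE_0_WEIGHT, 1 * NICE_0_WEIGHT };
	unsigned long c[2] = { 1024UL, 1024UL };	/* equal compute capacity */

	unsigned long r0  = w[0] * 1024 / c[0];		/* W_0 / C_0, scaled */
	unsigned long r1  = w[1] * 1024 / c[1];		/* W_1 / C_1, scaled */
	unsigned long avg = (r0 + r1) / 2;		/* avg(W/C) */

	/* Equation (4): imbalance between the loaded cpu and the light one. */
	unsigned long imb = (r0 > avg ? r0 : avg) - (r1 < avg ? r1 : avg);

	printf("W_0/C_0=%lu W_1/C_1=%lu avg=%lu imb=%lu\n", r0, r1, avg, imb);
	return 0;
}

Moving one nice-0 task from the first cpu to the second makes both ratios equal to the average and drives the imbalance to zero, which is the fixed point the balancer iterates towards.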
6240 
6241 static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6242 
6243 enum fbq_type { regular, remote, all };
6244 
6245 #define LBF_ALL_PINNED  0x01
6246 #define LBF_NEED_BREAK  0x02
6247 #define LBF_DST_PINNED  0x04
6248 #define LBF_SOME_PINNED 0x08
6249 
6250 struct lb_env {
6251         struct sched_domain     *sd;
6252 
6253         struct rq               *src_rq;
6254         int                     src_cpu;
6255 
6256         int                     dst_cpu;
6257         struct rq               *dst_rq;
6258 
6259         struct cpumask          *dst_grpmask;
6260         int                     new_dst_cpu;
6261         enum cpu_idle_type      idle;
6262         long                    imbalance;
6263         /* The set of CPUs under consideration for load-balancing */
6264         struct cpumask          *cpus;
6265 
6266         unsigned int            flags;
6267 
6268         unsigned int            loop;
6269         unsigned int            loop_break;
6270         unsigned int            loop_max;
6271 
6272         enum fbq_type           fbq_type;
6273         struct list_head        tasks;
6274 };
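For orientation, the main consumer of this structure is load_balance(), further down in this file, which fills it roughly as follows before calling find_busiest_group() and detach_tasks(). This is an editor's paraphrase rather than a verbatim excerpt; sd, this_cpu, this_rq, idle and cpus are assumed to come from that caller's context:

	struct lb_env env = {
		.sd		= sd,
		.dst_cpu	= this_cpu,
		.dst_rq		= this_rq,
		.dst_grpmask	= sched_group_cpus(sd->groups),
		.idle		= idle,
		.loop_break	= sched_nr_migrate_break,
		.cpus		= cpus,
		.fbq_type	= all,
		.tasks		= LIST_HEAD_INIT(env.tasks),
	};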
6275 
6276 /*
6277  * Is this task likely cache-hot:
6278  */
6279 static int task_hot(struct task_struct *p, struct lb_env *env)
6280 {
6281         s64 delta;
6282 
6283         lockdep_assert_held(&env->src_rq->lock);
6284 
6285         if (p->sched_class != &fair_sched_class)
6286                 return 0;
6287 
6288         if (unlikely(p->policy == SCHED_IDLE))
6289                 return 0;
6290 
6291         /*
6292          * Buddy candidates are cache hot:
6293          */
6294         if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
6295                         (&p->se == cfs_rq_of(&p->se)->next ||
6296                          &p->se == cfs_rq_of(&p->se)->last))
6297                 return 1;
6298 
6299         if (sysctl_sched_migration_cost == -1)
6300                 return 1;
6301         if (sysctl_sched_migration_cost == 0)
6302                 return 0;
6303 
6304         delta = rq_clock_task(env->src_rq) - p->se.exec_start;
6305 
6306         return delta < (s64)sysctl_sched_migration_cost;
6307 }
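The fallback test above simply asks whether the task last ran within sysctl_sched_migration_cost nanoseconds of the source runqueue's task clock. A trivial stand-alone rendering of that arithmetic, with illustrative values:

#include <stdio.h>

typedef long long s64;

static int cache_hot(s64 now, s64 exec_start, s64 migration_cost)
{
	s64 delta = now - exec_start;

	return delta < migration_cost;
}

int main(void)
{
	/* 500000 ns mirrors the default migration cost. */
	printf("%d\n", cache_hot(1000000LL, 700000LL, 500000LL)); /* 1: ran 0.3 ms ago */
	printf("%d\n", cache_hot(1000000LL, 100000LL, 500000LL)); /* 0: ran 0.9 ms ago */
	return 0;
}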
6308 
6309 #ifdef CONFIG_NUMA_BALANCING
6310 /*
6311  * Returns 1, if task migration degrades locality
6312  * Returns 0, if task migration improves locality i.e migration preferred.
6313  * Returns -1, if task migration is not affected by locality.
6314  */
6315 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
6316 {
6317         struct numa_group *numa_group = rcu_dereference(p->numa_group);
6318         unsigned long src_faults, dst_faults;
6319         int src_nid, dst_nid;
6320 
6321         if (!static_branch_likely(&sched_numa_balancing))
6322                 return -1;
6323 
6324         if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
6325                 return -1;
6326 
6327         src_nid = cpu_to_node(env->src_cpu);
6328         dst_nid = cpu_to_node(env->dst_cpu);
6329 
6330         if (src_nid == dst_nid)
6331                 return -1;
6332 
6333         /* Migrating away from the preferred node is always bad. */
6334         if (src_nid == p->numa_preferred_nid) {
6335                 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6336                         return 1;
6337                 else
6338                         return -1;
6339         }
6340 
6341         /* Encourage migration to the preferred node. */
6342         if (dst_nid == p->numa_preferred_nid)
6343                 return 0;
6344 
6345         if (numa_group) {
6346                 src_faults = group_faults(p, src_nid);
6347                 dst_faults = group_faults(p, dst_nid);
6348         } else {
6349                 src_faults = task_faults(p, src_nid);
6350                 dst_faults = task_faults(p, dst_nid);
6351         }
6352 
6353         return dst_faults < src_faults;
6354 }
6355 
6356 #else
6357 static inline int migrate_degrades_locality(struct task_struct *p,
6358                                              struct lb_env *env)
6359 {
6360         return -1;
6361 }
6362 #endif
6363 
6364 /*
6365  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6366  */
6367 static
6368 int can_migrate_task(struct task_struct *p, struct lb_env *env)
6369 {
6370         int tsk_cache_hot;
6371 
6372         lockdep_assert_held(&env->src_rq->lock);
6373 
6374         /*
6375          * We do not migrate tasks that are:
6376          * 1) throttled_lb_pair, or
6377          * 2) cannot be migrated to this CPU due to cpus_allowed, or
6378          * 3) running (obviously), or
6379          * 4) are cache-hot on their current CPU.
6380          */
6381         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6382                 return 0;
6383 
6384         if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
6385                 int cpu;
6386 
6387                 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
6388 
6389                 env->flags |= LBF_SOME_PINNED;
6390 
6391                 /*
6392                  * Remember if this task can be migrated to any other cpu in
6393                  * our sched_group. We may want to revisit it if we couldn't
6394                  * meet load balance goals by pulling other tasks on src_cpu.
6395                  *
6396                  * Also avoid computing new_dst_cpu if we have already computed
6397                  * one in current iteration.
6398                  */
6399                 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
6400                         return 0;
6401 
6402                 /* Prevent to re-select dst_cpu via env's cpus */
6403                 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6404                         if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6405                                 env->flags |= LBF_DST_PINNED;
6406                                 env->new_dst_cpu = cpu;
6407                                 break;
6408                         }
6409                 }
6410 
6411                 return 0;
6412         }
6413 
6414         /* Record that we found at least one task that could run on dst_cpu */
6415         env->flags &= ~LBF_ALL_PINNED;
6416 
6417         if (task_running(env->src_rq, p)) {
6418                 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
6419                 return 0;
6420         }
6421 
6422         /*
6423          * Aggressive migration if:
6424          * 1) destination numa is preferred
6425          * 2) task is cache cold, or
6426          * 3) too many balance attempts have failed.
6427          */
6428         tsk_cache_hot = migrate_degrades_locality(p, env);
6429         if (tsk_cache_hot == -1)
6430                 tsk_cache_hot = task_hot(p, env);
6431 
6432         if (tsk_cache_hot <= 0 ||
6433             env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
6434                 if (tsk_cache_hot == 1) {
6435                         schedstat_inc(env->sd->lb_hot_gained[env->idle]);
6436                         schedstat_inc(p->se.statistics.nr_forced_migrations);
6437                 }
6438                 return 1;
6439         }
6440 
6441         schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
6442         return 0;
6443 }
6444 
6445 /*
6446  * detach_task() -- detach the task for the migration specified in env
6447  */
6448 static void detach_task(struct task_struct *p, struct lb_env *env)
6449 {
6450         lockdep_assert_held(&env->src_rq->lock);
6451 
6452         p->on_rq = TASK_ON_RQ_MIGRATING;
6453         deactivate_task(env->src_rq, p, 0);
6454         set_task_cpu(p, env->dst_cpu);
6455 }
6456 
6457 /*
6458  * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
6459  * part of active balancing operations within "domain".
6460  *
6461  * Returns a task if successful and NULL otherwise.
6462  */
6463 static struct task_struct *detach_one_task(struct lb_env *env)
6464 {
6465         struct task_struct *p, *n;
6466 
6467         lockdep_assert_held(&env->src_rq->lock);
6468 
6469         list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
6470                 if (!can_migrate_task(p, env))
6471                         continue;
6472 
6473                 detach_task(p, env);
6474 
6475                 /*
6476                  * Right now, this is only the second place where
6477                  * lb_gained[env->idle] is updated (other is detach_tasks)
6478                  * so we can safely collect stats here rather than
6479                  * inside detach_tasks().
6480                  */
6481                 schedstat_inc(env->sd->lb_gained[env->idle]);
6482                 return p;
6483         }
6484         return NULL;
6485 }
6486 
6487 static const unsigned int sched_nr_migrate_break = 32;
6488 
6489 /*
6490  * detach_tasks() -- tries to detach up to imbalance weighted load from
6491  * busiest_rq, as part of a balancing operation within domain "sd".
6492  *
6493  * Returns number of detached tasks if successful and 0 otherwise.
6494  */
6495 static int detach_tasks(struct lb_env *env)
6496 {
6497         struct list_head *tasks = &env->src_rq->cfs_tasks;
6498         struct task_struct *p;
6499         unsigned long load;
6500         int detached = 0;
6501 
6502         lockdep_assert_held(&env->src_rq->lock);
6503 
6504         if (env->imbalance <= 0)
6505                 return 0;
6506 
6507         while (!list_empty(tasks)) {
6508                 /*
6509                  * We don't want to steal all the tasks, otherwise we may be treated
6510                  * likewise, which could at worst lead to a livelock crash.
6511                  */
6512                 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6513                         break;
6514 
6515                 p = list_first_entry(tasks, struct task_struct, se.group_node);
6516 
6517                 env->loop++;
6518                 /* We've more or less seen every task there is, call it quits */
6519                 if (env->loop > env->loop_max)
6520                         break;
6521 
6522                 /* take a breather every nr_migrate tasks */
6523                 if (env->loop > env->loop_break) {
6524                         env->loop_break += sched_nr_migrate_break;
6525                         env->flags |= LBF_NEED_BREAK;
6526                         break;
6527                 }
6528 
6529                 if (!can_migrate_task(p, env))
6530                         goto next;
6531 
6532                 load = task_h_load(p);
6533 
6534                 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
6535                         goto next;
6536 
6537                 if ((load / 2) > env->imbalance)
6538                         goto next;
6539 
6540                 detach_task(p, env);
6541                 list_add(&p->se.group_node, &env->tasks);
6542 
6543                 detached++;
6544                 env->imbalance -= load;
6545 
6546 #ifdef CONFIG_PREEMPT
6547                 /*
6548                  * NEWIDLE balancing is a source of latency, so preemptible
6549                  * kernels will stop after the first task is detached to minimize
6550                  * the critical section.
6551                  */
6552                 if (env->idle == CPU_NEWLY_IDLE)
6553                         break;
6554 #endif
6555 
6556                 /*
6557                  * We only want to steal up to the prescribed amount of
6558                  * weighted load.
6559                  */
6560                 if (env->imbalance <= 0)
6561                         break;
6562 
6563                 continue;
6564 next:
6565                 list_move_tail(&p->se.group_node, tasks);
6566         }
6567 
6568         /*
6569          * Right now, this is one of only two places we collect this stat
6570          * so we can safely collect detach_one_task() stats here rather
6571          * than inside detach_one_task().
6572          */
6573         schedstat_add(env->sd->lb_gained[env->idle], detached);
6574 
6575         return detached;
6576 }
6577 
6578 /*
6579  * attach_task() -- attach the task detached by detach_task() to its new rq.
6580  */
6581 static void attach_task(struct rq *rq, struct task_struct *p)
6582 {
6583         lockdep_assert_held(&rq->lock);
6584 
6585         BUG_ON(task_rq(p) != rq);
6586         activate_task(rq, p, 0);
6587         p->on_rq = TASK_ON_RQ_QUEUED;
6588         check_preempt_curr(rq, p, 0);
6589 }
6590 
6591 /*
6592  * attach_one_task() -- attaches the task returned from detach_one_task() to
6593  * its new rq.
6594  */
6595 static void attach_one_task(struct rq *rq, struct task_struct *p)
6596 {
6597         raw_spin_lock(&rq->lock);
6598         attach_task(rq, p);
6599         raw_spin_unlock(&rq->lock);
6600 }
6601 
6602 /*
6603  * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
6604  * new rq.
6605  */
6606 static void attach_tasks(struct lb_env *env)
6607 {
6608         struct list_head *tasks = &env->tasks;
6609         struct task_struct *p;
6610 
6611         raw_spin_lock(&env->dst_rq->lock);
6612 
6613         while (!list_empty(tasks)) {
6614                 p = list_first_entry(tasks, struct task_struct, se.group_node);
6615                 list_del_init(&p->se.group_node);
6616 
6617                 attach_task(env->dst_rq, p);
6618         }
6619 
6620         raw_spin_unlock(&env->dst_rq->lock);
6621 }
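detach_task()/detach_tasks() and attach_task()/attach_tasks() together form a two-phase migration: tasks are dequeued and re-targeted while only the source runqueue lock is held, parked on env->tasks, and then enqueued under the destination runqueue lock. The caller, load_balance() (later in this file), sequences them roughly as in this editor's sketch; the surrounding variables are assumed from that context:

	/* Phase 1: pull candidate tasks off the busiest runqueue. */
	raw_spin_lock_irqsave(&busiest->lock, flags);
	cur_ld_moved = detach_tasks(&env);	/* detached tasks sit on env.tasks */
	raw_spin_unlock(&busiest->lock);

	/* Phase 2: enqueue them on the destination runqueue. */
	if (cur_ld_moved) {
		attach_tasks(&env);		/* takes env.dst_rq->lock itself */
		ld_moved += cur_ld_moved;
	}
	local_irq_restore(flags);

Because detach_task() marks each task TASK_ON_RQ_MIGRATING before the source lock is dropped, nothing else can requeue it in the window between the two phases.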
6622 
6623 #ifdef CONFIG_FAIR_GROUP_SCHED
6624 static void update_blocked_averages(int cpu)
6625 {
6626         struct rq *rq = cpu_rq(cpu);
6627         struct cfs_rq *cfs_rq;
6628         unsigned long flags;
6629 
6630         raw_spin_lock_irqsave(&rq->lock, flags);
6631         update_rq_clock(rq);
6632 
6633         /*
6634          * Iterates the task_group tree in a bottom up fashion, see
6635          * list_add_leaf_cfs_rq() for details.
6636          */
6637         for_each_leaf_cfs_rq(rq, cfs_rq) {
6638                 /* throttled entities do not contribute to load */
6639                 if (throttled_hierarchy(cfs_rq))
6640                         continue;
6641 
6642                 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
6643                         update_tg_load_avg(cfs_rq, 0);
6644         }
6645         raw_spin_unlock_irqrestore(&rq->lock, flags);
6646 }
6647 
6648 /*
6649  * Compute the hierarchical load factor for cfs_rq and all its ascendants.
6650  * This needs to be done in a top-down fashion because the load of a child
6651  * group is a fraction of its parents load.
6652  */
6653 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
6654 {
6655         struct rq *rq = rq_of(cfs_rq);
6656         struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
6657         unsigned long now = jiffies;
6658         unsigned long load;
6659 
6660         if (cfs_rq->last_h_load_update == now)
6661                 return;
6662 
6663         cfs_rq->h_load_next = NULL;
6664         for_each_sched_entity(se) {
6665                 cfs_rq = cfs_rq_of(se);
6666                 cfs_rq->h_load_next = se;
6667                 if (cfs_rq->last_h_load_update == now)
6668                         break;
6669         }
6670 
6671         if (!se) {
6672                 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
6673                 cfs_rq->last_h_load_update = now;
6674         }
6675 
6676         while ((se = cfs_rq->h_load_next) != NULL) {
6677                 load = cfs_rq->h_load;
6678                 load = div64_ul(load * se->avg.load_avg,
6679                         cfs_rq_load_avg(cfs_rq) + 1);
6680                 cfs_rq = group_cfs_rq(se);
6681                 cfs_rq->h_load = load;
6682                 cfs_rq->last_h_load_update = now;
6683         }
6684 }
6685 
6686 static unsigned long task_h_load(struct task_struct *p)
6687 {
6688         struct cfs_rq *cfs_rq = task_cfs_rq(p);
6689 
6690         update_cfs_rq_h_load(cfs_rq);
6691         return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
6692                         cfs_rq_load_avg(cfs_rq) + 1);
6693 }
6694 #else
6695 static inline void update_blocked_averages(int cpu)
6696 {
6697         struct rq *rq = cpu_rq(cpu);
6698         struct cfs_rq *cfs_rq = &rq->cfs;
6699         unsigned long flags;
6700 
6701         raw_spin_lock_irqsave(&rq->lock, flags);
6702         update_rq_clock(rq);
6703         update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
6704         raw_spin_unlock_irqrestore(&rq->lock, flags);
6705 }
6706 
6707 static unsigned long task_h_load(struct task_struct *p)
6708 {
6709         return p->se.avg.load_avg;
6710 }
6711 #endif
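In the group-scheduling case above, task_h_load() scales a task's load_avg by the product of its ancestors' share ratios, computed top-down by update_cfs_rq_h_load(). A stand-alone worked example with made-up numbers, one group level below the root:

#include <stdio.h>

int main(void)
{
	/*
	 * Made-up numbers: the root cfs_rq carries load_avg 2048, the group's
	 * sched_entity contributes 1024 of that, the group's own cfs_rq
	 * carries 512, and the task of interest has load_avg 256.
	 */
	unsigned long root_load = 2048, group_se_load = 1024;
	unsigned long group_cfs_load = 512, task_load = 256;

	/* Root h_load is simply its load_avg. */
	unsigned long h_load = root_load;

	/* One level down: scale by the group entity's share of its parent (+1 avoids /0). */
	h_load = h_load * group_se_load / (root_load + 1);

	/* task_h_load(): the task's share of its group's h_load. */
	unsigned long t_h_load = task_load * h_load / (group_cfs_load + 1);

	printf("group h_load=%lu task_h_load=%lu\n", h_load, t_h_load);
	return 0;
}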
6712 
6713 /********** Helpers for find_busiest_group ************************/
6714 
6715 enum group_type {
6716         group_other = 0,
6717         group_imbalanced,
6718         group_overloaded,
6719 };
6720 
6721 /*
6722  * sg_lb_stats - stats of a sched_group required for load_balancing
6723  */
6724 struct sg_lb_stats {
6725         unsigned long avg_load; /*Avg load across the CPUs of the group */
6726         unsigned long group_load; /* Total load over the CPUs of the group */
6727         unsigned long sum_weighted_load; /* Weighted load of group's tasks */
6728         unsigned long load_per_task;
6729         unsigned long group_capacity;
6730         unsigned long group_util; /* Total utilization of the group */
6731         unsigned int sum_nr_running; /* Nr tasks running in the group */
6732         unsigned int idle_cpus;
6733         unsigned int group_weight;
6734         enum group_type group_type;
6735         int group_no_capacity;
6736 #ifdef CONFIG_NUMA_BALANCING
6737         unsigned int nr_numa_running;
6738         unsigned int nr_preferred_running;
6739 #endif
6740 };
6741 
6742 /*
6743  * sd_lb_stats - Structure to store the statistics of a sched_domain
6744  *               during load balancing.
6745  */
6746 struct sd_lb_stats {
6747         struct sched_group *busiest;    /* Busiest group in this sd */
6748         struct sched_group *local;      /* Local group in this sd */
6749         unsigned long total_load;       /* Total load of all groups in sd */
6750         unsigned long total_capacity;   /* Total capacity of all groups in sd */
6751         unsigned long avg_load; /* Average load across all groups in sd */
6752 
6753         struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
6754         struct sg_lb_stats local_stat;  /* Statistics of the local group */
6755 };
6756 
6757 static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
6758 {
6759         /*
6760          * Skimp on the clearing to avoid duplicate work. We can avoid clearing
6761          * local_stat because update_sg_lb_stats() does a full clear/assignment.
6762          * We must however clear busiest_stat::avg_load because
6763          * update_sd_pick_busiest() reads this before assignment.
6764          */
6765         *sds = (struct sd_lb_stats){
6766                 .busiest = NULL,
6767                 .local = NULL,
6768                 .total_load = 0UL,
6769                 .total_capacity = 0UL,
6770                 .busiest_stat = {
6771                         .avg_load = 0UL,
6772                         .sum_nr_running = 0,
6773                         .group_type = group_other,
6774                 },
6775         };
6776 }
6777 
6778 /**
6779  * get_sd_load_idx - Obtain the load index for a given sched domain.
6780  * @sd: The sched_domain whose load_idx is to be obtained.
6781  * @idle: The idle status of the CPU for whose sd load_idx is obtained.
6782  *
6783  * Return: The load index.
6784  */
6785 static inline int get_sd_load_idx(struct sched_domain *sd,
6786                                         enum cpu_idle_type idle)
6787 {
6788         int load_idx;
6789 
6790         switch (idle) {
6791         case CPU_NOT_IDLE:
6792                 load_idx = sd->busy_idx;
6793                 break;
6794 
6795         case CPU_NEWLY_IDLE:
6796                 load_idx = sd->newidle_idx;
6797                 break;
6798         default:
6799                 load_idx = sd->idle_idx;
6800                 break;
6801         }
6802 
6803         return load_idx;
6804 }
6805 
6806 static unsigned long scale_rt_capacity(int cpu)
6807 {
6808         struct rq *rq = cpu_rq(cpu);
6809         u64 total, used, age_stamp, avg;
6810         s64 delta;
6811 
6812         /*
6813          * Since we're reading these variables without serialization make sure
6814          * we read them once before doing sanity checks on them.
6815          */
6816         age_stamp = READ_ONCE(rq->age_stamp);
6817         avg = READ_ONCE(rq->rt_avg);
6818         delta = __rq_clock_broken(rq) - age_stamp;
6819 
6820         if (unlikely(delta < 0))
6821                 delta = 0;
6822 
6823         total = sched_avg_period() + delta;
6824 
6825         used = div_u64(avg, total);
6826 
6827         if (likely(used < SCHED_CAPACITY_SCALE))
6828                 return SCHED_CAPACITY_SCALE - used;
6829 
6830         return 1;
6831 }
6832 
6833 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
6834 {
6835         unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
6836         struct sched_group *sdg = sd->groups;
6837 
6838         cpu_rq(cpu)->cpu_capacity_orig = capacity;
6839 
6840         capacity *= scale_rt_capacity(cpu);
6841         capacity >>= SCHED_CAPACITY_SHIFT;
6842 
6843         if (!capacity)
6844                 capacity = 1;
6845 
6846         cpu_rq(cpu)->cpu_capacity = capacity;
6847         sdg->sgc->capacity = capacity;
6848 }
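Numerically, update_cpu_capacity() multiplies the architectural capacity by the fraction of recent time not consumed by RT/IRQ activity and shifts the product back into capacity units. A stand-alone sketch with illustrative values:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	unsigned long arch_capacity = 1024;	/* full-size CPU */
	unsigned long rt_used = 256;		/* ~25% of recent time eaten by RT */

	/* scale_rt_capacity(): what is left for CFS, in capacity units. */
	unsigned long rt_scale = SCHED_CAPACITY_SCALE - rt_used;

	/* update_cpu_capacity(): combine and renormalize. */
	unsigned long capacity = arch_capacity * rt_scale >> SCHED_CAPACITY_SHIFT;
	if (!capacity)
		capacity = 1;

	printf("cpu_capacity = %lu\n", capacity);	/* 768 here */
	return 0;
}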
6849 
6850 void update_group_capacity(struct sched_domain *sd, int cpu)
6851 {
6852         struct sched_domain *child = sd->child;
6853         struct sched_group *group, *sdg = sd->groups;
6854         unsigned long capacity;
6855         unsigned long interval;
6856 
6857         interval = msecs_to_jiffies(sd->balance_interval);
6858         interval = clamp(interval, 1UL, max_load_balance_interval);
6859         sdg->sgc->next_update = jiffies + interval;
6860 
6861         if (!child) {
6862                 update_cpu_capacity(sd, cpu);
6863                 return;
6864         }
6865 
6866         capacity = 0;
6867 
6868         if (child->flags & SD_OVERLAP) {
6869                 /*
6870                  * SD_OVERLAP domains cannot assume that child groups
6871                  * span the current group.
6872                  */
6873 
6874                 for_each_cpu(cpu, sched_group_cpus(sdg)) {
6875                         struct sched_group_capacity *sgc;
6876                         struct rq *rq = cpu_rq(cpu);
6877 
6878                         /*
6879                          * build_sched_domains() -> init_sched_groups_capacity()
6880                          * gets here before we've attached the domains to the
6881                          * runqueues.
6882                          *
6883                          * Use capacity_of(), which is set irrespective of domains
6884                          * in update_cpu_capacity().
6885                          *
6886                          * This prevents capacity from being 0 and
6887                          * causing divide-by-zero issues on boot.
6888                          */
6889                         if (unlikely(!rq->sd)) {
6890                                 capacity += capacity_of(cpu);
6891                                 continue;
6892                         }
6893 
6894                         sgc = rq->sd->groups->sgc;
6895                         capacity += sgc->capacity;
6896                 }
6897         } else  {
6898                 /*
6899                  * !SD_OVERLAP domains can assume that child groups
6900                  * span the current group.
6901                  */
6902 
6903                 group = child->groups;
6904                 do {
6905                         capacity += group->sgc->capacity;
6906                         group = group->next;
6907                 } while (group != child->groups);
6908         }
6909 
6910         sdg->sgc->capacity = capacity;
6911 }
6912 
6913 /*
6914  * Check whether the capacity of the rq has been noticeably reduced by side
6915  * activity. The imbalance_pct is used for the threshold.
6916  * Return true if the capacity is reduced.
6917  */
6918 static inline int
6919 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
6920 {
6921         return ((rq->cpu_capacity * sd->imbalance_pct) <
6922                                 (rq->cpu_capacity_orig * 100));
6923 }
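With a typical imbalance_pct of 125 this test fires once side activity has taken away more than roughly 20% of the original capacity, as the following stand-alone sketch shows (numbers are illustrative):

#include <stdio.h>

static int capacity_reduced(unsigned long cap, unsigned long cap_orig,
			    unsigned int imbalance_pct)
{
	return cap * imbalance_pct < cap_orig * 100;
}

int main(void)
{
	printf("%d\n", capacity_reduced(900, 1024, 125));	/* 0: ~12% of capacity lost */
	printf("%d\n", capacity_reduced(700, 1024, 125));	/* 1: ~32% of capacity lost */
	return 0;
}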
6924 
6925 /*
6926  * Group imbalance indicates (and tries to solve) the problem where balancing
6927  * groups is inadequate due to tsk_cpus_allowed() constraints.
6928  *
6929  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6930  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6931  * Something like:
6932  *
6933  *      { 0 1 2 3 } { 4 5 6 7 }
6934  *              *     * * *
6935  *
6936  * If we were to balance group-wise we'd place two tasks in the first group and
6937  * two tasks in the second group. Clearly this is undesired as it will overload
6938  * cpu 3 and leave one of the cpus in the second group unused.
6939  *
6940  * The current solution to this issue is detecting the skew in the first group
6941  * by noticing the lower domain failed to reach balance and had difficulty
6942  * moving tasks due to affinity constraints.
6943  *
6944  * When this is so detected; this group becomes a candidate for busiest; see
6945  * update_sd_pick_busiest(). And calculate_imbalance() and
6946  * find_busiest_group() avoid some of the usual balance conditions to allow it
6947  * to create an effective group imbalance.
6948  *
6949  * This is a somewhat tricky proposition since the next run might not find the
6950  * group imbalance and decide the groups need to be balanced again. A most
6951  * subtle and fragile situation.
6952  */
6953 
6954 static inline int sg_imbalanced(struct sched_group *group)
6955 {
6956         return group->sgc->imbalance;
6957 }
6958 
6959 /*
6960  * group_has_capacity returns true if the group has spare capacity that could
6961  * be used by some tasks.
6962  * We consider that a group has spare capacity if the number of tasks is
6963  * smaller than the number of CPUs or if the utilization is lower than the
6964  * available capacity for CFS tasks.
6965  * For the latter, we use a threshold to stabilize the state, to take into
6966  * account the variance of the tasks' load and to return true if the available
6967  * capacity is meaningful for the load balancer.
6968  * As an example, an available capacity of 1% can appear but it doesn't
6969  * provide any benefit for load balancing.
6970  */
6971 static inline bool
6972 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
6973 {
6974         if (sgs->sum_nr_running < sgs->group_weight)
6975                 return true;
6976 
6977         if ((sgs->group_capacity * 100) >
6978                         (sgs->group_util * env->sd->imbalance_pct))
6979                 return true;
6980 
6981         return false;
6982 }
6983 
6984 /*
6985  *  group_is_overloaded returns true if the group has more tasks than it can
6986  *  handle.
6987  *  group_is_overloaded is not equal to !group_has_capacity because a group
6988  *  with exactly the right number of tasks has no spare capacity left but is
6989  *  not overloaded, so both group_has_capacity and group_is_overloaded return
6990  *  false.
6991  */
6992 static inline bool
6993 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6994 {
6995         if (sgs->sum_nr_running <= sgs->group_weight)
6996                 return false;
6997 
6998         if ((sgs->group_capacity * 100) <
6999                         (sgs->group_util * env->sd->imbalance_pct))
7000                 return true;
7001 
7002         return false;
7003 }
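Both helpers compare utilization against capacity through the domain's imbalance_pct and deliberately leave a middle band in which neither is true. An editor's stand-alone sketch with illustrative numbers (a 4-CPU group, four runnable tasks, roughly 81% utilized):

#include <stdio.h>

struct sg_stats {
	unsigned long group_capacity;
	unsigned long group_util;
	unsigned int  sum_nr_running;
	unsigned int  group_weight;
};

static int has_capacity(const struct sg_stats *s, unsigned int imb_pct)
{
	if (s->sum_nr_running < s->group_weight)
		return 1;
	return s->group_capacity * 100 > s->group_util * imb_pct;
}

static int is_overloaded(const struct sg_stats *s, unsigned int imb_pct)
{
	if (s->sum_nr_running <= s->group_weight)
		return 0;
	return s->group_capacity * 100 < s->group_util * imb_pct;
}

int main(void)
{
	struct sg_stats s = {
		.group_capacity	= 4096,
		.group_util	= 3300,
		.sum_nr_running	= 4,
		.group_weight	= 4,
	};

	/* Prints 0 and 0: the stable middle band between the two states. */
	printf("has_capacity=%d is_overloaded=%d\n",
	       has_capacity(&s, 125), is_overloaded(&s, 125));
	return 0;
}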
7004 
7005 static inline enum
7006 group_type group_classify(struct sched_group *group,
7007                           struct sg_lb_stats *sgs)
7008 {
7009         if (sgs->group_no_capacity)
7010                 return group_overloaded;
7011 
7012         if (sg_imbalanced(group))
7013                 return group_imbalanced;
7014 
7015         return group_other;
7016 }
7017 
7018 /**
7019  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
7020  * @env: The load balancing environment.
7021  * @group: sched_group whose statistics are to be updated.
7022  * @load_idx: Load index of sched_domain of this_cpu for load calc.
7023  * @local_group: Does group contain this_cpu.
7024  * @sgs: variable to hold the statistics for this group.
7025  * @overload: Indicate more than one runnable task for any CPU.
7026  */
7027 static inline void update_sg_lb_stats(struct lb_env *env,
7028                         struct sched_group *group, int load_idx,
7029                         int local_group, struct sg_lb_stats *sgs,
7030                         bool *overload)
7031 {
7032         unsigned long load;
7033         int i, nr_running;
7034 
7035         memset(sgs, 0, sizeof(*sgs));
7036 
7037         for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7038                 struct rq *rq = cpu_rq(i);
7039 
7040                 /* Bias balancing toward cpus of our domain */
7041                 if (local_group)
7042                         load = target_load(i, load_idx);
7043                 else
7044                         load = source_load(i, load_idx);
7045 
7046                 sgs->group_load += load;
7047                 sgs->group_util += cpu_util(i);
7048                 sgs->sum_nr_running += rq->cfs.h_nr_running;
7049 
7050                 nr_running = rq->nr_running;
7051                 if (nr_running > 1)
7052                         *overload = true;
7053 
7054 #ifdef CONFIG_NUMA_BALANCING
7055                 sgs->nr_numa_running += rq->nr_numa_running;
7056                 sgs->nr_preferred_running += rq->nr_preferred_running;
7057 #endif
7058                 sgs->sum_weighted_load += weighted_cpuload(i);
7059                 /*
7060                  * No need to call idle_cpu() if nr_running is not 0
7061                  */
7062                 if (!nr_running && idle_cpu(i))
7063                         sgs->idle_cpus++;
7064         }
7065 
7066         /* Adjust by relative CPU capacity of the group */
7067         sgs->group_capacity = group->sgc->capacity;
7068         sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
7069 
7070         if (sgs->sum_nr_running)
7071                 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
7072 
7073         sgs->group_weight = group->group_weight;
7074 
7075         sgs->group_no_capacity = group_is_overloaded(env, sgs);
7076         sgs->group_type = group_classify(group, sgs);
7077 }
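The avg_load computed above normalizes group_load by group_capacity so that groups of different sizes (or CPU types) compare on the same SCHED_CAPACITY_SCALE basis. A stand-alone numeric sketch:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
	/* A 2-CPU group with 3072 of load vs. a 4-CPU group with 4096 of load. */
	unsigned long small_load = 3072, small_cap = 2 * SCHED_CAPACITY_SCALE;
	unsigned long big_load = 4096, big_cap = 4 * SCHED_CAPACITY_SCALE;

	printf("small avg_load=%lu\n", small_load * SCHED_CAPACITY_SCALE / small_cap);
	printf("big   avg_load=%lu\n", big_load * SCHED_CAPACITY_SCALE / big_cap);
	/* 1536 vs. 1024: the smaller group is the more loaded one per unit capacity. */
	return 0;
}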
7078 
7079 /**
7080  * update_sd_pick_busiest - return 1 on busiest group
7081  * @env: The load balancing environment.
7082  * @sds: sched_domain statistics
7083  * @sg: sched_group candidate to be checked for being the busiest
7084  * @sgs: sched_group statistics
7085  *
7086  * Determine if @sg is a busier group than the previously selected
7087  * busiest group.
7088  *
7089  * Return: %true if @sg is a busier group than the previously selected
7090  * busiest group. %false otherwise.
7091  */
7092 static bool update_sd_pick_busiest(struct lb_env *env,
7093                                    struct sd_lb_stats *sds,
7094                                    struct sched_group *sg,
7095                                    struct sg_lb_stats *sgs)
7096 {
7097         struct sg_lb_stats *busiest = &sds->busiest_stat;
7098 
7099         if (sgs->group_type > busiest->group_type)
7100                 return true;
7101 
7102         if (sgs->group_type < busiest->group_type)
7103                 return false;
7104 
7105         if (sgs->avg_load <= busiest->avg_load)
7106                 return false;
7107 
7108         /* This is the busiest node in its class. */
7109         if (!(env->sd->flags & SD_ASYM_PACKING))
7110                 return true;
7111 
7112         /* No ASYM_PACKING if target cpu is already busy */
7113         if (env->idle == CPU_NOT_IDLE)
7114                 return true;
7115         /*
7116          * ASYM_PACKING needs to move all the work to the lowest
7117          * numbered CPUs in the group, therefore mark all groups
7118          * higher than ourself as busy.
7119          */
7120         if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
7121                 if (!sds->busiest)
7122                         return true;
7123 
7124                 /* Prefer to move work from the highest possible cpu */
7125                 if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
7126                         return true;
7127         }
7128 
7129         return false;
7130 }
7131 
7132 #ifdef CONFIG_NUMA_BALANCING
7133 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7134 {
7135         if (sgs->sum_nr_running > sgs->nr_numa_running)
7136                 return regular;
7137         if (sgs->sum_nr_running > sgs->nr_preferred_running)
7138                 return remote;
7139         return all;
7140 }
7141 
7142 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7143 {
7144         if (rq->nr_running > rq->nr_numa_running)
7145                 return regular;
7146         if (rq->nr_running > rq->nr_preferred_running)
7147                 return remote;
7148         return all;
7149 }
7150 #else
7151 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7152 {
7153         return all;
7154 }
7155 
7156 static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7157 {
7158         return regular;
7159 }
7160 #endif /* CONFIG_NUMA_BALANCING */
7161 
7162 /**
7163  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
7164  * @env: The load balancing environment.
7165  * @sds: variable to hold the statistics for this sched_domain.
7166  */
7167 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7168 {
7169         struct sched_domain *child = env->sd->child;
7170         struct sched_group *sg = env->sd->groups;
7171         struct sg_lb_stats tmp_sgs;
7172         int load_idx, prefer_sibling = 0;
7173         bool overload = false;
7174 
7175         if (child && child->flags & SD_PREFER_SIBLING)
7176                 prefer_sibling = 1;
7177 
7178         load_idx = get_sd_load_idx(env->sd, env->idle);
7179 
7180         do {
7181                 struct sg_lb_stats *sgs = &tmp_sgs;
7182                 int local_group;
7183 
7184                 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
7185                 if (local_group) {
7186                         sds->local = sg;
7187                         sgs = &sds->local_stat;
7188 
7189                         if (env->idle != CPU_NEWLY_IDLE ||
7190                             time_after_eq(jiffies, sg->sgc->next_update))
7191                                 update_group_capacity(env->sd, env->dst_cpu);
7192                 }
7193 
7194                 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7195                                                 &overload);
7196 
7197                 if (local_group)
7198                         goto next_group;
7199 
7200                 /*
7201                  * In case the child domain prefers tasks go to siblings
7202                  * first, lower the sg capacity so that we'll try
7203                  * and move all the excess tasks away. We lower the capacity
7204                  * of a group only if the local group has the capacity to fit
7205                  * these excess tasks. The extra check prevents the case where
7206                  * you always pull from the heaviest group when it is already
7207          * under-utilized (possible when a single large-weight task outweighs
7208          * the rest of the tasks on the system).
7209                  */
7210                 if (prefer_sibling && sds->local &&
7211                     group_has_capacity(env, &sds->local_stat) &&
7212                     (sgs->sum_nr_running > 1)) {
7213                         sgs->group_no_capacity = 1;
7214                         sgs->group_type = group_classify(sg, sgs);
7215                 }
7216 
7217                 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
7218                         sds->busiest = sg;
7219                         sds->busiest_stat = *sgs;
7220                 }
7221 
7222 next_group:
7223                 /* Now, start updating sd_lb_stats */
7224                 sds->total_load += sgs->group_load;
7225                 sds->total_capacity += sgs->group_capacity;
7226 
7227                 sg = sg->next;
7228         } while (sg != env->sd->groups);
7229 
7230         if (env->sd->flags & SD_NUMA)
7231                 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
7232 
7233         if (!env->sd->parent) {
7234                 /* update overload indicator if we are at root domain */
7235                 if (env->dst_rq->rd->overload != overload)
7236                         env->dst_rq->rd->overload = overload;
7237         }
7238 
7239 }
7240 
7241 /**
7242  * check_asym_packing - Check to see if the group is packed into the
7243  *                      sched domain.
7244  *
7245  * This is primarily intended to be used at the sibling level.  Some
7246  * cores like POWER7 prefer to use lower numbered SMT threads.  In the
7247  * case of POWER7, it can move to lower SMT modes only when higher
7248  * threads are idle.  When in lower SMT modes, the threads will
7249  * perform better since they share less core resources.  Hence when we
7250  * have idle threads, we want them to be the higher ones.
7251  *
7252  * This packing function is run on idle threads.  It checks to see if
7253  * the busiest CPU in this domain (core in the P7 case) has a higher
7254  * CPU number than the packing function is being run on.  Here we are
7255  * assuming a lower CPU number will be equivalent to a lower SMT thread
7256  * number.
7257  *
7258  * Return: 1 when packing is required and a task should be moved to
7259  * this CPU.  The amount of the imbalance is returned in env->imbalance.
7260  *
7261  * @env: The load balancing environment.
7262  * @sds: Statistics of the sched_domain which is to be packed
7263  */
7264 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
7265 {
7266         int busiest_cpu;
7267 
7268         if (!(env->sd->flags & SD_ASYM_PACKING))
7269                 return 0;
7270 
7271         if (env->idle == CPU_NOT_IDLE)
7272                 return 0;
7273 
7274         if (!sds->busiest)
7275                 return 0;
7276 
7277         busiest_cpu = group_first_cpu(sds->busiest);
7278         if (env->dst_cpu > busiest_cpu)
7279                 return 0;
7280 
7281         env->imbalance = DIV_ROUND_CLOSEST(
7282                 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
7283                 SCHED_CAPACITY_SCALE);
7284 
7285         return 1;
7286 }
7287 
7288 /**
7289  * fix_small_imbalance - Calculate the minor imbalance that exists
7290  *                      amongst the groups of a sched_domain, during
7291  *                      load balancing.
7292  * @env: The load balancing environment.
7293  * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
7294  */
7295 static inline
7296 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7297 {
7298         unsigned long tmp, capa_now = 0, capa_move = 0;
7299         unsigned int imbn = 2;
7300         unsigned long scaled_busy_load_per_task;
7301         struct sg_lb_stats *local, *busiest;
7302 
7303         local = &sds->local_stat;
7304         busiest = &sds->busiest_stat;
7305 
7306         if (!local->sum_nr_running)
7307                 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7308         else if (busiest->load_per_task > local->load_per_task)
7309                 imbn = 1;
7310 
7311         scaled_busy_load_per_task =
7312                 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7313                 busiest->group_capacity;
7314 
7315         if (busiest->avg_load + scaled_busy_load_per_task >=
7316             local->avg_load + (scaled_busy_load_per_task * imbn)) {
7317                 env->imbalance = busiest->load_per_task;
7318                 return;
7319         }
7320 
7321         /*
7322          * OK, we don't have enough imbalance to justify moving tasks,
7323          * however we may be able to increase total CPU capacity used by
7324          * moving them.
7325          */
7326 
7327         capa_now += busiest->group_capacity *
7328                         min(busiest->load_per_task, busiest->avg_load);
7329         capa_now += local->group_capacity *
7330                         min(local->load_per_task, local->avg_load);
7331         capa_now /= SCHED_CAPACITY_SCALE;
7332 
7333         /* Amount of load we'd subtract */
7334         if (busiest->avg_load > scaled_busy_load_per_task) {
7335                 capa_move += busiest->group_capacity *
7336                             min(busiest->load_per_task,
7337                                 busiest->avg_load - scaled_busy_load_per_task);
7338         }
7339 
7340         /* Amount of load we'd add */
7341         if (busiest->avg_load * busiest->group_capacity <
7342             busiest->load_per_task * SCHED_CAPACITY_SCALE) {
7343                 tmp = (busiest->avg_load * busiest->group_capacity) /
7344                       local->group_capacity;
7345         } else {
7346                 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
7347                       local->group_capacity;
7348         }
7349         capa_move += local->group_capacity *
7350                     min(local->load_per_task, local->avg_load + tmp);
7351         capa_move /= SCHED_CAPACITY_SCALE;
7352 
7353         /* Move if we gain throughput */
7354         if (capa_move > capa_now)
7355                 env->imbalance = busiest->load_per_task;
7356 }
7357 
7358 /**
7359  * calculate_imbalance - Calculate the amount of imbalance present within the
7360  *                       groups of a given sched_domain during load balance.
7361  * @env: load balance environment
7362  * @sds: statistics of the sched_domain whose imbalance is to be calculated.
7363  */
7364 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
7365 {
7366         unsigned long max_pull, load_above_capacity = ~0UL;
7367         struct sg_lb_stats *local, *busiest;
7368 
7369         local = &sds->local_stat;
7370         busiest = &sds->busiest_stat;
7371 
7372         if (busiest->group_type == group_imbalanced) {
7373                 /*
7374                  * In the group_imb case we cannot rely on group-wide averages
7375                  * to ensure cpu-load equilibrium, look at wider averages. XXX
7376                  */
7377                 busiest->load_per_task =
7378                         min(busiest->load_per_task, sds->avg_load);
7379         }
7380 
7381         /*
7382          * Avg load of busiest sg can be less and avg load of local sg can
7383          * be greater than avg load across all sgs of sd because avg load
7384          * factors in sg capacity and sgs with smaller group_type are
7385          * skipped when updating the busiest sg:
7386          */
7387         if (busiest->avg_load <= sds->avg_load ||
7388             local->avg_load >= sds->avg_load) {
7389                 env->imbalance = 0;
7390                 return fix_small_imbalance(env, sds);
7391         }
7392 
7393         /*
7394          * If there aren't any idle cpus, avoid creating some.
7395          */
7396         if (busiest->group_type == group_overloaded &&
7397             local->group_type   == group_overloaded) {
7398                 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
7399                 if (load_above_capacity > busiest->group_capacity) {
7400                         load_above_capacity -= busiest->group_capacity;
7401                         load_above_capacity *= scale_load_down(NICE_0_LOAD);
7402                         load_above_capacity /= busiest->group_capacity;
7403                 } else
7404                         load_above_capacity = ~0UL;
7405         }
7406 
7407         /*
7408          * We're trying to get all the cpus to the average_load, so we don't
7409          * want to push ourselves above the average load, nor do we wish to
7410          * reduce the max loaded cpu below the average load. At the same time,
7411          * we also don't want to reduce the group load below the group
7412          * capacity. Thus we look for the minimum possible imbalance.
7413          */
7414         max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
7415 
7416         /* How much load to actually move to equalise the imbalance */
7417         env->imbalance = min(
7418                 max_pull * busiest->group_capacity,
7419                 (sds->avg_load - local->avg_load) * local->group_capacity
7420         ) / SCHED_CAPACITY_SCALE;
7421 
7422         /*
7423          * if *imbalance is less than the average load per runnable task
7424          * there is no guarantee that any tasks will be moved so we'll have
7425          * a think about bumping its value to force at least one task to be
7426          * moved
7427          */
7428         if (env->imbalance < busiest->load_per_task)
7429                 return fix_small_imbalance(env, sds);
7430 }
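A stand-alone numeric walk through the main branch above (ignoring the group_imbalanced and fix_small_imbalance() special cases), showing how max_pull keeps the move from dragging the busiest group below the domain average or pushing the local group above it; all numbers are illustrative:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Illustrative avg_load figures for the busiest group, the local group
	 * and the whole domain, plus equal group capacities. */
	unsigned long busiest_avg = 1600, local_avg = 800, sds_avg = 1200;
	unsigned long busiest_cap = 1024, local_cap = 1024;
	unsigned long load_above_capacity = ~0UL;	/* not both overloaded here */

	unsigned long max_pull = min_ul(busiest_avg - sds_avg, load_above_capacity);

	unsigned long imbalance = min_ul(max_pull * busiest_cap,
					 (sds_avg - local_avg) * local_cap)
				  / SCHED_CAPACITY_SCALE;

	/* Prints 400 and 400: just enough load to pull both groups toward 1200. */
	printf("max_pull=%lu imbalance=%lu\n", max_pull, imbalance);
	return 0;
}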
7431 
7432 /******* find_busiest_group() helpers end here *********************/
7433 
7434 /**
7435  * find_busiest_group - Returns the busiest group within the sched_domain
7436  * if there is an imbalance.
7437  *
7438  * Also calculates the amount of weighted load which should be moved
7439  * to restore balance.
7440  *
7441  * @env: The load balancing environment.
7442  *
7443  * Return:      - The busiest group if imbalance exists.
7444  */
7445 static struct sched_group *find_busiest_group(struct lb_env *env)
7446 {
7447         struct sg_lb_stats *local, *busiest;
7448         struct sd_lb_stats sds;
7449 
7450         init_sd_lb_stats(&sds);
7451 
7452         /*
7453          * Compute the various statistics relevant for load balancing at
7454          * this level.
7455          */
7456         update_sd_lb_stats(env, &sds);
7457         local = &sds.local_stat;
7458         busiest = &sds.busiest_stat;
7459 
7460         /* ASYM feature bypasses nice load balance check */
7461         if (check_asym_packing(env, &sds))
7462                 return sds.busiest;
7463 
7464         /* There is no busy sibling group to pull tasks from */
7465         if (!sds.busiest || busiest->sum_nr_running == 0)
7466                 goto out_balanced;
7467 
7468         sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
7469                                                 / sds.total_capacity;
7470 
7471         /*
7472          * If the busiest group is imbalanced the below checks don't
7473          * work because they assume all things are equal, which typically
7474          * isn't true due to cpus_allowed constraints and the like.
7475          */
7476         if (busiest->group_type == group_imbalanced)
7477                 goto force_balance;
7478 
7479         /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
7480         if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
7481             busiest->group_no_capacity)
7482                 goto force_balance;
7483 
7484         /*
7485          * If the local group is busier than the selected busiest group
7486          * don't try to pull any tasks.
7487          */
7488         if (local->avg_load >= busiest->avg_load)
7489                 goto out_balanced;
7490 
7491         /*
7492          * Don't pull any tasks if this group is already above the domain
7493          * average load.
7494          */
7495         if (local->avg_load >= sds.avg_load)
7496                 goto out_balanced;
7497 
7498         if (env->idle == CPU_IDLE) {
7499                 /*
7500                  * This cpu is idle. If the busiest group is not overloaded
7501                  * and there is no imbalance between this and busiest group
7502                  * wrt idle cpus, it is balanced. The imbalance only becomes
7503                  * significant if the difference is greater than 1; otherwise we
7504                  * might end up just moving the imbalance to another group.
7505                  */
7506                 if ((busiest->group_type != group_overloaded) &&
7507                                 (local->idle_cpus <= (busiest->idle_cpus + 1)))
7508                         goto out_balanced;
7509         } else {
7510                 /*
7511                  * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7512                  * imbalance_pct to be conservative.
7513                  */
7514                 if (100 * busiest->avg_load <=
7515                                 env->sd->imbalance_pct * local->avg_load)
7516                         goto out_balanced;
7517         }
7518 
7519 force_balance:
7520         /* Looks like there is an imbalance. Compute it */
7521         calculate_imbalance(env, &sds);
7522         return sds.busiest;
7523 
7524 out_balanced:
7525         env->imbalance = 0;
7526         return NULL;
7527 }
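/*
 * Editor's note: standalone sketch (not fair.c code) of two of the checks
 * find_busiest_group() applies above: the domain-wide average load and the
 * imbalance_pct comparison used in the !CPU_IDLE case. All numbers are
 * hypothetical.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
        unsigned long total_load     = 3000;  /* sum of group loads in the sd */
        unsigned long total_capacity = 4096;  /* four CPUs worth of capacity  */
        unsigned long busiest_avg    = 1100;  /* busiest group avg load       */
        unsigned long local_avg      =  900;  /* local group avg load         */
        unsigned int  imbalance_pct  =  117;  /* a typical sd->imbalance_pct  */

        unsigned long sds_avg = SCHED_CAPACITY_SCALE * total_load / total_capacity;

        /*
         * The busiest group only counts as imbalanced if it exceeds the local
         * group by more than imbalance_pct percent (117 => ~17% headroom).
         */
        int balanced = 100 * busiest_avg <= imbalance_pct * local_avg;

        printf("sds.avg_load=%lu -> %s\n", sds_avg,
               balanced ? "out_balanced" : "compute imbalance");
        return 0;
}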
7528 
7529 /*
7530  * find_busiest_queue - find the busiest runqueue among the cpus in group.
7531  */
7532 static struct rq *find_busiest_queue(struct lb_env *env,
7533                                      struct sched_group *group)
7534 {
7535         struct rq *busiest = NULL, *rq;
7536         unsigned long busiest_load = 0, busiest_capacity = 1;
7537         int i;
7538 
7539         for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
7540                 unsigned long capacity, wl;
7541                 enum fbq_type rt;
7542 
7543                 rq = cpu_rq(i);
7544                 rt = fbq_classify_rq(rq);
7545 
7546                 /*
7547                  * We classify groups/runqueues into three types:
7548                  *  - regular: there are !numa tasks
7549                  *  - remote:  there are numa tasks that run on the 'wrong' node
7550                  *  - all:     there is no distinction
7551                  *
7552                  * In order to avoid migrating ideally placed numa tasks,
7553                  * ignore them when there are better options.
7554                  *
7555                  * If we ignore the actual busiest queue to migrate another
7556                  * task, the next balance pass can still reduce the busiest
7557                  * queue by moving tasks around inside the node.
7558                  *
7559                  * If we cannot move enough load due to this classification
7560                  * the next pass will adjust the group classification and
7561                  * allow migration of more tasks.
7562                  *
7563                  * Both cases only affect the total convergence complexity.
7564                  */
7565                 if (rt > env->fbq_type)
7566                         continue;
7567 
7568                 capacity = capacity_of(i);
7569 
7570                 wl = weighted_cpuload(i);
7571 
7572                 /*
7573                  * When comparing with imbalance, use weighted_cpuload()
7574                  * which is not scaled with the cpu capacity.
7575                  */
7576 
7577                 if (rq->nr_running == 1 && wl > env->imbalance &&
7578                     !check_cpu_capacity(rq, env->sd))
7579                         continue;
7580 
7581                 /*
7582                  * For the load comparisons with the other cpu's, consider
7583                  * the weighted_cpuload() scaled with the cpu capacity, so
7584                  * that the load can be moved away from the cpu that is
7585                  * potentially running at a lower capacity.
7586                  *
7587                  * Thus we're looking for max(wl_i / capacity_i), crosswise
7588                  * multiplication to rid ourselves of the division works out
7589                  * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
7590                  * our previous maximum.
7591                  */
7592                 if (wl * busiest_capacity > busiest_load * capacity) {
7593                         busiest_load = wl;
7594                         busiest_capacity = capacity;
7595                         busiest = rq;
7596                 }
7597         }
7598 
7599         return busiest;
7600 }
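/*
 * Editor's note: minimal sketch (not fair.c) of the cross-multiplication used
 * above to pick the runqueue with the highest load-to-capacity ratio without
 * doing a division per CPU. The arrays and values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned long wl[]  = {  800, 1500,  900 };  /* weighted_cpuload(i) */
        unsigned long cap[] = { 1024, 1024,  512 };  /* capacity_of(i)      */
        unsigned long busiest_load = 0, busiest_capacity = 1;
        int i, busiest = -1;

        for (i = 0; i < 3; i++) {
                /* wl[i]/cap[i] > busiest_load/busiest_capacity, division-free */
                if (wl[i] * busiest_capacity > busiest_load * cap[i]) {
                        busiest_load = wl[i];
                        busiest_capacity = cap[i];
                        busiest = i;
                }
        }

        /* index 2 wins: 900/512 is a worse ratio than 1500/1024 looks at first glance */
        printf("busiest cpu index: %d\n", busiest);
        return 0;
}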
7601 
7602 /*
7603  * Max backoff if we encounter pinned tasks. The value is fairly arbitrary;
7604  * it just needs to be large enough.
7605  */
7606 #define MAX_PINNED_INTERVAL     512
7607 
7608 static int need_active_balance(struct lb_env *env)
7609 {
7610         struct sched_domain *sd = env->sd;
7611 
7612         if (env->idle == CPU_NEWLY_IDLE) {
7613 
7614                 /*
7615                  * ASYM_PACKING needs to force migrate tasks from busy but
7616                  * higher numbered CPUs in order to pack all tasks in the
7617                  * lowest numbered CPUs.
7618                  */
7619                 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
7620                         return 1;
7621         }
7622 
7623         /*
7624          * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
7625          * It's worth migrating the task if the src_cpu's capacity is reduced
7626          * because of other sched_class or IRQs if more capacity stays
7627          * available on dst_cpu.
7628          */
7629         if ((env->idle != CPU_NOT_IDLE) &&
7630             (env->src_rq->cfs.h_nr_running == 1)) {
7631                 if ((check_cpu_capacity(env->src_rq, sd)) &&
7632                     (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7633                         return 1;
7634         }
7635 
7636         return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7637 }
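/*
 * Editor's note: hypothetical sketch (not fair.c) of the capacity comparison
 * used above: migrating the lone CFS task pays off only if the source CPU has
 * lost noticeably more capacity (to RT or IRQ work) than the destination.
 * The values and the 117 threshold are illustrative.
 */
#include <stdio.h>

int main(void)
{
        unsigned long src_capacity  = 700;   /* capacity left for CFS on src */
        unsigned long dst_capacity  = 1020;  /* capacity left for CFS on dst */
        unsigned int  imbalance_pct = 117;

        /* same comparison as the code above, rearranged to avoid fractions */
        if (src_capacity * imbalance_pct < dst_capacity * 100)
                printf("worth an active balance to the idle dst cpu\n");
        else
                printf("capacity gap too small, leave the task where it is\n");
        return 0;
}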
7638 
7639 static int active_load_balance_cpu_stop(void *data);
7640 
7641 static int should_we_balance(struct lb_env *env)
7642 {
7643         struct sched_group *sg = env->sd->groups;
7644         struct cpumask *sg_cpus, *sg_mask;
7645         int cpu, balance_cpu = -1;
7646 
7647         /*
7648          * In the newly idle case, we will allow all the cpu's
7649          * to do the newly idle load balance.
7650          */
7651         if (env->idle == CPU_NEWLY_IDLE)
7652                 return 1;
7653 
7654         sg_cpus = sched_group_cpus(sg);
7655         sg_mask = sched_group_mask(sg);
7656         /* Try to find first idle cpu */
7657         for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7658                 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7659                         continue;
7660 
7661                 balance_cpu = cpu;
7662                 break;
7663         }
7664 
7665         if (balance_cpu == -1)
7666                 balance_cpu = group_balance_cpu(sg);
7667 
7668         /*
7669          * The first idle cpu, or otherwise the group's balance cpu, is the one
7670          * eligible for doing load balancing at this and higher domains.
7671          */
7672         return balance_cpu == env->dst_cpu;
7673 }
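/*
 * Editor's note: hypothetical sketch (not fair.c) of the election done by
 * should_we_balance(): the first idle CPU in the group (or, failing that, the
 * group's designated balance CPU) is the only one that proceeds, so a domain
 * is balanced by at most one CPU at a time. CPU numbers are invented.
 */
#include <stdio.h>

int main(void)
{
        int group_cpus[] = { 4, 5, 6, 7 };   /* CPUs in this sched_group      */
        int idle[]       = { 0, 0, 1, 1 };   /* 1 = idle_cpu(cpu) would hold  */
        int this_cpu = 6;                    /* the CPU running load_balance  */
        int balance_cpu = -1, i;

        for (i = 0; i < 4; i++) {
                if (idle[i]) {               /* first idle CPU wins           */
                        balance_cpu = group_cpus[i];
                        break;
                }
        }
        if (balance_cpu == -1)
                balance_cpu = group_cpus[0]; /* fallback: group balance CPU   */

        printf("cpu %d %s balance this domain\n", this_cpu,
               balance_cpu == this_cpu ? "should" : "should not");
        return 0;
}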
7674 
7675 /*
7676  * Check this_cpu to ensure it is balanced within domain. Attempt to move
7677  * tasks if there is an imbalance.
7678  */
7679 static int load_balance(int this_cpu, struct rq *this_rq,
7680                         struct sched_domain *sd, enum cpu_idle_type idle,
7681                         int *continue_balancing)
7682 {
7683         int ld_moved, cur_ld_moved, active_balance = 0;
7684         struct sched_domain *sd_parent = sd->parent;
7685         struct sched_group *group;
7686         struct rq *busiest;
7687         unsigned long flags;
7688         struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
7689 
7690         struct lb_env env = {
7691                 .sd             = sd,
7692                 .dst_cpu        = this_cpu,
7693                 .dst_rq         = this_rq,
7694                 .dst_grpmask    = sched_group_cpus(sd->groups),
7695                 .idle           = idle,
7696                 .loop_break     = sched_nr_migrate_break,
7697                 .cpus           = cpus,
7698                 .fbq_type       = all,
7699                 .tasks          = LIST_HEAD_INIT(env.tasks),
7700         };
7701 
7702         /*
7703          * For NEWLY_IDLE load_balancing, we don't need to consider
7704          * other cpus in our group
7705          */
7706         if (idle == CPU_NEWLY_IDLE)
7707                 env.dst_grpmask = NULL;
7708 
7709         cpumask_copy(cpus, cpu_active_mask);
7710 
7711         schedstat_inc(sd->lb_count[idle]);
7712 
7713 redo:
7714         if (!should_we_balance(&env)) {
7715                 *continue_balancing = 0;
7716                 goto out_balanced;
7717         }
7718 
7719         group = find_busiest_group(&env);
7720         if (!group) {
7721                 schedstat_inc(sd->lb_nobusyg[idle]);
7722                 goto out_balanced;
7723         }
7724 
7725         busiest = find_busiest_queue(&env, group);
7726         if (!busiest) {
7727                 schedstat_inc(sd->lb_nobusyq[idle]);
7728                 goto out_balanced;
7729         }
7730 
7731         BUG_ON(busiest == env.dst_rq);
7732 
7733         schedstat_add(sd->lb_imbalance[idle], env.imbalance);
7734 
7735         env.src_cpu = busiest->cpu;
7736         env.src_rq = busiest;
7737 
7738         ld_moved = 0;
7739         if (busiest->nr_running > 1) {
7740                 /*
7741                  * Attempt to move tasks. If find_busiest_group has found
7742                  * an imbalance but busiest->nr_running <= 1, the group is
7743                  * still unbalanced. ld_moved simply stays zero, so it is
7744                  * correctly treated as an imbalance.
7745                  */
7746                 env.flags |= LBF_ALL_PINNED;
7747                 env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
7748 
7749 more_balance:
7750                 raw_spin_lock_irqsave(&busiest->lock, flags);
7751 
7752                 /*
7753                  * cur_ld_moved - load moved in current iteration
7754                  * ld_moved     - cumulative load moved across iterations
7755                  */
7756                 cur_ld_moved = detach_tasks(&env);
7757 
7758                 /*
7759                  * We've detached some tasks from busiest_rq. Every
7760                  * task is marked "TASK_ON_RQ_MIGRATING", so we can safely
7761                  * unlock busiest->lock and be sure that nobody can
7762                  * manipulate those tasks in parallel.
7763                  * See the task_rq_lock() family for the details.
7764                  */
7765 
7766                 raw_spin_unlock(&busiest->lock);
7767 
7768                 if (cur_ld_moved) {
7769                         attach_tasks(&env);
7770                         ld_moved += cur_ld_moved;
7771                 }
7772 
7773                 local_irq_restore(flags);
7774 
7775                 if (env.flags & LBF_NEED_BREAK) {
7776                         env.flags &= ~LBF_NEED_BREAK;
7777                         goto more_balance;
7778                 }
7779 
7780                 /*
7781                  * Revisit (affine) tasks on src_cpu that couldn't be moved to
7782                  * us and move them to an alternate dst_cpu in our sched_group
7783                  * where they can run. The upper limit on how many times we
7784                  * iterate on same src_cpu is dependent on number of cpus in our
7785                  * sched_group.
7786                  *
7787                  * This changes load balance semantics a bit on who can move
7788                  * load to a given_cpu. In addition to the given_cpu itself
7789                  * (or an ilb_cpu acting on its behalf where given_cpu is
7790                  * nohz-idle), we now have balance_cpu in a position to move
7791                  * load to given_cpu. In rare situations, this may cause
7792                  * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7793                  * _independently_ and at _same_ time to move some load to
7794                  * given_cpu) causing excess load to be moved to given_cpu.
7795                  * This however should not happen so much in practice and
7796                  * moreover subsequent load balance cycles should correct the
7797                  * excess load moved.
7798                  */
7799                 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
7800 
7801                         /* Prevent re-selecting dst_cpu via env's cpus */
7802                         cpumask_clear_cpu(env.dst_cpu, env.cpus);
7803 
7804                         env.dst_rq       = cpu_rq(env.new_dst_cpu);
7805                         env.dst_cpu      = env.new_dst_cpu;
7806                         env.flags       &= ~LBF_DST_PINNED;
7807                         env.loop         = 0;
7808                         env.loop_break   = sched_nr_migrate_break;
7809 
7810                         /*
7811                          * Go back to "more_balance" rather than "redo" since we
7812                          * need to continue with same src_cpu.
7813                          */
7814                         goto more_balance;
7815                 }
7816 
7817                 /*
7818                  * We failed to reach balance because of affinity.
7819                  */
7820                 if (sd_parent) {
7821                         int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7822 
7823                         if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
7824                                 *group_imbalance = 1;
7825                 }
7826 
7827                 /* All tasks on this runqueue were pinned by CPU affinity */
7828                 if (unlikely(env.flags & LBF_ALL_PINNED)) {
7829                         cpumask_clear_cpu(cpu_of(busiest), cpus);
7830                         if (!cpumask_empty(cpus)) {
7831                                 env.loop = 0;
7832                                 env.loop_break = sched_nr_migrate_break;
7833                                 goto redo;
7834                         }
7835                         goto out_all_pinned;
7836                 }
7837         }
7838 
7839         if (!ld_moved) {
7840                 schedstat_inc(sd->lb_failed[idle]);
7841                 /*
7842                  * Increment the failure counter only on periodic balance.
7843                  * We do not want newidle balance, which can be very
7844                  * frequent, to pollute the failure counter and cause
7845                  * excessive cache-hot migrations and active balances.
7846                  */
7847                 if (idle != CPU_NEWLY_IDLE)
7848                         sd->nr_balance_failed++;
7849 
7850                 if (need_active_balance(&env)) {
7851                         raw_spin_lock_irqsave(&busiest->lock, flags);
7852 
7853                         /* don't kick the active_load_balance_cpu_stop,
7854                          * if the curr task on busiest cpu can't be
7855                          * moved to this_cpu
7856                          */
7857                         if (!cpumask_test_cpu(this_cpu,
7858                                         tsk_cpus_allowed(busiest->curr))) {
7859                                 raw_spin_unlock_irqrestore(&busiest->lock,
7860                                                             flags);
7861                                 env.flags |= LBF_ALL_PINNED;
7862                                 goto out_one_pinned;
7863                         }
7864 
7865                         /*
7866                          * ->active_balance synchronizes accesses to
7867                          * ->active_balance_work.  Once set, it's cleared
7868                          * only after active load balance is finished.
7869                          */
7870                         if (!busiest->active_balance) {
7871                                 busiest->active_balance = 1;
7872                                 busiest->push_cpu = this_cpu;
7873                                 active_balance = 1;
7874                         }
7875                         raw_spin_unlock_irqrestore(&busiest->lock, flags);
7876 
7877                         if (active_balance) {
7878                                 stop_one_cpu_nowait(cpu_of(busiest),
7879                                         active_load_balance_cpu_stop, busiest,
7880                                         &busiest->active_balance_work);
7881                         }
7882 
7883                         /* We've kicked active balancing, force task migration. */
7884                         sd->nr_balance_failed = sd->cache_nice_tries+1;
7885                 }
7886         } else
7887                 sd->nr_balance_failed = 0;
7888 
7889         if (likely(!active_balance)) {
7890                 /* We were unbalanced, so reset the balancing interval */
7891                 sd->balance_interval = sd->min_interval;
7892         } else {
7893                 /*
7894                  * If we've begun active balancing, start to back off. This
7895                  * case may not be covered by the all_pinned logic if there
7896                  * is only 1 task on the busy runqueue (because we don't call
7897                  * detach_tasks).
7898                  */
7899                 if (sd->balance_interval < sd->max_interval)
7900                         sd->balance_interval *= 2;
7901         }
7902 
7903         goto out;
7904 
7905 out_balanced:
7906         /*
7907          * We reach balance although we may have faced some affinity
7908          * constraints. Clear the imbalance flag if it was set.
7909          */
7910         if (sd_parent) {
7911                 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7912 
7913                 if (*group_imbalance)
7914                         *group_imbalance = 0;
7915         }
7916 
7917 out_all_pinned:
7918         /*
7919          * We reach balance because all tasks are pinned at this level so
7920  * we can't migrate them. Leave the imbalance flag set so the parent
7921  * level can try to migrate them.
7922          */
7923         schedstat_inc(sd->lb_balanced[idle]);
7924 
7925         sd->nr_balance_failed = 0;
7926 
7927 out_one_pinned:
7928         /* tune up the balancing interval */
7929         if (((env.flags & LBF_ALL_PINNED) &&
7930                         sd->balance_interval < MAX_PINNED_INTERVAL) ||
7931                         (sd->balance_interval < sd->max_interval))
7932                 sd->balance_interval *= 2;
7933 
7934         ld_moved = 0;
7935 out:
7936         return ld_moved;
7937 }
7938 
7939 static inline unsigned long
7940 get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7941 {
7942         unsigned long interval = sd->balance_interval;
7943 
7944         if (cpu_busy)
7945                 interval *= sd->busy_factor;
7946 
7947         /* scale ms to jiffies */
7948         interval = msecs_to_jiffies(interval);
7949         interval = clamp(interval, 1UL, max_load_balance_interval);
7950 
7951         return interval;
7952 }
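/*
 * Editor's note: standalone sketch (not fair.c) of how the per-domain balance
 * interval is derived above: a millisecond interval scaled by busy_factor when
 * the CPU is busy, converted to jiffies and clamped. HZ, the clamp ceiling and
 * all values are assumptions made for the example; the helper below is a
 * simplified stand-in for the kernel's conversion.
 */
#include <stdio.h>

#define HZ 250UL  /* assumed tick rate for the example */

static unsigned long msecs_to_jiffies_approx(unsigned long ms)
{
        return ms * HZ / 1000;  /* simplified; the kernel rounds up */
}

int main(void)
{
        unsigned long balance_interval = 8;    /* ms, e.g. sd->min_interval  */
        unsigned long busy_factor = 32;        /* typical sd->busy_factor    */
        unsigned long max_interval = HZ / 10;  /* clamp ceiling, in jiffies  */
        int cpu_busy = 1;

        unsigned long interval = balance_interval;
        if (cpu_busy)
                interval *= busy_factor;       /* busy CPUs balance less often */

        interval = msecs_to_jiffies_approx(interval);
        if (interval < 1)
                interval = 1;
        if (interval > max_interval)
                interval = max_interval;

        printf("interval = %lu jiffies\n", interval);
        return 0;
}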
7953 
7954 static inline void
7955 update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
7956 {
7957         unsigned long interval, next;
7958 
7959         /* used by idle balance, so cpu_busy = 0 */
7960         interval = get_sd_balance_interval(sd, 0);
7961         next = sd->last_balance + interval;
7962 
7963         if (time_after(*next_balance, next))
7964                 *next_balance = next;
7965 }
7966 
7967 /*
7968  * idle_balance is called by schedule() if this_cpu is about to become
7969  * idle. Attempts to pull tasks from other CPUs.
7970  */
7971 static int idle_balance(struct rq *this_rq)
7972 {
7973         unsigned long next_balance = jiffies + HZ;
7974         int this_cpu = this_rq->cpu;
7975         struct sched_domain *sd;
7976         int pulled_task = 0;
7977         u64 curr_cost = 0;
7978 
7979         /*
7980          * We must set idle_stamp _before_ calling idle_balance(), such that we
7981          * measure the duration of idle_balance() as idle time.
7982          */
7983         this_rq->idle_stamp = rq_clock(this_rq);
7984 
7985         if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7986             !this_rq->rd->overload) {
7987                 rcu_read_lock();
7988                 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7989                 if (sd)
7990                         update_next_balance(sd, &next_balance);
7991                 rcu_read_unlock();
7992 
7993                 goto out;
7994         }
7995 
7996         raw_spin_unlock(&this_rq->lock);
7997 
7998         update_blocked_averages(this_cpu);
7999         rcu_read_lock();
8000         for_each_domain(this_cpu, sd) {
8001                 int continue_balancing = 1;
8002                 u64 t0, domain_cost;
8003 
8004                 if (!(sd->flags & SD_LOAD_BALANCE))
8005                         continue;
8006 
8007                 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
8008                         update_next_balance(sd, &next_balance);
8009                         break;
8010                 }
8011 
8012                 if (sd->flags & SD_BALANCE_NEWIDLE) {
8013                         t0 = sched_clock_cpu(this_cpu);
8014 
8015                         pulled_task = load_balance(this_cpu, this_rq,
8016                                                    sd, CPU_NEWLY_IDLE,
8017                                                    &continue_balancing);
8018 
8019                         domain_cost = sched_clock_cpu(this_cpu) - t0;
8020                         if (domain_cost > sd->max_newidle_lb_cost)
8021                                 sd->max_newidle_lb_cost = domain_cost;
8022 
8023                         curr_cost += domain_cost;
8024                 }
8025 
8026                 update_next_balance(sd, &next_balance);
8027 
8028                 /*
8029                  * Stop searching for tasks to pull if there are
8030                  * now runnable tasks on this rq.
8031                  */
8032                 if (pulled_task || this_rq->nr_running > 0)
8033                         break;
8034         }
8035         rcu_read_unlock();
8036 
8037         raw_spin_lock(&this_rq->lock);
8038 
8039         if (curr_cost > this_rq->max_idle_balance_cost)
8040                 this_rq->max_idle_balance_cost = curr_cost;
8041 
8042         /*
8043          * While browsing the domains we released the rq lock; a task could
8044          * have been enqueued in the meantime. Since we're not going idle,
8045          * pretend we pulled a task.
8046          */
8047         if (this_rq->cfs.h_nr_running && !pulled_task)
8048                 pulled_task = 1;
8049 
8050 out:
8051         /* Move the next balance forward */
8052         if (time_after(this_rq->next_balance, next_balance))
8053                 this_rq->next_balance = next_balance;
8054 
8055         /* Is there a task of a high priority class? */
8056         if (this_rq->nr_running != this_rq->cfs.h_nr_running)
8057                 pulled_task = -1;
8058 
8059         if (pulled_task)
8060                 this_rq->idle_stamp = 0;
8061 
8062         return pulled_task;
8063 }
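/*
 * Editor's note: minimal sketch (not fair.c) of the cost gating idle_balance()
 * applies above: a domain is only scanned while the expected idle time still
 * exceeds the cost already spent plus that domain's worst observed newidle
 * cost. All numbers are invented.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long avg_idle = 500000;        /* ns we expect to stay idle */
        unsigned long long max_newidle_lb_cost[] =   /* per-domain worst cost, ns */
                { 120000, 200000, 400000 };
        unsigned long long curr_cost = 0;
        int level;

        for (level = 0; level < 3; level++) {
                if (avg_idle < curr_cost + max_newidle_lb_cost[level]) {
                        printf("stop before domain level %d (too expensive)\n", level);
                        break;
                }
                /* pretend the balance attempt cost exactly its historical max */
                curr_cost += max_newidle_lb_cost[level];
                printf("balanced domain level %d, curr_cost=%llu ns\n",
                       level, curr_cost);
        }
        return 0;
}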
8064 
8065 /*
8066  * active_load_balance_cpu_stop is run by cpu stopper. It pushes
8067  * running tasks off the busiest CPU onto idle CPUs. It requires at
8068  * least 1 task to be running on each physical CPU where possible, and
8069  * avoids physical / logical imbalances.
8070  */
8071 static int active_load_balance_cpu_stop(void *data)
8072 {
8073         struct rq *busiest_rq = data;
8074         int busiest_cpu = cpu_of(busiest_rq);
8075         int target_cpu = busiest_rq->push_cpu;
8076         struct rq *target_rq = cpu_rq(target_cpu);
8077         struct sched_domain *sd;
8078         struct task_struct *p = NULL;
8079 
8080         raw_spin_lock_irq(&busiest_rq->lock);
8081 
8082         /* make sure the requested cpu hasn't gone down in the meantime */
8083         if (unlikely(busiest_cpu != smp_processor_id() ||
8084                      !busiest_rq->active_balance))
8085                 goto out_unlock;
8086 
8087         /* Is there any task to move? */
8088         if (busiest_rq->nr_running <= 1)
8089                 goto out_unlock;
8090 
8091         /*
8092          * This condition is "impossible"; if it occurs
8093          * we need to fix it. Originally reported by
8094          * Bjorn Helgaas on a 128-cpu setup.
8095          */
8096         BUG_ON(busiest_rq == target_rq);
8097 
8098         /* Search for an sd spanning us and the target CPU. */
8099         rcu_read_lock();
8100         for_each_domain(target_cpu, sd) {
8101                 if ((sd->flags & SD_LOAD_BALANCE) &&
8102                     cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8103                                 break;
8104         }
8105 
8106         if (likely(sd)) {
8107                 struct lb_env env = {
8108                         .sd             = sd,
8109                         .dst_cpu        = target_cpu,
8110                         .dst_rq         = target_rq,
8111                         .src_cpu        = busiest_rq->cpu,
8112                         .src_rq         = busiest_rq,
8113                         .idle           = CPU_IDLE,
8114                 };
8115 
8116                 schedstat_inc(sd->alb_count);
8117 
8118                 p = detach_one_task(&env);
8119                 if (p) {
8120                         schedstat_inc(sd->alb_pushed);
8121                         /* Active balancing done, reset the failure counter. */
8122                         sd->nr_balance_failed = 0;
8123                 } else {
8124                         schedstat_inc(sd->alb_failed);
8125                 }
8126         }
8127         rcu_read_unlock();
8128 out_unlock:
8129         busiest_rq->active_balance = 0;
8130         raw_spin_unlock(&busiest_rq->lock);
8131 
8132         if (p)
8133                 attach_one_task(target_rq, p);
8134 
8135         local_irq_enable();
8136 
8137         return 0;
8138 }
8139 
8140 static inline int on_null_domain(struct rq *rq)
8141 {
8142         return unlikely(!rcu_dereference_sched(rq->sd));
8143 }
8144 
8145 #ifdef CONFIG_NO_HZ_COMMON
8146 /*
8147  * idle load balancing details
8148  * - When one of the busy CPUs notices that idle rebalancing may be
8149  *   needed, it kicks the idle load balancer, which then does idle
8150  *   load balancing for all the idle CPUs.
8151  */
8152 static struct {
8153         cpumask_var_t idle_cpus_mask;
8154         atomic_t nr_cpus;
8155         unsigned long next_balance;     /* in jiffy units */
8156 } nohz ____cacheline_aligned;
8157 
8158 static inline int find_new_ilb(void)
8159 {
8160         int ilb = cpumask_first(nohz.idle_cpus_mask);
8161 
8162         if (ilb < nr_cpu_ids && idle_cpu(ilb))
8163                 return ilb;
8164 
8165         return nr_cpu_ids;
8166 }
8167 
8168 /*
8169  * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8170  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
8171  * CPU (if there is one).
8172  */
8173 static void nohz_balancer_kick(void)
8174 {
8175         int ilb_cpu;
8176 
8177         nohz.next_balance++;
8178 
8179         ilb_cpu = find_new_ilb();
8180 
8181         if (ilb_cpu >= nr_cpu_ids)
8182                 return;
8183 
8184         if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
8185                 return;
8186         /*
8187          * Use smp_send_reschedule() instead of resched_cpu().
8188          * This way we generate a sched IPI on the target cpu, which
8189          * is idle, and the softirq performing nohz idle load balancing
8190          * will run before returning from the IPI.
8191          */
8192         smp_send_reschedule(ilb_cpu);
8193         return;
8194 }
8195 
8196 void nohz_balance_exit_idle(unsigned int cpu)
8197 {
8198         if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
8199                 /*
8200                  * Completely isolated CPUs never add themselves to this mask, so we must test.
8201                  */
8202                 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
8203                         cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
8204                         atomic_dec(&nohz.nr_cpus);
8205                 }
8206                 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8207         }
8208 }
8209 
8210 static inline void set_cpu_sd_state_busy(void)
8211 {
8212         struct sched_domain *sd;
8213         int cpu = smp_processor_id();
8214 
8215         rcu_read_lock();
8216         sd = rcu_dereference(per_cpu(sd_llc, cpu));
8217 
8218         if (!sd || !sd->nohz_idle)
8219                 goto unlock;
8220         sd->nohz_idle = 0;
8221 
8222         atomic_inc(&sd->shared->nr_busy_cpus);
8223 unlock:
8224         rcu_read_unlock();
8225 }
8226 
8227 void set_cpu_sd_state_idle(void)
8228 {
8229         struct sched_domain *sd;
8230         int cpu = smp_processor_id();
8231 
8232         rcu_read_lock();
8233         sd = rcu_dereference(per_cpu(sd_llc, cpu));
8234 
8235         if (!sd || sd->nohz_idle)
8236                 goto unlock;
8237         sd->nohz_idle = 1;
8238 
8239         atomic_dec(&sd->shared->nr_busy_cpus);
8240 unlock:
8241         rcu_read_unlock();
8242 }
8243 
8244 /*
8245  * This routine will record that the cpu is going idle with tick stopped.
8246  * This info will be used in performing idle load balancing in the future.
8247  */
8248 void nohz_balance_enter_idle(int cpu)
8249 {
8250         /*
8251          * If this cpu is going down, then nothing needs to be done.
8252          */
8253         if (!cpu_active(cpu))
8254                 return;
8255 
8256         if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
8257                 return;
8258 
8259         /*
8260          * If we're a completely isolated CPU, we don't play.
8261          */
8262         if (on_null_domain(cpu_rq(cpu)))
8263                 return;
8264 
8265         cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
8266         atomic_inc(&nohz.nr_cpus);
8267         set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8268 }
8269 #endif
8270 
8271 static DEFINE_SPINLOCK(balancing);
8272 
8273 /*
8274  * Scale the max load_balance interval with the number of CPUs in the system.
8275  * This trades load-balance latency on larger machines for less cross talk.
8276  */
8277 void update_max_interval(void)
8278 {
8279         max_load_balance_interval = HZ*num_online_cpus()/10;
8280 }
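/*
 * Editor's note: standalone sketch (not fair.c) of the scaling done above,
 * HZ * num_online_cpus() / 10, shown for a few hypothetical machine sizes.
 * The HZ value is an assumption made for the example.
 */
#include <stdio.h>

#define HZ 250UL  /* assumed tick rate for the example */

int main(void)
{
        unsigned int cpus[] = { 4, 64, 512 };
        int i;

        for (i = 0; i < 3; i++) {
                unsigned long max_interval = HZ * cpus[i] / 10;
                printf("%3u online cpus -> max balance interval %lu jiffies\n",
                       cpus[i], max_interval);
        }
        return 0;
}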
8281 
8282 /*
8283  * It checks each scheduling domain to see if it is due to be balanced,
8284  * and initiates a balancing operation if so.
8285  *
8286  * Balancing parameters are set up in init_sched_domains.
8287  */
8288 static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
8289 {
8290         int continue_balancing = 1;
8291         int cpu = rq->cpu;
8292         unsigned long interval;
8293         struct sched_domain *sd;
8294         /* Earliest time when we have to do rebalance again */
8295         unsigned long next_balance = jiffies + 60*HZ;
8296         int update_next_balance = 0;
8297         int need_serialize, need_decay = 0;
8298         u64 max_cost = 0;
8299 
8300         update_blocked_averages(cpu);
8301 
8302         rcu_read_lock();
8303         for_each_domain(cpu, sd) {
8304                 /*
8305                  * Decay the newidle max times here because this is a regular
8306                  * visit to all the domains. Decay ~1% per second.
8307                  */
8308                 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8309                         sd->max_newidle_lb_cost =
8310                                 (sd->max_newidle_lb_cost * 253) / 256;
8311                         sd->next_decay_max_lb_cost = jiffies + HZ;
8312                         need_decay = 1;