Linux/kernel/cpuset.c

  1 /*
  2  *  kernel/cpuset.c
  3  *
  4  *  Processor and Memory placement constraints for sets of tasks.
  5  *
  6  *  Copyright (C) 2003 BULL SA.
  7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
  8  *  Copyright (C) 2006 Google, Inc
  9  *
 10  *  Portions derived from Patrick Mochel's sysfs code.
 11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 12  *
 13  *  2003-10-10 Written by Simon Derr.
 14  *  2003-10-22 Updates by Stephen Hemminger.
 15  *  2004 May-July Rework by Paul Jackson.
 16  *  2006 Rework by Paul Menage to use generic cgroups
 17  *  2008 Rework of the scheduler domains and CPU hotplug handling
 18  *       by Max Krasnyansky
 19  *
 20  *  This file is subject to the terms and conditions of the GNU General Public
 21  *  License.  See the file COPYING in the main directory of the Linux
 22  *  distribution for more details.
 23  */
 24 
 25 #include <linux/cpu.h>
 26 #include <linux/cpumask.h>
 27 #include <linux/cpuset.h>
 28 #include <linux/err.h>
 29 #include <linux/errno.h>
 30 #include <linux/file.h>
 31 #include <linux/fs.h>
 32 #include <linux/init.h>
 33 #include <linux/interrupt.h>
 34 #include <linux/kernel.h>
 35 #include <linux/kmod.h>
 36 #include <linux/list.h>
 37 #include <linux/mempolicy.h>
 38 #include <linux/mm.h>
 39 #include <linux/memory.h>
 40 #include <linux/export.h>
 41 #include <linux/mount.h>
 42 #include <linux/namei.h>
 43 #include <linux/pagemap.h>
 44 #include <linux/proc_fs.h>
 45 #include <linux/rcupdate.h>
 46 #include <linux/sched.h>
 47 #include <linux/seq_file.h>
 48 #include <linux/security.h>
 49 #include <linux/slab.h>
 50 #include <linux/spinlock.h>
 51 #include <linux/stat.h>
 52 #include <linux/string.h>
 53 #include <linux/time.h>
 54 #include <linux/time64.h>
 55 #include <linux/backing-dev.h>
 56 #include <linux/sort.h>
 57 
 58 #include <asm/uaccess.h>
 59 #include <linux/atomic.h>
 60 #include <linux/mutex.h>
 61 #include <linux/cgroup.h>
 62 #include <linux/wait.h>
 63 
 64 struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
 65 
 66 /* See "Frequency meter" comments, below. */
 67 
 68 struct fmeter {
 69         int cnt;                /* unprocessed events count */
 70         int val;                /* most recent output value */
 71         time64_t time;          /* clock (secs) when val computed */
 72         spinlock_t lock;        /* guards read or write of above */
 73 };
 74 
 75 struct cpuset {
 76         struct cgroup_subsys_state css;
 77 
 78         unsigned long flags;            /* "unsigned long" so bitops work */
 79 
 80         /*
 81          * On default hierarchy:
 82          *
 83          * The user-configured masks can only be changed by writing to
 84          * cpuset.cpus and cpuset.mems, and won't be limited by the
 85          * parent masks.
 86          *
 87          * The effective masks are the real masks that apply to the tasks
 88          * in the cpuset. They may be changed when the configured masks are
 89          * changed or when hotplug happens.
 90          *
 91          * effective_mask == configured_mask & parent's effective_mask,
 92          * and if it ends up empty, it will inherit the parent's mask.
 93          *
 94          *
 95          * On legacy hierarchy:
 96          *
 97          * The user-configured masks are always the same as the effective masks.
 98          */
 99 
100         /* user-configured CPUs and Memory Nodes allowed to tasks */
101         cpumask_var_t cpus_allowed;
102         nodemask_t mems_allowed;
103 
104         /* effective CPUs and Memory Nodes allowed to tasks */
105         cpumask_var_t effective_cpus;
106         nodemask_t effective_mems;
107 
108         /*
109          * These are the old Memory Nodes the tasks in this cpuset took on.
110          *
111          * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
112          * - A new cpuset's old_mems_allowed is initialized when some
113          *   task is moved into it.
114          * - old_mems_allowed is used in cpuset_migrate_mm() when we change
115          *   cpuset.mems_allowed and have the tasks' nodemasks updated, and
116          *   then old_mems_allowed is updated to mems_allowed.
117          */
118         nodemask_t old_mems_allowed;
119 
120         struct fmeter fmeter;           /* memory_pressure filter */
121 
122         /*
123          * Tasks are being attached to this cpuset.  Used to prevent
124          * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
125          */
126         int attach_in_progress;
127 
128         /* partition number for rebuild_sched_domains() */
129         int pn;
130 
131         /* for custom sched domain */
132         int relax_domain_level;
133 };
134 
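/*
 * Worked example of the default-hierarchy rule above: if the parent's
 * effective_cpus is 0-3 and this cpuset's cpus_allowed is 2-5, its
 * effective_cpus becomes 2-3; if cpus_allowed were 6-7 instead (no
 * overlap), the cpuset would inherit the parent's 0-3.
 */
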
135 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
136 {
137         return css ? container_of(css, struct cpuset, css) : NULL;
138 }
139 
140 /* Retrieve the cpuset for a task */
141 static inline struct cpuset *task_cs(struct task_struct *task)
142 {
143         return css_cs(task_css(task, cpuset_cgrp_id));
144 }
145 
146 static inline struct cpuset *parent_cs(struct cpuset *cs)
147 {
148         return css_cs(cs->css.parent);
149 }
150 
151 #ifdef CONFIG_NUMA
152 static inline bool task_has_mempolicy(struct task_struct *task)
153 {
154         return task->mempolicy;
155 }
156 #else
157 static inline bool task_has_mempolicy(struct task_struct *task)
158 {
159         return false;
160 }
161 #endif
162 
163 
164 /* bits in struct cpuset flags field */
165 typedef enum {
166         CS_ONLINE,
167         CS_CPU_EXCLUSIVE,
168         CS_MEM_EXCLUSIVE,
169         CS_MEM_HARDWALL,
170         CS_MEMORY_MIGRATE,
171         CS_SCHED_LOAD_BALANCE,
172         CS_SPREAD_PAGE,
173         CS_SPREAD_SLAB,
174 } cpuset_flagbits_t;
175 
176 /* convenient tests for these bits */
177 static inline bool is_cpuset_online(const struct cpuset *cs)
178 {
179         return test_bit(CS_ONLINE, &cs->flags);
180 }
181 
182 static inline int is_cpu_exclusive(const struct cpuset *cs)
183 {
184         return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
185 }
186 
187 static inline int is_mem_exclusive(const struct cpuset *cs)
188 {
189         return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
190 }
191 
192 static inline int is_mem_hardwall(const struct cpuset *cs)
193 {
194         return test_bit(CS_MEM_HARDWALL, &cs->flags);
195 }
196 
197 static inline int is_sched_load_balance(const struct cpuset *cs)
198 {
199         return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
200 }
201 
202 static inline int is_memory_migrate(const struct cpuset *cs)
203 {
204         return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
205 }
206 
207 static inline int is_spread_page(const struct cpuset *cs)
208 {
209         return test_bit(CS_SPREAD_PAGE, &cs->flags);
210 }
211 
212 static inline int is_spread_slab(const struct cpuset *cs)
213 {
214         return test_bit(CS_SPREAD_SLAB, &cs->flags);
215 }
216 
217 static struct cpuset top_cpuset = {
218         .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
219                   (1 << CS_MEM_EXCLUSIVE)),
220 };
221 
222 /**
223  * cpuset_for_each_child - traverse online children of a cpuset
224  * @child_cs: loop cursor pointing to the current child
225  * @pos_css: used for iteration
226  * @parent_cs: target cpuset to walk children of
227  *
228  * Walk @child_cs through the online children of @parent_cs.  Must be used
229  * with RCU read locked.
230  */
231 #define cpuset_for_each_child(child_cs, pos_css, parent_cs)             \
232         css_for_each_child((pos_css), &(parent_cs)->css)                \
233                 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
234 
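/*
 * A minimal usage sketch (validate_change() below is a real instance);
 * the caller owns the cursor variables and must hold the RCU read lock.
 * inspect() is a hypothetical per-child hook:
 *
 *      struct cpuset *child;
 *      struct cgroup_subsys_state *pos;
 *
 *      rcu_read_lock();
 *      cpuset_for_each_child(child, pos, parent)
 *              inspect(child);
 *      rcu_read_unlock();
 */
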
235 /**
236  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
237  * @des_cs: loop cursor pointing to the current descendant
238  * @pos_css: used for iteration
239  * @root_cs: target cpuset to walk descendants of
240  *
241  * Walk @des_cs through the online descendants of @root_cs.  Must be used
242  * with RCU read locked.  The caller may modify @pos_css by calling
243  * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
244  * the iteration and is the first node to be visited.
245  */
246 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)        \
247         css_for_each_descendant_pre((pos_css), &(root_cs)->css)         \
248                 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
249 
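/*
 * A minimal usage sketch of the pre-order walk, including the documented
 * subtree skip (update_domain_attr_tree() below is a real instance).
 * skip_this() and visit() are hypothetical:
 *
 *      rcu_read_lock();
 *      cpuset_for_each_descendant_pre(cs, pos, root) {
 *              if (skip_this(cs)) {
 *                      pos = css_rightmost_descendant(pos);
 *                      continue;
 *              }
 *              visit(cs);
 *      }
 *      rcu_read_unlock();
 */
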
250 /*
251  * There are two global locks guarding cpuset structures - cpuset_mutex and
252  * callback_lock. We also require taking task_lock() when dereferencing a
253  * task's cpuset pointer. See "The task_lock() exception", at the end of this
254  * comment.
255  *
256  * A task must hold both locks to modify cpusets.  If a task holds
257  * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
258  * is the only task able to also acquire callback_lock and be able to
259  * modify cpusets.  It can perform various checks on the cpuset structure
260  * first, knowing nothing will change.  It can also allocate memory while
261  * just holding cpuset_mutex.  While it is performing these checks, various
262  * callback routines can briefly acquire callback_lock to query cpusets.
263  * Once it is ready to make the changes, it takes callback_lock, blocking
264  * everyone else.
265  *
266  * Calls to the kernel memory allocator can not be made while holding
267  * callback_lock, as that would risk double tripping on callback_lock
268  * from one of the callbacks into the cpuset code from within
269  * __alloc_pages().
270  *
271  * If a task is only holding callback_lock, then it has read-only
272  * access to cpusets.
273  *
274  * Since the task_struct fields mems_allowed and mempolicy may be changed
275  * by another task, we use alloc_lock in the task_struct to protect
276  * them.
277  *
278  * The cpuset_common_file_read() handlers only hold callback_lock across
279  * small pieces of code, such as when reading out possibly multi-word
280  * cpumasks and nodemasks.
281  *
282  * Accessing a task's cpuset should be done in accordance with the
283  * guidelines for accessing subsystem state in kernel/cgroup.c
284  */
285 
286 static DEFINE_MUTEX(cpuset_mutex);
287 static DEFINE_SPINLOCK(callback_lock);
288 
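/*
 * A sketch of the writer-side pattern implied by the rules above;
 * update_cpumask() below is a real instance (its callers take
 * cpuset_mutex before calling it):
 *
 *      mutex_lock(&cpuset_mutex);
 *      ... validate, allocate with GFP_KERNEL (callback_lock not held) ...
 *      spin_lock_irq(&callback_lock);
 *      ... publish the new masks ...
 *      spin_unlock_irq(&callback_lock);
 *      mutex_unlock(&cpuset_mutex);
 */
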
289 static struct workqueue_struct *cpuset_migrate_mm_wq;
290 
291 /*
292  * CPU / memory hotplug is handled asynchronously.
293  */
294 static void cpuset_hotplug_workfn(struct work_struct *work);
295 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
296 
297 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
298 
299 /*
300  * This is ugly, but preserves the userspace API for existing cpuset
301  * users. If someone tries to mount the "cpuset" filesystem, we
302  * silently switch it to mount "cgroup" instead
303  */
304 static struct dentry *cpuset_mount(struct file_system_type *fs_type,
305                          int flags, const char *unused_dev_name, void *data)
306 {
307         struct file_system_type *cgroup_fs = get_fs_type("cgroup");
308         struct dentry *ret = ERR_PTR(-ENODEV);
309         if (cgroup_fs) {
310                 char mountopts[] =
311                         "cpuset,noprefix,"
312                         "release_agent=/sbin/cpuset_release_agent";
313                 ret = cgroup_fs->mount(cgroup_fs, flags,
314                                            unused_dev_name, mountopts);
315                 put_filesystem(cgroup_fs);
316         }
317         return ret;
318 }
319 
320 static struct file_system_type cpuset_fs_type = {
321         .name = "cpuset",
322         .mount = cpuset_mount,
323 };
324 
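/*
 * Net effect, seen from userspace (a sketch):
 *
 *      mount -t cpuset none /dev/cpuset
 *
 * behaves like
 *
 *      mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent \
 *              none /dev/cpuset
 */
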
325 /*
326  * Return in pmask the portion of a cpuset's effective_cpus that
327  * are online.  If none are online, walk up the cpuset hierarchy
328  * until we find one that does have some online cpus.  The top
329  * cpuset always has some cpus online.
330  *
331  * One way or another, we guarantee to return some non-empty subset
332  * of cpu_online_mask.
333  *
334  * Call with callback_lock or cpuset_mutex held.
335  */
336 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
337 {
338         while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
339                 cs = parent_cs(cs);
340         cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
341 }
342 
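/*
 * Example: if cs->effective_cpus is 4-7 but those CPUs have all gone
 * offline, the loop climbs to the nearest ancestor that still overlaps
 * cpu_online_mask (ultimately top_cpuset), so *pmask is never empty.
 */
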
343 /*
344  * Return in *pmask the portion of a cpuset's effective_mems that
345  * are online, with memory.  If none are online with memory, walk
346  * up the cpuset hierarchy until we find one that does have some
347  * online mems.  The top cpuset always has some mems online.
348  *
349  * One way or another, we guarantee to return some non-empty subset
350  * of node_states[N_MEMORY].
351  *
352  * Call with callback_lock or cpuset_mutex held.
353  */
354 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
355 {
356         while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
357                 cs = parent_cs(cs);
358         nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
359 }
360 
361 /*
362  * update a task's spread flags to match the cpuset's page/slab spread flags
363  *
364  * Call with callback_lock or cpuset_mutex held.
365  */
366 static void cpuset_update_task_spread_flag(struct cpuset *cs,
367                                         struct task_struct *tsk)
368 {
369         if (is_spread_page(cs))
370                 task_set_spread_page(tsk);
371         else
372                 task_clear_spread_page(tsk);
373 
374         if (is_spread_slab(cs))
375                 task_set_spread_slab(tsk);
376         else
377                 task_clear_spread_slab(tsk);
378 }
379 
380 /*
381  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
382  *
383  * One cpuset is a subset of another if all its allowed CPUs and
384  * Memory Nodes are a subset of the other, and its exclusive flags
385  * are only set if the other's are set.  Call holding cpuset_mutex.
386  */
387 
388 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
389 {
390         return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
391                 nodes_subset(p->mems_allowed, q->mems_allowed) &&
392                 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
393                 is_mem_exclusive(p) <= is_mem_exclusive(q);
394 }
395 
396 /**
397  * alloc_trial_cpuset - allocate a trial cpuset
398  * @cs: the cpuset that the trial cpuset duplicates
399  */
400 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
401 {
402         struct cpuset *trial;
403 
404         trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
405         if (!trial)
406                 return NULL;
407 
408         if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
409                 goto free_cs;
410         if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
411                 goto free_cpus;
412 
413         cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
414         cpumask_copy(trial->effective_cpus, cs->effective_cpus);
415         return trial;
416 
417 free_cpus:
418         free_cpumask_var(trial->cpus_allowed);
419 free_cs:
420         kfree(trial);
421         return NULL;
422 }
423 
424 /**
425  * free_trial_cpuset - free the trial cpuset
426  * @trial: the trial cpuset to be freed
427  */
428 static void free_trial_cpuset(struct cpuset *trial)
429 {
430         free_cpumask_var(trial->effective_cpus);
431         free_cpumask_var(trial->cpus_allowed);
432         kfree(trial);
433 }
434 
435 /*
436  * validate_change() - Used to validate that any proposed cpuset change
437  *                     follows the structural rules for cpusets.
438  *
439  * If we replaced the flag and mask values of the current cpuset
440  * (cur) with those values in the trial cpuset (trial), would
441  * our various subset and exclusive rules still be valid?  Presumes
442  * cpuset_mutex held.
443  *
444  * 'cur' is the address of an actual, in-use cpuset.  Operations
445  * such as list traversal that depend on the actual address of the
446  * cpuset in the list must use cur below, not trial.
447  *
448  * 'trial' is the address of a bulk structure copy of cur, with
449  * perhaps one or more of the fields cpus_allowed, mems_allowed,
450  * or flags changed to new, trial values.
451  *
452  * Return 0 if valid, -errno if not.
453  */
454 
455 static int validate_change(struct cpuset *cur, struct cpuset *trial)
456 {
457         struct cgroup_subsys_state *css;
458         struct cpuset *c, *par;
459         int ret;
460 
461         rcu_read_lock();
462 
463         /* Each of our child cpusets must be a subset of us */
464         ret = -EBUSY;
465         cpuset_for_each_child(c, css, cur)
466                 if (!is_cpuset_subset(c, trial))
467                         goto out;
468 
469         /* Remaining checks don't apply to root cpuset */
470         ret = 0;
471         if (cur == &top_cpuset)
472                 goto out;
473 
474         par = parent_cs(cur);
475 
476         /* On legacy hierarchy, we must be a subset of our parent cpuset. */
477         ret = -EACCES;
478         if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
479             !is_cpuset_subset(trial, par))
480                 goto out;
481 
482         /*
483          * If either I or some sibling (!= me) is exclusive, we can't
484          * overlap
485          */
486         ret = -EINVAL;
487         cpuset_for_each_child(c, css, par) {
488                 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
489                     c != cur &&
490                     cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
491                         goto out;
492                 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
493                     c != cur &&
494                     nodes_intersects(trial->mems_allowed, c->mems_allowed))
495                         goto out;
496         }
497 
498         /*
499          * Cpusets with tasks - existing or newly being attached - can't
500          * be changed to have empty cpus_allowed or mems_allowed.
501          */
502         ret = -ENOSPC;
503         if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
504                 if (!cpumask_empty(cur->cpus_allowed) &&
505                     cpumask_empty(trial->cpus_allowed))
506                         goto out;
507                 if (!nodes_empty(cur->mems_allowed) &&
508                     nodes_empty(trial->mems_allowed))
509                         goto out;
510         }
511 
512         /*
513          * We can't shrink if we won't have enough room for SCHED_DEADLINE
514          * tasks.
515          */
516         ret = -EBUSY;
517         if (is_cpu_exclusive(cur) &&
518             !cpuset_cpumask_can_shrink(cur->cpus_allowed,
519                                        trial->cpus_allowed))
520                 goto out;
521 
522         ret = 0;
523 out:
524         rcu_read_unlock();
525         return ret;
526 }
527 
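/*
 * Example of the -EINVAL exclusivity rule above: with sibling cpusets
 * A (cpu_exclusive, cpus 0-3) and B (cpus 4-7), a trial mask of 2-7
 * for B fails validate_change() because it would overlap the exclusive
 * sibling A.
 */
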
528 #ifdef CONFIG_SMP
529 /*
530  * Helper routine for generate_sched_domains().
531  * Do cpusets a, b have overlapping effective cpus_allowed masks?
532  */
533 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
534 {
535         return cpumask_intersects(a->effective_cpus, b->effective_cpus);
536 }
537 
538 static void
539 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
540 {
541         if (dattr->relax_domain_level < c->relax_domain_level)
542                 dattr->relax_domain_level = c->relax_domain_level;
543         return;
544 }
545 
546 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
547                                     struct cpuset *root_cs)
548 {
549         struct cpuset *cp;
550         struct cgroup_subsys_state *pos_css;
551 
552         rcu_read_lock();
553         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
554                 /* skip the whole subtree if @cp doesn't have any CPU */
555                 if (cpumask_empty(cp->cpus_allowed)) {
556                         pos_css = css_rightmost_descendant(pos_css);
557                         continue;
558                 }
559 
560                 if (is_sched_load_balance(cp))
561                         update_domain_attr(dattr, cp);
562         }
563         rcu_read_unlock();
564 }
565 
566 /*
567  * generate_sched_domains()
568  *
569  * This function builds a partial partition of the system's CPUs.
570  * A 'partial partition' is a set of non-overlapping subsets whose
571  * union is a subset of that set.
572  * The output of this function needs to be passed to kernel/sched/core.c
573  * partition_sched_domains() routine, which will rebuild the scheduler's
574  * load balancing domains (sched domains) as specified by that partial
575  * partition.
576  *
577  * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
578  * for a background explanation of this.
579  *
580  * Does not return errors, on the theory that the callers of this
581  * routine would rather not worry about failures to rebuild sched
582  * domains when operating in the severe memory shortage situations
583  * that could cause allocation failures below.
584  *
585  * Must be called with cpuset_mutex held.
586  *
587  * The three key local variables below are:
588  *   cp  - cpuset pointer, used (together with pos_css) to perform a
589  *         top-down scan of all cpusets.  This scan loads a pointer
590  *         to each cpuset marked is_sched_load_balance into the
591  *         array 'csa'.  For our purposes, rebuilding the scheduler's
592  *         sched domains, we can ignore !is_sched_load_balance cpusets.
593  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
594  *         that need to be load balanced, for convenient iterative
595  *         access by the subsequent code that finds the best partition,
596  *         i.e. the set of domains (subsets) of CPUs such that the
597  *         cpus_allowed of every cpuset marked is_sched_load_balance
598  *         is a subset of one of these domains, while there are as
599  *         many such domains as possible, each as small as possible.
600  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
601  *         the kernel/sched/core.c routine partition_sched_domains() in a
602  *         convenient format, that can be easily compared to the prior
603  *         value to determine what partition elements (sched domains)
604  *         were changed (added or removed).
605  *
606  * Finding the best partition (set of domains):
607  *      The triple nested loops below over i, j, k scan over the
608  *      load balanced cpusets (using the array of cpuset pointers in
609  *      csa[]) looking for pairs of cpusets that have overlapping
610  *      cpus_allowed, but which don't have the same 'pn' partition
611  *      number, and merges them into the same partition number.  It keeps
612  *      looping on the 'restart' label until it can no longer find
613  *      any such pairs.
614  *
615  *      The union of the cpus_allowed masks from the set of
616  *      all cpusets having the same 'pn' value then form the one
617  *      element of the partition (one sched domain) to be passed to
618  *      partition_sched_domains().
619  */
620 static int generate_sched_domains(cpumask_var_t **domains,
621                         struct sched_domain_attr **attributes)
622 {
623         struct cpuset *cp;      /* scans q */
624         struct cpuset **csa;    /* array of all cpuset ptrs */
625         int csn;                /* how many cpuset ptrs in csa so far */
626         int i, j, k;            /* indices for partition finding loops */
627         cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
628         cpumask_var_t non_isolated_cpus;  /* load balanced CPUs */
629         struct sched_domain_attr *dattr;  /* attributes for custom domains */
630         int ndoms = 0;          /* number of sched domains in result */
631         int nslot;              /* next empty doms[] struct cpumask slot */
632         struct cgroup_subsys_state *pos_css;
633 
634         doms = NULL;
635         dattr = NULL;
636         csa = NULL;
637 
638         if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
639                 goto done;
640         cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
641 
642         /* Special case for the 99% of systems with one, full, sched domain */
643         if (is_sched_load_balance(&top_cpuset)) {
644                 ndoms = 1;
645                 doms = alloc_sched_domains(ndoms);
646                 if (!doms)
647                         goto done;
648 
649                 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
650                 if (dattr) {
651                         *dattr = SD_ATTR_INIT;
652                         update_domain_attr_tree(dattr, &top_cpuset);
653                 }
654                 cpumask_and(doms[0], top_cpuset.effective_cpus,
655                                      non_isolated_cpus);
656 
657                 goto done;
658         }
659 
660         csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
661         if (!csa)
662                 goto done;
663         csn = 0;
664 
665         rcu_read_lock();
666         cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
667                 if (cp == &top_cpuset)
668                         continue;
669                 /*
670                  * Continue traversing beyond @cp iff @cp has some CPUs and
671                  * isn't load balancing.  The former is obvious.  The
672                  * latter: All child cpusets contain a subset of the
673                  * parent's cpus, so just skip them, and then we call
674                  * update_domain_attr_tree() to calc relax_domain_level of
675                  * the corresponding sched domain.
676                  */
677                 if (!cpumask_empty(cp->cpus_allowed) &&
678                     !(is_sched_load_balance(cp) &&
679                       cpumask_intersects(cp->cpus_allowed, non_isolated_cpus)))
680                         continue;
681 
682                 if (is_sched_load_balance(cp))
683                         csa[csn++] = cp;
684 
685                 /* skip @cp's subtree */
686                 pos_css = css_rightmost_descendant(pos_css);
687         }
688         rcu_read_unlock();
689 
690         for (i = 0; i < csn; i++)
691                 csa[i]->pn = i;
692         ndoms = csn;
693 
694 restart:
695         /* Find the best partition (set of sched domains) */
696         for (i = 0; i < csn; i++) {
697                 struct cpuset *a = csa[i];
698                 int apn = a->pn;
699 
700                 for (j = 0; j < csn; j++) {
701                         struct cpuset *b = csa[j];
702                         int bpn = b->pn;
703 
704                         if (apn != bpn && cpusets_overlap(a, b)) {
705                                 for (k = 0; k < csn; k++) {
706                                         struct cpuset *c = csa[k];
707 
708                                         if (c->pn == bpn)
709                                                 c->pn = apn;
710                                 }
711                                 ndoms--;        /* one less element */
712                                 goto restart;
713                         }
714                 }
715         }
716 
717         /*
718          * Now we know how many domains to create.
719          * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
720          */
721         doms = alloc_sched_domains(ndoms);
722         if (!doms)
723                 goto done;
724 
725         /*
726          * The rest of the code, including the scheduler, can deal with
727          * dattr==NULL case. No need to abort if alloc fails.
728          */
729         dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
730 
731         for (nslot = 0, i = 0; i < csn; i++) {
732                 struct cpuset *a = csa[i];
733                 struct cpumask *dp;
734                 int apn = a->pn;
735 
736                 if (apn < 0) {
737                         /* Skip completed partitions */
738                         continue;
739                 }
740 
741                 dp = doms[nslot];
742 
743                 if (nslot == ndoms) {
744                         static int warnings = 10;
745                         if (warnings) {
746                                 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
747                                         nslot, ndoms, csn, i, apn);
748                                 warnings--;
749                         }
750                         continue;
751                 }
752 
753                 cpumask_clear(dp);
754                 if (dattr)
755                         *(dattr + nslot) = SD_ATTR_INIT;
756                 for (j = i; j < csn; j++) {
757                         struct cpuset *b = csa[j];
758 
759                         if (apn == b->pn) {
760                                 cpumask_or(dp, dp, b->effective_cpus);
761                                 cpumask_and(dp, dp, non_isolated_cpus);
762                                 if (dattr)
763                                         update_domain_attr_tree(dattr + nslot, b);
764 
765                                 /* Done with this partition */
766                                 b->pn = -1;
767                         }
768                 }
769                 nslot++;
770         }
771         BUG_ON(nslot != ndoms);
772 
773 done:
774         free_cpumask_var(non_isolated_cpus);
775         kfree(csa);
776 
777         /*
778          * Fallback to the default domain if kmalloc() failed.
779          * See comments in partition_sched_domains().
780          */
781         if (doms == NULL)
782                 ndoms = 1;
783 
784         *domains    = doms;
785         *attributes = dattr;
786         return ndoms;
787 }
788 
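/*
 * A minimal, self-contained userspace sketch of the pn-merging pass
 * above (an illustration only: cpumasks are simplified to plain
 * unsigned long bitmasks, and struct toy_cs and the sample masks are
 * made up for the example).  Cpusets with overlapping masks are folded
 * into one partition number; each surviving number becomes one sched
 * domain.
 */
#include <stdio.h>

struct toy_cs { unsigned long cpus; int pn; };

int main(void)
{
        /* A: cpus 0-3, B: cpus 2-5 (overlaps A), C: cpus 6-7 (disjoint) */
        struct toy_cs csa[] = { { 0x0fUL, 0 }, { 0x3cUL, 1 }, { 0xc0UL, 2 } };
        int csn = 3, ndoms = csn, i, j, k;

restart:
        for (i = 0; i < csn; i++) {
                for (j = 0; j < csn; j++) {
                        if (csa[i].pn != csa[j].pn &&
                            (csa[i].cpus & csa[j].cpus)) {
                                int bpn = csa[j].pn;

                                /* fold b's partition into a's */
                                for (k = 0; k < csn; k++)
                                        if (csa[k].pn == bpn)
                                                csa[k].pn = csa[i].pn;
                                ndoms--;        /* one less element */
                                goto restart;
                        }
                }
        }
        printf("ndoms = %d\n", ndoms);  /* prints 2: {A,B} and {C} */
        return 0;
}
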
789 /*
790  * Rebuild scheduler domains.
791  *
792  * If the flag 'sched_load_balance' of any cpuset with non-empty
793  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
794  * which has that flag enabled, or if any cpuset with a non-empty
795  * 'cpus' is removed, then call this routine to rebuild the
796  * scheduler's dynamic sched domains.
797  *
798  * Call with cpuset_mutex held.  Takes get_online_cpus().
799  */
800 static void rebuild_sched_domains_locked(void)
801 {
802         struct sched_domain_attr *attr;
803         cpumask_var_t *doms;
804         int ndoms;
805 
806         lockdep_assert_held(&cpuset_mutex);
807         get_online_cpus();
808 
809         /*
810          * If we raced with CPU hotplug, do nothing: we must not pass
811          * doms with an offlined cpu to partition_sched_domains().
812          * The hotplug work item will rebuild the sched domains anyway.
813          */
814         if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
815                 goto out;
816 
817         /* Generate domain masks and attrs */
818         ndoms = generate_sched_domains(&doms, &attr);
819 
820         /* Have scheduler rebuild the domains */
821         partition_sched_domains(ndoms, doms, attr);
822 out:
823         put_online_cpus();
824 }
825 #else /* !CONFIG_SMP */
826 static void rebuild_sched_domains_locked(void)
827 {
828 }
829 #endif /* CONFIG_SMP */
830 
831 void rebuild_sched_domains(void)
832 {
833         mutex_lock(&cpuset_mutex);
834         rebuild_sched_domains_locked();
835         mutex_unlock(&cpuset_mutex);
836 }
837 
838 /**
839  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
840  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
841  *
842  * Iterate through each task of @cs updating its cpus_allowed to the
843  * effective cpuset's.  As this function is called with cpuset_mutex held,
844  * cpuset membership stays stable.
845  */
846 static void update_tasks_cpumask(struct cpuset *cs)
847 {
848         struct css_task_iter it;
849         struct task_struct *task;
850 
851         css_task_iter_start(&cs->css, &it);
852         while ((task = css_task_iter_next(&it)))
853                 set_cpus_allowed_ptr(task, cs->effective_cpus);
854         css_task_iter_end(&it);
855 }
856 
857 /*
858  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
859  * @cs: the cpuset to consider
860  * @new_cpus: temp variable for calculating new effective_cpus
861  *
862  * When the configured cpumask is changed, the effective cpumasks of this
863  * cpuset and all its descendants need to be updated.
864  *
865  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
866  *
867  * Called with cpuset_mutex held
868  */
869 static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
870 {
871         struct cpuset *cp;
872         struct cgroup_subsys_state *pos_css;
873         bool need_rebuild_sched_domains = false;
874 
875         rcu_read_lock();
876         cpuset_for_each_descendant_pre(cp, pos_css, cs) {
877                 struct cpuset *parent = parent_cs(cp);
878 
879                 cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);
880 
881                 /*
882                  * If it becomes empty, inherit the effective mask of the
883                  * parent, which is guaranteed to have some CPUs.
884                  */
885                 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
886                     cpumask_empty(new_cpus))
887                         cpumask_copy(new_cpus, parent->effective_cpus);
888 
889                 /* Skip the whole subtree if the cpumask remains the same. */
890                 if (cpumask_equal(new_cpus, cp->effective_cpus)) {
891                         pos_css = css_rightmost_descendant(pos_css);
892                         continue;
893                 }
894 
895                 if (!css_tryget_online(&cp->css))
896                         continue;
897                 rcu_read_unlock();
898 
899                 spin_lock_irq(&callback_lock);
900                 cpumask_copy(cp->effective_cpus, new_cpus);
901                 spin_unlock_irq(&callback_lock);
902 
903                 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
904                         !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
905 
906                 update_tasks_cpumask(cp);
907 
908                 /*
909                  * If the effective cpumask of any non-empty cpuset is changed,
910                  * we need to rebuild sched domains.
911                  */
912                 if (!cpumask_empty(cp->cpus_allowed) &&
913                     is_sched_load_balance(cp))
914                         need_rebuild_sched_domains = true;
915 
916                 rcu_read_lock();
917                 css_put(&cp->css);
918         }
919         rcu_read_unlock();
920 
921         if (need_rebuild_sched_domains)
922                 rebuild_sched_domains_locked();
923 }
924 
925 /**
926  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
927  * @cs: the cpuset to consider
928  * @trialcs: trial cpuset
929  * @buf: buffer of cpu numbers written to this cpuset
930  */
931 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
932                           const char *buf)
933 {
934         int retval;
935 
936         /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
937         if (cs == &top_cpuset)
938                 return -EACCES;
939 
940         /*
941          * An empty cpus_allowed is ok only if the cpuset has no tasks.
942          * Since cpulist_parse() fails on an empty mask, we special case
943          * that parsing.  The validate_change() call ensures that cpusets
944          * with tasks have cpus.
945          */
946         if (!*buf) {
947                 cpumask_clear(trialcs->cpus_allowed);
948         } else {
949                 retval = cpulist_parse(buf, trialcs->cpus_allowed);
950                 if (retval < 0)
951                         return retval;
952 
953                 if (!cpumask_subset(trialcs->cpus_allowed,
954                                     top_cpuset.cpus_allowed))
955                         return -EINVAL;
956         }
957 
958         /* Nothing to do if the cpus didn't change */
959         if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
960                 return 0;
961 
962         retval = validate_change(cs, trialcs);
963         if (retval < 0)
964                 return retval;
965 
966         spin_lock_irq(&callback_lock);
967         cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
968         spin_unlock_irq(&callback_lock);
969 
970         /* use trialcs->cpus_allowed as a temp variable */
971         update_cpumasks_hier(cs, trialcs->cpus_allowed);
972         return 0;
973 }
974 
975 /*
976  * Migrate memory region from one set of nodes to another.  This is
977  * performed asynchronously as it can be called from process migration path
978  * holding locks involved in process management.  All mm migrations are
979  * performed in the queued order and can be waited for by flushing
980  * cpuset_migrate_mm_wq.
981  */
982 
983 struct cpuset_migrate_mm_work {
984         struct work_struct      work;
985         struct mm_struct        *mm;
986         nodemask_t              from;
987         nodemask_t              to;
988 };
989 
990 static void cpuset_migrate_mm_workfn(struct work_struct *work)
991 {
992         struct cpuset_migrate_mm_work *mwork =
993                 container_of(work, struct cpuset_migrate_mm_work, work);
994 
995         /* on a wq worker, no need to worry about %current's mems_allowed */
996         do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
997         mmput(mwork->mm);
998         kfree(mwork);
999 }
1000 
1001 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1002                                                         const nodemask_t *to)
1003 {
1004         struct cpuset_migrate_mm_work *mwork;
1005 
1006         mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1007         if (mwork) {
1008                 mwork->mm = mm;
1009                 mwork->from = *from;
1010                 mwork->to = *to;
1011                 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1012                 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1013         } else {
1014                 mmput(mm);
1015         }
1016 }
1017 
1018 static void cpuset_post_attach(void)
1019 {
1020         flush_workqueue(cpuset_migrate_mm_wq);
1021 }
1022 
1023 /*
1024  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1025  * @tsk: the task to change
1026  * @newmems: the new nodes the task will be allowed to use
1027  *
1028  * In order to avoid seeing no nodes if the old and new nodes are disjoint,
1029  * we structure updates as setting all new allowed nodes, then clearing newly
1030  * disallowed ones.
1031  */
1032 static void cpuset_change_task_nodemask(struct task_struct *tsk,
1033                                         nodemask_t *newmems)
1034 {
1035         bool need_loop;
1036 
1037         /*
1038          * Allow tasks that have access to memory reserves because they have
1039          * been OOM killed to get memory anywhere.
1040          */
1041         if (unlikely(test_thread_flag(TIF_MEMDIE)))
1042                 return;
1043         if (current->flags & PF_EXITING) /* Let dying task have memory */
1044                 return;
1045 
1046         task_lock(tsk);
1047         /*
1048          * Determine if a loop is necessary if another thread is doing
1049          * read_mems_allowed_begin().  If at least one node remains unchanged and
1050          * tsk does not have a mempolicy, then an empty nodemask will not be
1051          * possible when mems_allowed is larger than a word.
1052          */
1053         need_loop = task_has_mempolicy(tsk) ||
1054                         !nodes_intersects(*newmems, tsk->mems_allowed);
1055 
1056         if (need_loop) {
1057                 local_irq_disable();
1058                 write_seqcount_begin(&tsk->mems_allowed_seq);
1059         }
1060 
1061         nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1062         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
1063 
1064         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1065         tsk->mems_allowed = *newmems;
1066 
1067         if (need_loop) {
1068                 write_seqcount_end(&tsk->mems_allowed_seq);
1069                 local_irq_enable();
1070         }
1071 
1072         task_unlock(tsk);
1073 }
1074 
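/*
 * Worked example of the ordering above, assuming old mems_allowed = {0,1}
 * and *newmems = {1,2}: nodes_or() first widens the mask to {0,1,2}, so a
 * concurrent allocator never sees an empty mask; the plain assignment then
 * narrows it to {1,2}.
 */
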
1075 static void *cpuset_being_rebound;
1076 
1077 /**
1078  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1079  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1080  *
1081  * Iterate through each task of @cs updating its mems_allowed to the
1082  * effective cpuset's.  As this function is called with cpuset_mutex held,
1083  * cpuset membership stays stable.
1084  */
1085 static void update_tasks_nodemask(struct cpuset *cs)
1086 {
1087         static nodemask_t newmems;      /* protected by cpuset_mutex */
1088         struct css_task_iter it;
1089         struct task_struct *task;
1090 
1091         cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
1092 
1093         guarantee_online_mems(cs, &newmems);
1094 
1095         /*
1096          * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1097          * take while holding tasklist_lock.  Forks can happen - the
1098          * mpol_dup() cpuset_being_rebound check will catch such forks,
1099          * and rebind their vma mempolicies too.  Because we still hold
1100          * the global cpuset_mutex, we know that no other rebind effort
1101          * will be contending for the global variable cpuset_being_rebound.
1102          * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1103          * is idempotent.  Also migrate pages in each mm to new nodes.
1104          */
1105         css_task_iter_start(&cs->css, &it);
1106         while ((task = css_task_iter_next(&it))) {
1107                 struct mm_struct *mm;
1108                 bool migrate;
1109 
1110                 cpuset_change_task_nodemask(task, &newmems);
1111 
1112                 mm = get_task_mm(task);
1113                 if (!mm)
1114                         continue;
1115 
1116                 migrate = is_memory_migrate(cs);
1117 
1118                 mpol_rebind_mm(mm, &cs->mems_allowed);
1119                 if (migrate)
1120                         cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1121                 else
1122                         mmput(mm);
1123         }
1124         css_task_iter_end(&it);
1125 
1126         /*
1127          * All the tasks' nodemasks have been updated, update
1128          * cs->old_mems_allowed.
1129          */
1130         cs->old_mems_allowed = newmems;
1131 
1132         /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1133         cpuset_being_rebound = NULL;
1134 }
1135 
1136 /*
1137  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1138  * @cs: the cpuset to consider
1139  * @new_mems: a temp variable for calculating new effective_mems
1140  *
1141  * When the configured nodemask is changed, the effective nodemasks of
1142  * this cpuset and all its descendants need to be updated.
1143  *
1144  * On legacy hierarchy, effective_mems will be the same as mems_allowed.
1145  *
1146  * Called with cpuset_mutex held
1147  */
1148 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1149 {
1150         struct cpuset *cp;
1151         struct cgroup_subsys_state *pos_css;
1152 
1153         rcu_read_lock();
1154         cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1155                 struct cpuset *parent = parent_cs(cp);
1156 
1157                 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1158 
1159                 /*
1160                  * If it becomes empty, inherit the effective mask of the
1161                  * parent, which is guaranteed to have some MEMs.
1162                  */
1163                 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1164                     nodes_empty(*new_mems))
1165                         *new_mems = parent->effective_mems;
1166 
1167                 /* Skip the whole subtree if the nodemask remains the same. */
1168                 if (nodes_equal(*new_mems, cp->effective_mems)) {
1169                         pos_css = css_rightmost_descendant(pos_css);
1170                         continue;
1171                 }
1172 
1173                 if (!css_tryget_online(&cp->css))
1174                         continue;
1175                 rcu_read_unlock();
1176 
1177                 spin_lock_irq(&callback_lock);
1178                 cp->effective_mems = *new_mems;
1179                 spin_unlock_irq(&callback_lock);
1180 
1181                 WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1182                         !nodes_equal(cp->mems_allowed, cp->effective_mems));
1183 
1184                 update_tasks_nodemask(cp);
1185 
1186                 rcu_read_lock();
1187                 css_put(&cp->css);
1188         }
1189         rcu_read_unlock();
1190 }
1191 
1192 /*
1193  * Handle user request to change the 'mems' memory placement
1194  * of a cpuset.  Needs to validate the request, update the
1195  * cpuset's mems_allowed, and for each task in the cpuset,
1196  * update mems_allowed and rebind the task's mempolicy and any vma
1197  * mempolicies, and if the cpuset is marked 'memory_migrate',
1198  * migrate the task's pages to the new memory.
1199  *
1200  * Call with cpuset_mutex held. May take callback_lock during call.
1201  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1202  * lock each such task's mm->mmap_sem, scan its vma's and rebind
1203  * their mempolicies to the cpuset's new mems_allowed.
1204  */
1205 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1206                            const char *buf)
1207 {
1208         int retval;
1209 
1210         /*
1211          * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1212          * it's read-only
1213          */
1214         if (cs == &top_cpuset) {
1215                 retval = -EACCES;
1216                 goto done;
1217         }
1218 
1219         /*
1220          * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1221          * Since nodelist_parse() fails on an empty mask, we special case
1222          * that parsing.  The validate_change() call ensures that cpusets
1223          * with tasks have memory.
1224          */
1225         if (!*buf) {
1226                 nodes_clear(trialcs->mems_allowed);
1227         } else {
1228                 retval = nodelist_parse(buf, trialcs->mems_allowed);
1229                 if (retval < 0)
1230                         goto done;
1231 
1232                 if (!nodes_subset(trialcs->mems_allowed,
1233                                   top_cpuset.mems_allowed)) {
1234                         retval = -EINVAL;
1235                         goto done;
1236                 }
1237         }
1238 
1239         if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1240                 retval = 0;             /* Too easy - nothing to do */
1241                 goto done;
1242         }
1243         retval = validate_change(cs, trialcs);
1244         if (retval < 0)
1245                 goto done;
1246 
1247         spin_lock_irq(&callback_lock);
1248         cs->mems_allowed = trialcs->mems_allowed;
1249         spin_unlock_irq(&callback_lock);
1250 
1251         /* use trialcs->mems_allowed as a temp variable */
1252         update_nodemasks_hier(cs, &trialcs->mems_allowed);
1253 done:
1254         return retval;
1255 }
1256 
1257 int current_cpuset_is_being_rebound(void)
1258 {
1259         int ret;
1260 
1261         rcu_read_lock();
1262         ret = task_cs(current) == cpuset_being_rebound;
1263         rcu_read_unlock();
1264 
1265         return ret;
1266 }
1267 
1268 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1269 {
1270 #ifdef CONFIG_SMP
1271         if (val < -1 || val >= sched_domain_level_max)
1272                 return -EINVAL;
1273 #endif
1274 
1275         if (val != cs->relax_domain_level) {
1276                 cs->relax_domain_level = val;
1277                 if (!cpumask_empty(cs->cpus_allowed) &&
1278                     is_sched_load_balance(cs))
1279                         rebuild_sched_domains_locked();
1280         }
1281 
1282         return 0;
1283 }
1284 
1285 /**
1286  * update_tasks_flags - update the spread flags of tasks in the cpuset.
1287  * @cs: the cpuset in which each task's spread flags needs to be changed
1288  *
1289  * Iterate through each task of @cs updating its spread flags.  As this
1290  * function is called with cpuset_mutex held, cpuset membership stays
1291  * stable.
1292  */
1293 static void update_tasks_flags(struct cpuset *cs)
1294 {
1295         struct css_task_iter it;
1296         struct task_struct *task;
1297 
1298         css_task_iter_start(&cs->css, &it);
1299         while ((task = css_task_iter_next(&it)))
1300                 cpuset_update_task_spread_flag(cs, task);
1301         css_task_iter_end(&it);
1302 }
1303 
1304 /*
1305  * update_flag - read a 0 or a 1 in a file and update associated flag
1306  * bit:         the bit to update (see cpuset_flagbits_t)
1307  * cs:          the cpuset to update
1308  * turning_on:  whether the flag is being set or cleared
1309  *
1310  * Call with cpuset_mutex held.
1311  */
1312 
1313 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1314                        int turning_on)
1315 {
1316         struct cpuset *trialcs;
1317         int balance_flag_changed;
1318         int spread_flag_changed;
1319         int err;
1320 
1321         trialcs = alloc_trial_cpuset(cs);
1322         if (!trialcs)
1323                 return -ENOMEM;
1324 
1325         if (turning_on)
1326                 set_bit(bit, &trialcs->flags);
1327         else
1328                 clear_bit(bit, &trialcs->flags);
1329 
1330         err = validate_change(cs, trialcs);
1331         if (err < 0)
1332                 goto out;
1333 
1334         balance_flag_changed = (is_sched_load_balance(cs) !=
1335                                 is_sched_load_balance(trialcs));
1336 
1337         spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1338                         || (is_spread_page(cs) != is_spread_page(trialcs)));
1339 
1340         spin_lock_irq(&callback_lock);
1341         cs->flags = trialcs->flags;
1342         spin_unlock_irq(&callback_lock);
1343 
1344         if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1345                 rebuild_sched_domains_locked();
1346 
1347         if (spread_flag_changed)
1348                 update_tasks_flags(cs);
1349 out:
1350         free_trial_cpuset(trialcs);
1351         return err;
1352 }
1353 
1354 /*
1355  * Frequency meter - How fast is some event occurring?
1356  *
1357  * These routines manage a digitally filtered, constant time based,
1358  * event frequency meter.  There are four routines:
1359  *   fmeter_init() - initialize a frequency meter.
1360  *   fmeter_markevent() - called each time the event happens.
1361  *   fmeter_getrate() - returns the recent rate of such events.
1362  *   fmeter_update() - internal routine used to update fmeter.
1363  *
1364  * A common data structure is passed to each of these routines,
1365  * which is used to keep track of the state required to manage the
1366  * frequency meter and its digital filter.
1367  *
1368  * The filter works on the number of events marked per unit time.
1369  * The filter is single-pole low-pass recursive (IIR).  The time unit
1370  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1371  * simulate 3 decimal digits of precision (multiplied by 1000).
1372  *
1373  * With an FM_COEF of 933, and a time base of 1 second, the filter
1374  * has a half-life of 10 seconds, meaning that if the events quit
1375  * happening, then the rate returned from the fmeter_getrate()
1376  * will be cut in half each 10 seconds, until it converges to zero.
1377  *
1378  * It is not worth doing a real infinitely recursive filter.  If more
1379  * than FM_MAXTICKS ticks have elapsed since the last filter event,
1380  * just compute FM_MAXTICKS ticks worth, by which point the level
1381  * will be stable.
1382  *
1383  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1384  * arithmetic overflow in the fmeter_update() routine.
1385  *
1386  * Given the simple 32 bit integer arithmetic used, this meter works
1387  * best for reporting rates between one per millisecond (msec) and
1388  * one per 32 (approx) seconds.  At constant rates faster than one
1389  * per msec it maxes out at values just under 1,000,000.  At constant
1390  * rates between one per msec, and one per second it will stabilize
1391  * to a value N*1000, where N is the rate of events per second.
1392  * At constant rates between one per second and one per 32 seconds,
1393  * it will be choppy, moving up on the seconds that have an event,
1394  * and then decaying until the next event.  At rates slower than
1395  * about one in 32 seconds, it decays all the way back to zero between
1396  * each event.
1397  */
1398 
1399 #define FM_COEF 933             /* coefficient for half-life of 10 secs */
1400 #define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
1401 #define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
1402 #define FM_SCALE 1000           /* faux fixed point scale */
1403 
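/*
 * A quick self-contained userspace check of the half-life claim above
 * (an illustration; 933 and 1000 mirror FM_COEF and FM_SCALE): starting
 * from one event per second, ten idle ticks decay the value to roughly
 * half.
 */
#include <stdio.h>

int main(void)
{
        int val = 1000, tick;   /* one event/sec, FM_SCALE-scaled */

        for (tick = 1; tick <= 10; tick++)
                val = (933 * val) / 1000;       /* one FM_COEF decay step */
        printf("val after 10 ticks = %d\n", val);       /* ~495: half */
        return 0;
}
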
1404 /* Initialize a frequency meter */
1405 static void fmeter_init(struct fmeter *fmp)
1406 {
1407         fmp->cnt = 0;
1408         fmp->val = 0;
1409         fmp->time = 0;
1410         spin_lock_init(&fmp->lock);
1411 }
1412 
1413 /* Internal meter update - process cnt events and update value */
1414 static void fmeter_update(struct fmeter *fmp)
1415 {
1416         time64_t now;
1417         u32 ticks;
1418 
1419         now = ktime_get_seconds();
1420         ticks = now - fmp->time;
1421 
1422         if (ticks == 0)
1423                 return;
1424 
1425         ticks = min(FM_MAXTICKS, ticks);
1426         while (ticks-- > 0)
1427                 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1428         fmp->time = now;
1429 
1430         fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1431         fmp->cnt = 0;
1432 }
1433 
1434 /* Process any previous ticks, then bump cnt by one (times scale). */
1435 static void fmeter_markevent(struct fmeter *fmp)
1436 {
1437         spin_lock(&fmp->lock);
1438         fmeter_update(fmp);
1439         fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1440         spin_unlock(&fmp->lock);
1441 }
1442 
1443 /* Process any previous ticks, then return current value. */
1444 static int fmeter_getrate(struct fmeter *fmp)
1445 {
1446         int val;
1447 
1448         spin_lock(&fmp->lock);
1449         fmeter_update(fmp);
1450         val = fmp->val;
1451         spin_unlock(&fmp->lock);
1452         return val;
1453 }
1454 
1455 static struct cpuset *cpuset_attach_old_cs;
1456 
1457 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1458 static int cpuset_can_attach(struct cgroup_taskset *tset)
1459 {
1460         struct cgroup_subsys_state *css;
1461         struct cpuset *cs;
1462         struct task_struct *task;
1463         int ret;
1464 
1465         /* used later by cpuset_attach() */
1466         cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
1467         cs = css_cs(css);
1468 
1469         mutex_lock(&cpuset_mutex);
1470 
1471         /* allow moving tasks into an empty cpuset if on default hierarchy */
1472         ret = -ENOSPC;
1473         if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1474             (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1475                 goto out_unlock;
1476 
1477         cgroup_taskset_for_each(task, css, tset) {
1478                 ret = task_can_attach(task, cs->cpus_allowed);
1479                 if (ret)
1480                         goto out_unlock;
1481                 ret = security_task_setscheduler(task);
1482                 if (ret)
1483                         goto out_unlock;
1484         }
1485 
1486         /*
1487          * Mark that an attach is in progress.  This makes validate_change()
1488          * reject changes which would zero cpus/mems_allowed.
1489          */
1490         cs->attach_in_progress++;
1491         ret = 0;
1492 out_unlock:
1493         mutex_unlock(&cpuset_mutex);
1494         return ret;
1495 }
1496 
1497 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
1498 {
1499         struct cgroup_subsys_state *css;
1500         struct cpuset *cs;
1501 
1502         cgroup_taskset_first(tset, &css);
1503         cs = css_cs(css);
1504 
1505         mutex_lock(&cpuset_mutex);
1506         cs->attach_in_progress--;
1507         mutex_unlock(&cpuset_mutex);
1508 }
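
/*
 * Illustrative sketch (not part of the kernel source): the attach
 * protocol above in miniature.  can_attach() pins the cpuset by bumping
 * attach_in_progress under the mutex, and exactly one of attach() or
 * cancel_attach() later unpins it; the hotplug path waits for the count
 * to reach zero.  All names here are invented for the sketch.
 */
#include <pthread.h>

struct demo_cs {
        pthread_mutex_t lock;
        int attach_in_progress;
};

static int demo_can_attach(struct demo_cs *cs)
{
        pthread_mutex_lock(&cs->lock);
        cs->attach_in_progress++;       /* pin: block emptying changes */
        pthread_mutex_unlock(&cs->lock);
        return 0;
}

static void demo_attach_done(struct demo_cs *cs) /* attach or cancel */
{
        pthread_mutex_lock(&cs->lock);
        cs->attach_in_progress--;
        pthread_mutex_unlock(&cs->lock);
}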
1509 
1510 /*
1511  * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
1512  * but we can't allocate it dynamically there.  Define it global and
1513  * allocate from cpuset_init().
1514  */
1515 static cpumask_var_t cpus_attach;
1516 
1517 static void cpuset_attach(struct cgroup_taskset *tset)
1518 {
1519         /* static buf protected by cpuset_mutex */
1520         static nodemask_t cpuset_attach_nodemask_to;
1521         struct task_struct *task;
1522         struct task_struct *leader;
1523         struct cgroup_subsys_state *css;
1524         struct cpuset *cs;
1525         struct cpuset *oldcs = cpuset_attach_old_cs;
1526 
1527         cgroup_taskset_first(tset, &css);
1528         cs = css_cs(css);
1529 
1530         mutex_lock(&cpuset_mutex);
1531 
1532         /* prepare for attach */
1533         if (cs == &top_cpuset)
1534                 cpumask_copy(cpus_attach, cpu_possible_mask);
1535         else
1536                 guarantee_online_cpus(cs, cpus_attach);
1537 
1538         guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1539 
1540         cgroup_taskset_for_each(task, css, tset) {
1541                 /*
1542                  * can_attach beforehand should guarantee that this doesn't
1543                  * fail.  TODO: have a better way to handle failure here
1544                  */
1545                 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1546 
1547                 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1548                 cpuset_update_task_spread_flag(cs, task);
1549         }
1550 
1551         /*
1552          * Change mm for all threadgroup leaders. This is expensive and may
1553          * sleep, so it should be moved outside the migration path proper.
1554          */
1555         cpuset_attach_nodemask_to = cs->effective_mems;
1556         cgroup_taskset_for_each_leader(leader, css, tset) {
1557                 struct mm_struct *mm = get_task_mm(leader);
1558 
1559                 if (mm) {
1560                         mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1561 
1562                         /*
1563          * old_mems_allowed is the same as mems_allowed
1564          * here, except when this task is being moved
1565          * automatically due to hotplug.  In that case
1566          * @mems_allowed has been updated and is empty, so
1567          * @old_mems_allowed is the right nodemask to
1568          * migrate the mm from.
1569                          */
1570                         if (is_memory_migrate(cs))
1571                                 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
1572                                                   &cpuset_attach_nodemask_to);
1573                         else
1574                                 mmput(mm);
1575                 }
1576         }
1577 
1578         cs->old_mems_allowed = cpuset_attach_nodemask_to;
1579 
1580         cs->attach_in_progress--;
1581         if (!cs->attach_in_progress)
1582                 wake_up(&cpuset_attach_wq);
1583 
1584         mutex_unlock(&cpuset_mutex);
1585 }
1586 
1587 /* The various types of files and directories in a cpuset file system */
1588 
1589 typedef enum {
1590         FILE_MEMORY_MIGRATE,
1591         FILE_CPULIST,
1592         FILE_MEMLIST,
1593         FILE_EFFECTIVE_CPULIST,
1594         FILE_EFFECTIVE_MEMLIST,
1595         FILE_CPU_EXCLUSIVE,
1596         FILE_MEM_EXCLUSIVE,
1597         FILE_MEM_HARDWALL,
1598         FILE_SCHED_LOAD_BALANCE,
1599         FILE_SCHED_RELAX_DOMAIN_LEVEL,
1600         FILE_MEMORY_PRESSURE_ENABLED,
1601         FILE_MEMORY_PRESSURE,
1602         FILE_SPREAD_PAGE,
1603         FILE_SPREAD_SLAB,
1604 } cpuset_filetype_t;
1605 
1606 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
1607                             u64 val)
1608 {
1609         struct cpuset *cs = css_cs(css);
1610         cpuset_filetype_t type = cft->private;
1611         int retval = 0;
1612 
1613         mutex_lock(&cpuset_mutex);
1614         if (!is_cpuset_online(cs)) {
1615                 retval = -ENODEV;
1616                 goto out_unlock;
1617         }
1618 
1619         switch (type) {
1620         case FILE_CPU_EXCLUSIVE:
1621                 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1622                 break;
1623         case FILE_MEM_EXCLUSIVE:
1624                 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1625                 break;
1626         case FILE_MEM_HARDWALL:
1627                 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1628                 break;
1629         case FILE_SCHED_LOAD_BALANCE:
1630                 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1631                 break;
1632         case FILE_MEMORY_MIGRATE:
1633                 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1634                 break;
1635         case FILE_MEMORY_PRESSURE_ENABLED:
1636                 cpuset_memory_pressure_enabled = !!val;
1637                 break;
1638         case FILE_SPREAD_PAGE:
1639                 retval = update_flag(CS_SPREAD_PAGE, cs, val);
1640                 break;
1641         case FILE_SPREAD_SLAB:
1642                 retval = update_flag(CS_SPREAD_SLAB, cs, val);
1643                 break;
1644         default:
1645                 retval = -EINVAL;
1646                 break;
1647         }
1648 out_unlock:
1649         mutex_unlock(&cpuset_mutex);
1650         return retval;
1651 }
1652 
1653 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
1654                             s64 val)
1655 {
1656         struct cpuset *cs = css_cs(css);
1657         cpuset_filetype_t type = cft->private;
1658         int retval = -ENODEV;
1659 
1660         mutex_lock(&cpuset_mutex);
1661         if (!is_cpuset_online(cs))
1662                 goto out_unlock;
1663 
1664         switch (type) {
1665         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1666                 retval = update_relax_domain_level(cs, val);
1667                 break;
1668         default:
1669                 retval = -EINVAL;
1670                 break;
1671         }
1672 out_unlock:
1673         mutex_unlock(&cpuset_mutex);
1674         return retval;
1675 }
1676 
1677 /*
1678  * Common handling for a write to a "cpus" or "mems" file.
1679  */
1680 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1681                                     char *buf, size_t nbytes, loff_t off)
1682 {
1683         struct cpuset *cs = css_cs(of_css(of));
1684         struct cpuset *trialcs;
1685         int retval = -ENODEV;
1686 
1687         buf = strstrip(buf);
1688 
1689         /*
1690          * CPU or memory hotunplug may leave @cs w/o any execution
1691          * resources, in which case the hotplug code asynchronously updates
1692          * configuration and transfers all tasks to the nearest ancestor
1693          * which can execute.
1694          *
1695          * As writes to "cpus" or "mems" may restore @cs's execution
1696          * resources, wait for the previously scheduled operations before
1697          * proceeding, so that we don't end up repeatedly removing tasks
1698          * that were added after execution capability is restored.
1699          *
1700          * cpuset_hotplug_work calls back into cgroup core via
1701          * cgroup_transfer_tasks() and waiting for it from a cgroupfs
1702          * operation like this one can lead to a deadlock through kernfs
1703          * active_ref protection.  Let's break the protection.  Losing the
1704          * protection is okay as we check whether @cs is online after
1705          * grabbing cpuset_mutex anyway.  This only happens on the legacy
1706          * hierarchies.
1707          */
1708         css_get(&cs->css);
1709         kernfs_break_active_protection(of->kn);
1710         flush_work(&cpuset_hotplug_work);
1711 
1712         mutex_lock(&cpuset_mutex);
1713         if (!is_cpuset_online(cs))
1714                 goto out_unlock;
1715 
1716         trialcs = alloc_trial_cpuset(cs);
1717         if (!trialcs) {
1718                 retval = -ENOMEM;
1719                 goto out_unlock;
1720         }
1721 
1722         switch (of_cft(of)->private) {
1723         case FILE_CPULIST:
1724                 retval = update_cpumask(cs, trialcs, buf);
1725                 break;
1726         case FILE_MEMLIST:
1727                 retval = update_nodemask(cs, trialcs, buf);
1728                 break;
1729         default:
1730                 retval = -EINVAL;
1731                 break;
1732         }
1733 
1734         free_trial_cpuset(trialcs);
1735 out_unlock:
1736         mutex_unlock(&cpuset_mutex);
1737         kernfs_unbreak_active_protection(of->kn);
1738         css_put(&cs->css);
1739         flush_workqueue(cpuset_migrate_mm_wq);
1740         return retval ?: nbytes;
1741 }
1742 
1743 /*
1744  * These ascii lists should be read in a single call, by using a user
1745  * buffer large enough to hold the entire map.  If read in smaller
1746  * chunks, there is no guarantee of atomicity.  Since the display format
1747  * used (a list of ranges of sequential numbers) is variable length,
1748  * and since these maps can change dynamically, one could read
1749  * gibberish by doing partial reads while a list was changing.
1750  */
1751 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1752 {
1753         struct cpuset *cs = css_cs(seq_css(sf));
1754         cpuset_filetype_t type = seq_cft(sf)->private;
1755         int ret = 0;
1756 
1757         spin_lock_irq(&callback_lock);
1758 
1759         switch (type) {
1760         case FILE_CPULIST:
1761                 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
1762                 break;
1763         case FILE_MEMLIST:
1764                 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
1765                 break;
1766         case FILE_EFFECTIVE_CPULIST:
1767                 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
1768                 break;
1769         case FILE_EFFECTIVE_MEMLIST:
1770                 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
1771                 break;
1772         default:
1773                 ret = -EINVAL;
1774         }
1775 
1776         spin_unlock_irq(&callback_lock);
1777         return ret;
1778 }
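
/*
 * Illustrative sketch (not part of the kernel source): reading one of
 * these files with a single large read(), as the comment above advises.
 * The path assumes a mounted v1 cpuset hierarchy; adjust as needed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];         /* large enough for the whole list */
        ssize_t n;
        int fd = open("/sys/fs/cgroup/cpuset/cpuset.cpus", O_RDONLY);

        if (fd < 0)
                return 1;
        n = read(fd, buf, sizeof(buf) - 1);     /* one call, no torn view */
        if (n > 0) {
                buf[n] = '\0';
                printf("cpus: %s", buf);
        }
        close(fd);
        return 0;
}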
1779 
1780 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
1781 {
1782         struct cpuset *cs = css_cs(css);
1783         cpuset_filetype_t type = cft->private;
1784         switch (type) {
1785         case FILE_CPU_EXCLUSIVE:
1786                 return is_cpu_exclusive(cs);
1787         case FILE_MEM_EXCLUSIVE:
1788                 return is_mem_exclusive(cs);
1789         case FILE_MEM_HARDWALL:
1790                 return is_mem_hardwall(cs);
1791         case FILE_SCHED_LOAD_BALANCE:
1792                 return is_sched_load_balance(cs);
1793         case FILE_MEMORY_MIGRATE:
1794                 return is_memory_migrate(cs);
1795         case FILE_MEMORY_PRESSURE_ENABLED:
1796                 return cpuset_memory_pressure_enabled;
1797         case FILE_MEMORY_PRESSURE:
1798                 return fmeter_getrate(&cs->fmeter);
1799         case FILE_SPREAD_PAGE:
1800                 return is_spread_page(cs);
1801         case FILE_SPREAD_SLAB:
1802                 return is_spread_slab(cs);
1803         default:
1804                 BUG();
1805         }
1806 
1807         /* Unreachable but makes gcc happy */
1808         return 0;
1809 }
1810 
1811 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
1812 {
1813         struct cpuset *cs = css_cs(css);
1814         cpuset_filetype_t type = cft->private;
1815         switch (type) {
1816         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1817                 return cs->relax_domain_level;
1818         default:
1819                 BUG();
1820         }
1821 
1822         /* Unreachable but makes gcc happy */
1823         return 0;
1824 }
1825 
1826 
1827 /*
1828  * for the common functions, 'private' gives the type of file
1829  */
1830 
1831 static struct cftype files[] = {
1832         {
1833                 .name = "cpus",
1834                 .seq_show = cpuset_common_seq_show,
1835                 .write = cpuset_write_resmask,
1836                 .max_write_len = (100U + 6 * NR_CPUS),
1837                 .private = FILE_CPULIST,
1838         },
1839 
1840         {
1841                 .name = "mems",
1842                 .seq_show = cpuset_common_seq_show,
1843                 .write = cpuset_write_resmask,
1844                 .max_write_len = (100U + 6 * MAX_NUMNODES),
1845                 .private = FILE_MEMLIST,
1846         },
1847 
1848         {
1849                 .name = "effective_cpus",
1850                 .seq_show = cpuset_common_seq_show,
1851                 .private = FILE_EFFECTIVE_CPULIST,
1852         },
1853 
1854         {
1855                 .name = "effective_mems",
1856                 .seq_show = cpuset_common_seq_show,
1857                 .private = FILE_EFFECTIVE_MEMLIST,
1858         },
1859 
1860         {
1861                 .name = "cpu_exclusive",
1862                 .read_u64 = cpuset_read_u64,
1863                 .write_u64 = cpuset_write_u64,
1864                 .private = FILE_CPU_EXCLUSIVE,
1865         },
1866 
1867         {
1868                 .name = "mem_exclusive",
1869                 .read_u64 = cpuset_read_u64,
1870                 .write_u64 = cpuset_write_u64,
1871                 .private = FILE_MEM_EXCLUSIVE,
1872         },
1873 
1874         {
1875                 .name = "mem_hardwall",
1876                 .read_u64 = cpuset_read_u64,
1877                 .write_u64 = cpuset_write_u64,
1878                 .private = FILE_MEM_HARDWALL,
1879         },
1880 
1881         {
1882                 .name = "sched_load_balance",
1883                 .read_u64 = cpuset_read_u64,
1884                 .write_u64 = cpuset_write_u64,
1885                 .private = FILE_SCHED_LOAD_BALANCE,
1886         },
1887 
1888         {
1889                 .name = "sched_relax_domain_level",
1890                 .read_s64 = cpuset_read_s64,
1891                 .write_s64 = cpuset_write_s64,
1892                 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1893         },
1894 
1895         {
1896                 .name = "memory_migrate",
1897                 .read_u64 = cpuset_read_u64,
1898                 .write_u64 = cpuset_write_u64,
1899                 .private = FILE_MEMORY_MIGRATE,
1900         },
1901 
1902         {
1903                 .name = "memory_pressure",
1904                 .read_u64 = cpuset_read_u64,
1905         },
1906 
1907         {
1908                 .name = "memory_spread_page",
1909                 .read_u64 = cpuset_read_u64,
1910                 .write_u64 = cpuset_write_u64,
1911                 .private = FILE_SPREAD_PAGE,
1912         },
1913 
1914         {
1915                 .name = "memory_spread_slab",
1916                 .read_u64 = cpuset_read_u64,
1917                 .write_u64 = cpuset_write_u64,
1918                 .private = FILE_SPREAD_SLAB,
1919         },
1920 
1921         {
1922                 .name = "memory_pressure_enabled",
1923                 .flags = CFTYPE_ONLY_ON_ROOT,
1924                 .read_u64 = cpuset_read_u64,
1925                 .write_u64 = cpuset_write_u64,
1926                 .private = FILE_MEMORY_PRESSURE_ENABLED,
1927         },
1928 
1929         { }     /* terminate */
1930 };
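
/*
 * Illustrative sketch (not part of the kernel source): each cftype entry
 * above surfaces as a control file in cgroupfs.  A hedged example driving
 * two of them from userspace; the mount point and the "demo" cgroup are
 * assumptions (mkdir the cgroup first).
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/fs/cgroup/cpuset/demo/cpuset.cpus", "w");

        if (!f)
                return 1;
        fputs("0-3", f);        /* lands in cpuset_write_resmask() */
        fclose(f);

        f = fopen("/sys/fs/cgroup/cpuset/demo/cpuset.mem_exclusive", "w");
        if (!f)
                return 1;
        fputs("1", f);          /* lands in cpuset_write_u64() */
        fclose(f);
        return 0;
}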
1931 
1932 /*
1933  *      cpuset_css_alloc - allocate a cpuset css
1934  *      cgrp:   control group that the new cpuset will be part of
1935  */
1936 
1937 static struct cgroup_subsys_state *
1938 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
1939 {
1940         struct cpuset *cs;
1941 
1942         if (!parent_css)
1943                 return &top_cpuset.css;
1944 
1945         cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1946         if (!cs)
1947                 return ERR_PTR(-ENOMEM);
1948         if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
1949                 goto free_cs;
1950         if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
1951                 goto free_cpus;
1952 
1953         set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1954         cpumask_clear(cs->cpus_allowed);
1955         nodes_clear(cs->mems_allowed);
1956         cpumask_clear(cs->effective_cpus);
1957         nodes_clear(cs->effective_mems);
1958         fmeter_init(&cs->fmeter);
1959         cs->relax_domain_level = -1;
1960 
1961         return &cs->css;
1962 
1963 free_cpus:
1964         free_cpumask_var(cs->cpus_allowed);
1965 free_cs:
1966         kfree(cs);
1967         return ERR_PTR(-ENOMEM);
1968 }
1969 
1970 static int cpuset_css_online(struct cgroup_subsys_state *css)
1971 {
1972         struct cpuset *cs = css_cs(css);
1973         struct cpuset *parent = parent_cs(cs);
1974         struct cpuset *tmp_cs;
1975         struct cgroup_subsys_state *pos_css;
1976 
1977         if (!parent)
1978                 return 0;
1979 
1980         mutex_lock(&cpuset_mutex);
1981 
1982         set_bit(CS_ONLINE, &cs->flags);
1983         if (is_spread_page(parent))
1984                 set_bit(CS_SPREAD_PAGE, &cs->flags);
1985         if (is_spread_slab(parent))
1986                 set_bit(CS_SPREAD_SLAB, &cs->flags);
1987 
1988         cpuset_inc();
1989 
1990         spin_lock_irq(&callback_lock);
1991         if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1992                 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1993                 cs->effective_mems = parent->effective_mems;
1994         }
1995         spin_unlock_irq(&callback_lock);
1996 
1997         if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1998                 goto out_unlock;
1999 
2000         /*
2001          * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2002          * set.  This flag handling is implemented in cgroup core for
2003          * historical reasons - the flag may be specified during mount.
2004          *
2005          * Currently, if any sibling cpusets have exclusive cpus or mem, we
2006          * refuse to clone the configuration - thereby refusing to let the
2007          * task enter, and as a result failing the sys_unshare() or
2008          * clone() which initiated it.  If this becomes a problem for some
2009          * users who wish to allow that scenario, then this could be
2010          * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2011          * (and likewise for mems) to the new cgroup.
2012          */
2013         rcu_read_lock();
2014         cpuset_for_each_child(tmp_cs, pos_css, parent) {
2015                 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2016                         rcu_read_unlock();
2017                         goto out_unlock;
2018                 }
2019         }
2020         rcu_read_unlock();
2021 
2022         spin_lock_irq(&callback_lock);
2023         cs->mems_allowed = parent->mems_allowed;
2024         cs->effective_mems = parent->mems_allowed;
2025         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2026         cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2027         spin_unlock_irq(&callback_lock);
2028 out_unlock:
2029         mutex_unlock(&cpuset_mutex);
2030         return 0;
2031 }
2032 
2033 /*
2034  * If the cpuset being removed has its flag 'sched_load_balance'
2035  * enabled, then simulate turning sched_load_balance off, which
2036  * will call rebuild_sched_domains_locked().
2037  */
2038 
2039 static void cpuset_css_offline(struct cgroup_subsys_state *css)
2040 {
2041         struct cpuset *cs = css_cs(css);
2042 
2043         mutex_lock(&cpuset_mutex);
2044 
2045         if (is_sched_load_balance(cs))
2046                 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2047 
2048         cpuset_dec();
2049         clear_bit(CS_ONLINE, &cs->flags);
2050 
2051         mutex_unlock(&cpuset_mutex);
2052 }
2053 
2054 static void cpuset_css_free(struct cgroup_subsys_state *css)
2055 {
2056         struct cpuset *cs = css_cs(css);
2057 
2058         free_cpumask_var(cs->effective_cpus);
2059         free_cpumask_var(cs->cpus_allowed);
2060         kfree(cs);
2061 }
2062 
2063 static void cpuset_bind(struct cgroup_subsys_state *root_css)
2064 {
2065         mutex_lock(&cpuset_mutex);
2066         spin_lock_irq(&callback_lock);
2067 
2068         if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
2069                 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2070                 top_cpuset.mems_allowed = node_possible_map;
2071         } else {
2072                 cpumask_copy(top_cpuset.cpus_allowed,
2073                              top_cpuset.effective_cpus);
2074                 top_cpuset.mems_allowed = top_cpuset.effective_mems;
2075         }
2076 
2077         spin_unlock_irq(&callback_lock);
2078         mutex_unlock(&cpuset_mutex);
2079 }
2080 
2081 struct cgroup_subsys cpuset_cgrp_subsys = {
2082         .css_alloc      = cpuset_css_alloc,
2083         .css_online     = cpuset_css_online,
2084         .css_offline    = cpuset_css_offline,
2085         .css_free       = cpuset_css_free,
2086         .can_attach     = cpuset_can_attach,
2087         .cancel_attach  = cpuset_cancel_attach,
2088         .attach         = cpuset_attach,
2089         .post_attach    = cpuset_post_attach,
2090         .bind           = cpuset_bind,
2091         .legacy_cftypes = files,
2092         .early_init     = true,
2093 };
2094 
2095 /**
2096  * cpuset_init - initialize cpusets at system boot
2097  *
2098  * Description: Initialize top_cpuset and the cpuset internal file system.
2099  **/
2100 
2101 int __init cpuset_init(void)
2102 {
2103         int err = 0;
2104 
2105         if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
2106                 BUG();
2107         if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
2108                 BUG();
2109 
2110         cpumask_setall(top_cpuset.cpus_allowed);
2111         nodes_setall(top_cpuset.mems_allowed);
2112         cpumask_setall(top_cpuset.effective_cpus);
2113         nodes_setall(top_cpuset.effective_mems);
2114 
2115         fmeter_init(&top_cpuset.fmeter);
2116         set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
2117         top_cpuset.relax_domain_level = -1;
2118 
2119         err = register_filesystem(&cpuset_fs_type);
2120         if (err < 0)
2121                 return err;
2122 
2123         if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
2124                 BUG();
2125 
2126         return 0;
2127 }
2128 
2129 /*
2130  * When the CPU and/or memory hotplug handlers, below, unplug any CPUs
2131  * or memory nodes, we need to walk over the cpuset hierarchy,
2132  * removing that CPU or node from all cpusets.  If this removes the
2133  * last CPU or node from a cpuset, then move the tasks in the empty
2134  * cpuset to its next-highest non-empty parent.
2135  */
2136 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2137 {
2138         struct cpuset *parent;
2139 
2140         /*
2141          * Find its next-highest non-empty parent (the top cpuset
2142          * always has online cpus, so it can't be empty).
2143          */
2144         parent = parent_cs(cs);
2145         while (cpumask_empty(parent->cpus_allowed) ||
2146                         nodes_empty(parent->mems_allowed))
2147                 parent = parent_cs(parent);
2148 
2149         if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2150                 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
2151                 pr_cont_cgroup_name(cs->css.cgroup);
2152                 pr_cont("\n");
2153         }
2154 }
2155 
2156 static void
2157 hotplug_update_tasks_legacy(struct cpuset *cs,
2158                             struct cpumask *new_cpus, nodemask_t *new_mems,
2159                             bool cpus_updated, bool mems_updated)
2160 {
2161         bool is_empty;
2162 
2163         spin_lock_irq(&callback_lock);
2164         cpumask_copy(cs->cpus_allowed, new_cpus);
2165         cpumask_copy(cs->effective_cpus, new_cpus);
2166         cs->mems_allowed = *new_mems;
2167         cs->effective_mems = *new_mems;
2168         spin_unlock_irq(&callback_lock);
2169 
2170         /*
2171          * Don't call update_tasks_cpumask() if the cpuset becomes empty,
2172          * as the tasks will be migrated to an ancestor.
2173          */
2174         if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
2175                 update_tasks_cpumask(cs);
2176         if (mems_updated && !nodes_empty(cs->mems_allowed))
2177                 update_tasks_nodemask(cs);
2178 
2179         is_empty = cpumask_empty(cs->cpus_allowed) ||
2180                    nodes_empty(cs->mems_allowed);
2181 
2182         mutex_unlock(&cpuset_mutex);
2183 
2184         /*
2185          * Move tasks to the nearest ancestor with execution resources.
2186          * This is a full cgroup operation which will also call back into
2187          * cpuset, so it should be done outside any lock.
2188          */
2189         if (is_empty)
2190                 remove_tasks_in_empty_cpuset(cs);
2191 
2192         mutex_lock(&cpuset_mutex);
2193 }
2194 
2195 static void
2196 hotplug_update_tasks(struct cpuset *cs,
2197                      struct cpumask *new_cpus, nodemask_t *new_mems,
2198                      bool cpus_updated, bool mems_updated)
2199 {
2200         if (cpumask_empty(new_cpus))
2201                 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
2202         if (nodes_empty(*new_mems))
2203                 *new_mems = parent_cs(cs)->effective_mems;
2204 
2205         spin_lock_irq(&callback_lock);
2206         cpumask_copy(cs->effective_cpus, new_cpus);
2207         cs->effective_mems = *new_mems;
2208         spin_unlock_irq(&callback_lock);
2209 
2210         if (cpus_updated)
2211                 update_tasks_cpumask(cs);
2212         if (mems_updated)
2213                 update_tasks_nodemask(cs);
2214 }
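
/*
 * Illustrative sketch (not part of the kernel source): the default-
 * hierarchy rule applied above, restated over plain 64-bit masks.  The
 * effective set is the configured set intersected with the parent's
 * effective set, falling back to the parent's effective set when the
 * intersection is empty.
 */
#include <stdint.h>

static uint64_t effective_mask(uint64_t configured, uint64_t parent_eff)
{
        uint64_t eff = configured & parent_eff;

        return eff ? eff : parent_eff;  /* inherit parent's mask if empty */
}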
2215 
2216 /**
2217  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
2218  * @cs: cpuset of interest
2219  *
2220  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2221  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
2222  * all its tasks are moved to the nearest ancestor with both resources.
2223  */
2224 static void cpuset_hotplug_update_tasks(struct cpuset *cs)
2225 {
2226         static cpumask_t new_cpus;
2227         static nodemask_t new_mems;
2228         bool cpus_updated;
2229         bool mems_updated;
2230 retry:
2231         wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
2232 
2233         mutex_lock(&cpuset_mutex);
2234 
2235         /*
2236          * We have raced with task attaching. We wait until attaching
2237          * is finished, so we won't attach a task to an empty cpuset.
2238          */
2239         if (cs->attach_in_progress) {
2240                 mutex_unlock(&cpuset_mutex);
2241                 goto retry;
2242         }
2243 
2244         cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
2245         nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
2246 
2247         cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
2248         mems_updated = !nodes_equal(new_mems, cs->effective_mems);
2249 
2250         if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
2251                 hotplug_update_tasks(cs, &new_cpus, &new_mems,
2252                                      cpus_updated, mems_updated);
2253         else
2254                 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
2255                                             cpus_updated, mems_updated);
2256 
2257         mutex_unlock(&cpuset_mutex);
2258 }
2259 
2260 /**
2261  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2262  *
2263  * This function is called after either CPU or memory configuration has
2264  * changed and updates cpuset accordingly.  The top_cpuset is always
2265  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2266  * order to make cpusets transparent (of no effect) on systems that are
2267  * actively using CPU hotplug but making no active use of cpusets.
2268  *
2269  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
2270  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
2271  * all descendants.
2272  *
2273  * Note that CPU offlining during suspend is ignored.  We don't modify
2274  * cpusets across suspend/resume cycles at all.
2275  */
2276 static void cpuset_hotplug_workfn(struct work_struct *work)
2277 {
2278         static cpumask_t new_cpus;
2279         static nodemask_t new_mems;
2280         bool cpus_updated, mems_updated;
2281         bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
2282 
2283         mutex_lock(&cpuset_mutex);
2284 
2285         /* fetch the available cpus/mems and find out which changed how */
2286         cpumask_copy(&new_cpus, cpu_active_mask);
2287         new_mems = node_states[N_MEMORY];
2288 
2289         cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
2290         mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
2291 
2292         /* synchronize cpus_allowed to cpu_active_mask */
2293         if (cpus_updated) {
2294                 spin_lock_irq(&callback_lock);
2295                 if (!on_dfl)
2296                         cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2297                 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
2298                 spin_unlock_irq(&callback_lock);
2299                 /* we don't mess with cpumasks of tasks in top_cpuset */
2300         }
2301 
2302         /* synchronize mems_allowed to N_MEMORY */
2303         if (mems_updated) {
2304                 spin_lock_irq(&callback_lock);
2305                 if (!on_dfl)
2306                         top_cpuset.mems_allowed = new_mems;
2307                 top_cpuset.effective_mems = new_mems;
2308                 spin_unlock_irq(&callback_lock);
2309                 update_tasks_nodemask(&top_cpuset);
2310         }
2311 
2312         mutex_unlock(&cpuset_mutex);
2313 
2314         /* if cpus or mems changed, we need to propagate to descendants */
2315         if (cpus_updated || mems_updated) {
2316                 struct cpuset *cs;
2317                 struct cgroup_subsys_state *pos_css;
2318 
2319                 rcu_read_lock();
2320                 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
2321                         if (cs == &top_cpuset || !css_tryget_online(&cs->css))
2322                                 continue;
2323                         rcu_read_unlock();
2324 
2325                         cpuset_hotplug_update_tasks(cs);
2326 
2327                         rcu_read_lock();
2328                         css_put(&cs->css);
2329                 }
2330                 rcu_read_unlock();
2331         }
2332 
2333         /* rebuild sched domains if cpus_allowed has changed */
2334         if (cpus_updated)
2335                 rebuild_sched_domains();
2336 }
2337 
2338 void cpuset_update_active_cpus(bool cpu_online)
2339 {
2340         /*
2341          * We're inside cpu hotplug critical region which usually nests
2342          * inside cgroup synchronization.  Bounce actual hotplug processing
2343          * to a work item to avoid reverse locking order.
2344          *
2345          * We still need to do partition_sched_domains() synchronously;
2346          * otherwise, the scheduler will get confused and put tasks on the
2347          * dead CPU.  Fall back to the default single domain.
2348          * cpuset_hotplug_workfn() will rebuild it as necessary.
2349          */
2350         partition_sched_domains(1, NULL, NULL);
2351         schedule_work(&cpuset_hotplug_work);
2352 }
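
/*
 * Illustrative sketch (not part of the kernel source): the "bounce to a
 * work item" pattern used by cpuset_hotplug_work, reduced to its core.
 * Only the stock workqueue API is assumed.
 */
#include <linux/workqueue.h>

static void demo_workfn(struct work_struct *work)
{
        /* runs later in process context, outside the hotplug locks */
}
static DECLARE_WORK(demo_work, demo_workfn);

static void demo_event(void)
{
        schedule_work(&demo_work);      /* cheap, safe from awkward contexts */
}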
2353 
2354 /*
2355  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2356  * Call this routine anytime after node_states[N_MEMORY] changes.
2357  * See cpuset_update_active_cpus() for CPU hotplug handling.
2358  */
2359 static int cpuset_track_online_nodes(struct notifier_block *self,
2360                                 unsigned long action, void *arg)
2361 {
2362         schedule_work(&cpuset_hotplug_work);
2363         return NOTIFY_OK;
2364 }
2365 
2366 static struct notifier_block cpuset_track_online_nodes_nb = {
2367         .notifier_call = cpuset_track_online_nodes,
2368         .priority = 10,         /* ??! */
2369 };
2370 
2371 /**
2372  * cpuset_init_smp - initialize cpus_allowed
2373  *
2374  * Description: Finish top cpuset after the cpu and node maps are initialized
2375  */
2376 void __init cpuset_init_smp(void)
2377 {
2378         cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2379         top_cpuset.mems_allowed = node_states[N_MEMORY];
2380         top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
2381 
2382         cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
2383         top_cpuset.effective_mems = node_states[N_MEMORY];
2384 
2385         register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
2386 
2387         cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
2388         BUG_ON(!cpuset_migrate_mm_wq);
2389 }
2390 
2391 /**
2392  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2393  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2394  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2395  *
2396  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2397  * attached to the specified @tsk.  Guaranteed to return some non-empty
2398  * subset of cpu_online_mask, even if this means going outside the
2399  * task's cpuset.
2400  **/
2401 
2402 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2403 {
2404         unsigned long flags;
2405 
2406         spin_lock_irqsave(&callback_lock, flags);
2407         rcu_read_lock();
2408         guarantee_online_cpus(task_cs(tsk), pmask);
2409         rcu_read_unlock();
2410         spin_unlock_irqrestore(&callback_lock, flags);
2411 }
2412 
2413 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2414 {
2415         rcu_read_lock();
2416         do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
2417         rcu_read_unlock();
2418 
2419         /*
2420          * We own tsk->cpus_allowed, nobody can change it under us.
2421          *
2422          * But we read cs and cs->cpus_allowed locklessly and thus can
2423          * race with cgroup_attach_task() or update_cpumask() and get
2424          * the wrong tsk->cpus_allowed. However, both cases imply the
2425          * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2426          * which takes task_rq_lock().
2427          *
2428          * If we are called after it dropped the lock we must see all
2429          * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
2430          * set any mask even if it is not right from task_cs() pov,
2431          * the pending set_cpus_allowed_ptr() will fix things.
2432          *
2433          * select_fallback_rq() will fix things up and set cpu_possible_mask
2434          * if required.
2435          */
2436 }
2437 
2438 void __init cpuset_init_current_mems_allowed(void)
2439 {
2440         nodes_setall(current->mems_allowed);
2441 }
2442 
2443 /**
2444  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2445  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2446  *
2447  * Description: Returns the nodemask_t mems_allowed of the cpuset
2448  * attached to the specified @tsk.  Guaranteed to return some non-empty
2449  * subset of node_states[N_MEMORY], even if this means going outside the
2450  * task's cpuset.
2451  **/
2452 
2453 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2454 {
2455         nodemask_t mask;
2456         unsigned long flags;
2457 
2458         spin_lock_irqsave(&callback_lock, flags);
2459         rcu_read_lock();
2460         guarantee_online_mems(task_cs(tsk), &mask);
2461         rcu_read_unlock();
2462         spin_unlock_irqrestore(&callback_lock, flags);
2463 
2464         return mask;
2465 }
2466 
2467 /**
2468  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2469  * @nodemask: the nodemask to be checked
2470  *
2471  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2472  */
2473 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2474 {
2475         return nodes_intersects(*nodemask, current->mems_allowed);
2476 }
2477 
2478 /*
2479  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2480  * mem_hardwall ancestor to the specified cpuset.  Call holding
2481  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
2482  * (an unusual configuration), then returns the root cpuset.
2483  */
2484 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2485 {
2486         while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2487                 cs = parent_cs(cs);
2488         return cs;
2489 }
2490 
2491 /**
2492  * cpuset_node_allowed - Can we allocate on a memory node?
2493  * @node: is this an allowed node?
2494  * @gfp_mask: memory allocation flags
2495  *
2496  * If we're in interrupt, yes, we can always allocate.  If @node is set in
2497  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
2498  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
2499  * yes.  If current has access to memory reserves due to TIF_MEMDIE, yes.
2500  * Otherwise, no.
2501  *
2502  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2503  * and do not allow allocations outside the current task's cpuset
2504  * unless the task has been OOM killed and is marked TIF_MEMDIE.
2505  * GFP_KERNEL allocations are not so marked, so can escape to the
2506  * nearest enclosing hardwalled ancestor cpuset.
2507  *
2508  * Scanning up parent cpusets requires callback_lock.  The
2509  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2510  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2511  * current task's mems_allowed came up empty on the first pass over
2512  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2513  * cpuset are short of memory, might require taking the callback_lock.
2514  *
2515  * The first call here from mm/page_alloc:get_page_from_freelist()
2516  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2517  * so no allocation on a node outside the cpuset is allowed (unless
2518  * in interrupt, of course).
2519  *
2520  * The second pass through get_page_from_freelist() doesn't even call
2521  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2522  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2523  * in alloc_flags.  That logic and the checks below have the combined
2524  * effect that:
2525  *      in_interrupt - any node ok (current task context irrelevant)
2526  *      GFP_ATOMIC   - any node ok
2527  *      TIF_MEMDIE   - any node ok
2528  *      GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2529  *      GFP_USER     - only nodes in the current task's mems_allowed ok.
2530  */
2531 int __cpuset_node_allowed(int node, gfp_t gfp_mask)
2532 {
2533         struct cpuset *cs;              /* current cpuset ancestors */
2534         int allowed;                    /* is allocation on this node allowed? */
2535         unsigned long flags;
2536 
2537         if (in_interrupt())
2538                 return 1;
2539         if (node_isset(node, current->mems_allowed))
2540                 return 1;
2541         /*
2542          * Allow tasks that have access to memory reserves because they have
2543          * been OOM killed to get memory anywhere.
2544          */
2545         if (unlikely(test_thread_flag(TIF_MEMDIE)))
2546                 return 1;
2547         if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
2548                 return 0;
2549 
2550         if (current->flags & PF_EXITING) /* Let dying task have memory */
2551                 return 1;
2552 
2553         /* Not hardwall and node outside mems_allowed: scan up cpusets */
2554         spin_lock_irqsave(&callback_lock, flags);
2555 
2556         rcu_read_lock();
2557         cs = nearest_hardwall_ancestor(task_cs(current));
2558         allowed = node_isset(node, cs->mems_allowed);
2559         rcu_read_unlock();
2560 
2561         spin_unlock_irqrestore(&callback_lock, flags);
2562         return allowed;
2563 }
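
/*
 * Illustrative sketch (not part of the kernel source): the decision table
 * above with the kernel predicates reduced to booleans, in the same order
 * the function checks them.
 */
#include <stdbool.h>

static bool node_allowed(bool in_irq, bool node_in_mems, bool oom_killed,
                         bool hardwall_req, bool exiting,
                         bool node_in_hardwall_ancestor)
{
        if (in_irq)
                return true;            /* any node ok in interrupt */
        if (node_in_mems)
                return true;            /* in current->mems_allowed */
        if (oom_killed)
                return true;            /* TIF_MEMDIE: reserves allowed */
        if (hardwall_req)
                return false;           /* __GFP_HARDWALL: stop here */
        if (exiting)
                return true;            /* let a dying task have memory */
        return node_in_hardwall_ancestor;
}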
2564 
2565 /**
2566  * cpuset_mem_spread_node() - On which node to begin search for a file page
2567  * cpuset_slab_spread_node() - On which node to begin search for a slab page
2568  *
2569  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2570  * tasks in a cpuset with is_spread_page or is_spread_slab set),
2571  * and if the memory allocation used cpuset_mem_spread_node()
2572  * to determine on which node to start looking, as it will for
2573  * certain page cache or slab cache pages such as used for file
2574  * system buffers and inode caches, then instead of starting the search
2575  * for a free page on the local node, the starting node is spread
2576  * around the task's mems_allowed nodes.
2577  *
2578  * We don't have to worry about the returned node being offline
2579  * because "it can't happen", and even if it did, it would be ok.
2580  *
2581  * The routines calling guarantee_online_mems() are careful to
2582  * only set nodes in task->mems_allowed that are online.  So it
2583  * should not be possible for the following code to return an
2584  * offline node.  But if it did, that would be ok, as this routine
2585  * is not returning the node where the allocation must be, only
2586  * the node where the search should start.  The zonelist passed to
2587  * __alloc_pages() will include all nodes.  If the slab allocator
2588  * is passed an offline node, it will fall back to the local node.
2589  * See kmem_cache_alloc_node().
2590  */
2591 
2592 static int cpuset_spread_node(int *rotor)
2593 {
2594         int node;
2595 
2596         node = next_node(*rotor, current->mems_allowed);
2597         if (node == MAX_NUMNODES)
2598                 node = first_node(current->mems_allowed);
2599         *rotor = node;
2600         return node;
2601 }
2602 
2603 int cpuset_mem_spread_node(void)
2604 {
2605         if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2606                 current->cpuset_mem_spread_rotor =
2607                         node_random(&current->mems_allowed);
2608 
2609         return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2610 }
2611 
2612 int cpuset_slab_spread_node(void)
2613 {
2614         if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2615                 current->cpuset_slab_spread_rotor =
2616                         node_random(&current->mems_allowed);
2617 
2618         return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2619 }
2620 
2621 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
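
/*
 * Illustrative sketch (not part of the kernel source): the round-robin
 * rotor behind cpuset_spread_node(), restated over a 64-bit nodemask.
 */
#include <stdint.h>

static int next_set_bit(uint64_t mask, int after)
{
        for (int i = after + 1; i < 64; i++)
                if (mask & (1ULL << i))
                        return i;
        return -1;
}

static int spread_node(uint64_t mems, int *rotor)
{
        int node = next_set_bit(mems, *rotor);

        if (node < 0)                   /* wrapped: restart at first node */
                node = next_set_bit(mems, -1);
        *rotor = node;
        return node;
}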
2622 
2623 /**
2624  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2625  * @tsk1: pointer to task_struct of some task.
2626  * @tsk2: pointer to task_struct of some other task.
2627  *
2628  * Description: Return true if @tsk1's mems_allowed intersects the
2629  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2630  * one of the task's memory usage might impact the memory available
2631  * to the other.
2632  **/
2633 
2634 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2635                                    const struct task_struct *tsk2)
2636 {
2637         return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2638 }
2639 
2640 /**
2641  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
2642  *
2643  * Description: Prints current's name, cpuset name, and a cached copy of its
2644  * mems_allowed to the kernel log.
2645  */
2646 void cpuset_print_current_mems_allowed(void)
2647 {
2648         struct cgroup *cgrp;
2649 
2650         rcu_read_lock();
2651 
2652         cgrp = task_cs(current)->css.cgroup;
2653         pr_info("%s cpuset=", current->comm);
2654         pr_cont_cgroup_name(cgrp);
2655         pr_cont(" mems_allowed=%*pbl\n",
2656                 nodemask_pr_args(&current->mems_allowed));
2657 
2658         rcu_read_unlock();
2659 }
2660 
2661 /*
2662  * Collection of memory_pressure is suppressed unless
2663  * this flag is enabled by writing "1" to the special
2664  * cpuset file 'memory_pressure_enabled' in the root cpuset.
2665  */
2666 
2667 int cpuset_memory_pressure_enabled __read_mostly;
2668 
2669 /**
2670  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2671  *
2672  * Keep a running average of the rate of synchronous (direct)
2673  * page reclaim efforts initiated by tasks in each cpuset.
2674  *
2675  * This represents the rate at which some task in the cpuset
2676  * ran low on memory on all nodes it was allowed to use, and
2677  * had to enter the kernel's page reclaim code in an effort to
2678  * create more free memory by tossing clean pages or swapping
2679  * or writing dirty pages.
2680  *
2681  * Displayed to user space via the per-cpuset read-only file
2682  * "memory_pressure".  The value displayed is an integer
2683  * representing the recent rate of entry into the synchronous
2684  * (direct) page reclaim by any task attached to the cpuset.
2685  **/
2686 
2687 void __cpuset_memory_pressure_bump(void)
2688 {
2689         rcu_read_lock();
2690         fmeter_markevent(&task_cs(current)->fmeter);
2691         rcu_read_unlock();
2692 }
2693 
2694 #ifdef CONFIG_PROC_PID_CPUSET
2695 /*
2696  * proc_cpuset_show()
2697  *  - Print a task's cpuset path into seq_file.
2698  *  - Used for /proc/<pid>/cpuset.
2699  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2700  *    doesn't really matter if tsk->cpuset changes after we read it,
2701  *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
2702  *    anyway.
2703  */
2704 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
2705                      struct pid *pid, struct task_struct *tsk)
2706 {
2707         char *buf, *p;
2708         struct cgroup_subsys_state *css;
2709         int retval;
2710 
2711         retval = -ENOMEM;
2712         buf = kmalloc(PATH_MAX, GFP_KERNEL);
2713         if (!buf)
2714                 goto out;
2715 
2716         retval = -ENAMETOOLONG;
2717         css = task_get_css(tsk, cpuset_cgrp_id);
2718         p = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
2719                            current->nsproxy->cgroup_ns);
2720         css_put(css);
2721         if (!p)
2722                 goto out_free;
2723         seq_puts(m, p);
2724         seq_putc(m, '\n');
2725         retval = 0;
2726 out_free:
2727         kfree(buf);
2728 out:
2729         return retval;
2730 }
2731 #endif /* CONFIG_PROC_PID_CPUSET */
2732 
2733 /* Display task mems_allowed in /proc/<pid>/status file. */
2734 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2735 {
2736         seq_printf(m, "Mems_allowed:\t%*pb\n",
2737                    nodemask_pr_args(&task->mems_allowed));
2738         seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
2739                    nodemask_pr_args(&task->mems_allowed));
2740 }
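
/*
 * Illustrative sketch (not part of the kernel source): the two lines
 * printed above can be read back from userspace via /proc/self/status.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "Mems_allowed", 12))
                        fputs(line, stdout);    /* mask and list forms */
        fclose(f);
        return 0;
}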
2741 
