Version:  2.0.40 2.2.26 2.4.37 2.6.39 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15

Linux/kernel/cpuset.c

  1 /*
  2  *  kernel/cpuset.c
  3  *
  4  *  Processor and Memory placement constraints for sets of tasks.
  5  *
  6  *  Copyright (C) 2003 BULL SA.
  7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
  8  *  Copyright (C) 2006 Google, Inc
  9  *
 10  *  Portions derived from Patrick Mochel's sysfs code.
 11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 12  *
 13  *  2003-10-10 Written by Simon Derr.
 14  *  2003-10-22 Updates by Stephen Hemminger.
 15  *  2004 May-July Rework by Paul Jackson.
 16  *  2006 Rework by Paul Menage to use generic cgroups
 17  *  2008 Rework of the scheduler domains and CPU hotplug handling
 18  *       by Max Krasnyansky
 19  *
 20  *  This file is subject to the terms and conditions of the GNU General Public
 21  *  License.  See the file COPYING in the main directory of the Linux
 22  *  distribution for more details.
 23  */
 24 
 25 #include <linux/cpu.h>
 26 #include <linux/cpumask.h>
 27 #include <linux/cpuset.h>
 28 #include <linux/err.h>
 29 #include <linux/errno.h>
 30 #include <linux/file.h>
 31 #include <linux/fs.h>
 32 #include <linux/init.h>
 33 #include <linux/interrupt.h>
 34 #include <linux/kernel.h>
 35 #include <linux/kmod.h>
 36 #include <linux/list.h>
 37 #include <linux/mempolicy.h>
 38 #include <linux/mm.h>
 39 #include <linux/memory.h>
 40 #include <linux/export.h>
 41 #include <linux/mount.h>
 42 #include <linux/namei.h>
 43 #include <linux/pagemap.h>
 44 #include <linux/proc_fs.h>
 45 #include <linux/rcupdate.h>
 46 #include <linux/sched.h>
 47 #include <linux/seq_file.h>
 48 #include <linux/security.h>
 49 #include <linux/slab.h>
 50 #include <linux/spinlock.h>
 51 #include <linux/stat.h>
 52 #include <linux/string.h>
 53 #include <linux/time.h>
 54 #include <linux/backing-dev.h>
 55 #include <linux/sort.h>
 56 
 57 #include <asm/uaccess.h>
 58 #include <linux/atomic.h>
 59 #include <linux/mutex.h>
 60 #include <linux/workqueue.h>
 61 #include <linux/cgroup.h>
 62 #include <linux/wait.h>
 63 
 64 /*
 65  * Tracks how many cpusets are currently defined in system.
 66  * When there is only one cpuset (the root cpuset) we can
 67  * short circuit some hooks.
 68  */
 69 int number_of_cpusets __read_mostly;
 70 
 71 /* See "Frequency meter" comments, below. */
 72 
 73 struct fmeter {
 74         int cnt;                /* unprocessed events count */
 75         int val;                /* most recent output value */
 76         time_t time;            /* clock (secs) when val computed */
 77         spinlock_t lock;        /* guards read or write of above */
 78 };
 79 
 80 struct cpuset {
 81         struct cgroup_subsys_state css;
 82 
 83         unsigned long flags;            /* "unsigned long" so bitops work */
 84         cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
 85         nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
 86 
 87         /*
 88          * This is old Memory Nodes tasks took on.
 89          *
 90          * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
 91          * - A new cpuset's old_mems_allowed is initialized when some
 92          *   task is moved into it.
 93          * - old_mems_allowed is used in cpuset_migrate_mm() when we change
 94          *   cpuset.mems_allowed and have tasks' nodemask updated, and
 95          *   then old_mems_allowed is updated to mems_allowed.
 96          */
 97         nodemask_t old_mems_allowed;
 98 
 99         struct fmeter fmeter;           /* memory_pressure filter */
100 
101         /*
102          * Tasks are being attached to this cpuset.  Used to prevent
103          * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
104          */
105         int attach_in_progress;
106 
107         /* partition number for rebuild_sched_domains() */
108         int pn;
109 
110         /* for custom sched domain */
111         int relax_domain_level;
112 };
113 
114 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
115 {
116         return css ? container_of(css, struct cpuset, css) : NULL;
117 }
118 
119 /* Retrieve the cpuset for a task */
120 static inline struct cpuset *task_cs(struct task_struct *task)
121 {
122         return css_cs(task_css(task, cpuset_cgrp_id));
123 }
124 
125 static inline struct cpuset *parent_cs(struct cpuset *cs)
126 {
127         return css_cs(css_parent(&cs->css));
128 }
129 
130 #ifdef CONFIG_NUMA
131 static inline bool task_has_mempolicy(struct task_struct *task)
132 {
133         return task->mempolicy;
134 }
135 #else
136 static inline bool task_has_mempolicy(struct task_struct *task)
137 {
138         return false;
139 }
140 #endif
141 
142 
143 /* bits in struct cpuset flags field */
144 typedef enum {
145         CS_ONLINE,
146         CS_CPU_EXCLUSIVE,
147         CS_MEM_EXCLUSIVE,
148         CS_MEM_HARDWALL,
149         CS_MEMORY_MIGRATE,
150         CS_SCHED_LOAD_BALANCE,
151         CS_SPREAD_PAGE,
152         CS_SPREAD_SLAB,
153 } cpuset_flagbits_t;
154 
155 /* convenient tests for these bits */
156 static inline bool is_cpuset_online(const struct cpuset *cs)
157 {
158         return test_bit(CS_ONLINE, &cs->flags);
159 }
160 
161 static inline int is_cpu_exclusive(const struct cpuset *cs)
162 {
163         return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
164 }
165 
166 static inline int is_mem_exclusive(const struct cpuset *cs)
167 {
168         return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
169 }
170 
171 static inline int is_mem_hardwall(const struct cpuset *cs)
172 {
173         return test_bit(CS_MEM_HARDWALL, &cs->flags);
174 }
175 
176 static inline int is_sched_load_balance(const struct cpuset *cs)
177 {
178         return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
179 }
180 
181 static inline int is_memory_migrate(const struct cpuset *cs)
182 {
183         return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
184 }
185 
186 static inline int is_spread_page(const struct cpuset *cs)
187 {
188         return test_bit(CS_SPREAD_PAGE, &cs->flags);
189 }
190 
191 static inline int is_spread_slab(const struct cpuset *cs)
192 {
193         return test_bit(CS_SPREAD_SLAB, &cs->flags);
194 }
195 
196 static struct cpuset top_cpuset = {
197         .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
198                   (1 << CS_MEM_EXCLUSIVE)),
199 };
200 
201 /**
202  * cpuset_for_each_child - traverse online children of a cpuset
203  * @child_cs: loop cursor pointing to the current child
204  * @pos_css: used for iteration
205  * @parent_cs: target cpuset to walk children of
206  *
207  * Walk @child_cs through the online children of @parent_cs.  Must be used
208  * with RCU read locked.
209  */
210 #define cpuset_for_each_child(child_cs, pos_css, parent_cs)             \
211         css_for_each_child((pos_css), &(parent_cs)->css)                \
212                 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
213 
214 /**
215  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
216  * @des_cs: loop cursor pointing to the current descendant
217  * @pos_css: used for iteration
218  * @root_cs: target cpuset to walk ancestor of
219  *
220  * Walk @des_cs through the online descendants of @root_cs.  Must be used
221  * with RCU read locked.  The caller may modify @pos_css by calling
222  * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
223  * iteration and the first node to be visited.
224  */
225 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)        \
226         css_for_each_descendant_pre((pos_css), &(root_cs)->css)         \
227                 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
228 
229 /*
230  * There are two global mutexes guarding cpuset structures - cpuset_mutex
231  * and callback_mutex.  The latter may nest inside the former.  We also
232  * require taking task_lock() when dereferencing a task's cpuset pointer.
233  * See "The task_lock() exception", at the end of this comment.
234  *
235  * A task must hold both mutexes to modify cpusets.  If a task holds
236  * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
237  * is the only task able to also acquire callback_mutex and be able to
238  * modify cpusets.  It can perform various checks on the cpuset structure
239  * first, knowing nothing will change.  It can also allocate memory while
240  * just holding cpuset_mutex.  While it is performing these checks, various
241  * callback routines can briefly acquire callback_mutex to query cpusets.
242  * Once it is ready to make the changes, it takes callback_mutex, blocking
243  * everyone else.
244  *
245  * Calls to the kernel memory allocator can not be made while holding
246  * callback_mutex, as that would risk double tripping on callback_mutex
247  * from one of the callbacks into the cpuset code from within
248  * __alloc_pages().
249  *
250  * If a task is only holding callback_mutex, then it has read-only
251  * access to cpusets.
252  *
253  * Now, the task_struct fields mems_allowed and mempolicy may be changed
254  * by other task, we use alloc_lock in the task_struct fields to protect
255  * them.
256  *
257  * The cpuset_common_file_read() handlers only hold callback_mutex across
258  * small pieces of code, such as when reading out possibly multi-word
259  * cpumasks and nodemasks.
260  *
261  * Accessing a task's cpuset should be done in accordance with the
262  * guidelines for accessing subsystem state in kernel/cgroup.c
263  */
264 
265 static DEFINE_MUTEX(cpuset_mutex);
266 static DEFINE_MUTEX(callback_mutex);
267 
268 /*
269  * CPU / memory hotplug is handled asynchronously.
270  */
271 static void cpuset_hotplug_workfn(struct work_struct *work);
272 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
273 
274 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
275 
276 /*
277  * This is ugly, but preserves the userspace API for existing cpuset
278  * users. If someone tries to mount the "cpuset" filesystem, we
279  * silently switch it to mount "cgroup" instead
280  */
281 static struct dentry *cpuset_mount(struct file_system_type *fs_type,
282                          int flags, const char *unused_dev_name, void *data)
283 {
284         struct file_system_type *cgroup_fs = get_fs_type("cgroup");
285         struct dentry *ret = ERR_PTR(-ENODEV);
286         if (cgroup_fs) {
287                 char mountopts[] =
288                         "cpuset,noprefix,"
289                         "release_agent=/sbin/cpuset_release_agent";
290                 ret = cgroup_fs->mount(cgroup_fs, flags,
291                                            unused_dev_name, mountopts);
292                 put_filesystem(cgroup_fs);
293         }
294         return ret;
295 }
296 
297 static struct file_system_type cpuset_fs_type = {
298         .name = "cpuset",
299         .mount = cpuset_mount,
300 };
301 
302 /*
303  * Return in pmask the portion of a cpusets's cpus_allowed that
304  * are online.  If none are online, walk up the cpuset hierarchy
305  * until we find one that does have some online cpus.  The top
306  * cpuset always has some cpus online.
307  *
308  * One way or another, we guarantee to return some non-empty subset
309  * of cpu_online_mask.
310  *
311  * Call with callback_mutex held.
312  */
313 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
314 {
315         while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
316                 cs = parent_cs(cs);
317         cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
318 }
319 
320 /*
321  * Return in *pmask the portion of a cpusets's mems_allowed that
322  * are online, with memory.  If none are online with memory, walk
323  * up the cpuset hierarchy until we find one that does have some
324  * online mems.  The top cpuset always has some mems online.
325  *
326  * One way or another, we guarantee to return some non-empty subset
327  * of node_states[N_MEMORY].
328  *
329  * Call with callback_mutex held.
330  */
331 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
332 {
333         while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
334                 cs = parent_cs(cs);
335         nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
336 }
337 
338 /*
339  * update task's spread flag if cpuset's page/slab spread flag is set
340  *
341  * Called with callback_mutex/cpuset_mutex held
342  */
343 static void cpuset_update_task_spread_flag(struct cpuset *cs,
344                                         struct task_struct *tsk)
345 {
346         if (is_spread_page(cs))
347                 tsk->flags |= PF_SPREAD_PAGE;
348         else
349                 tsk->flags &= ~PF_SPREAD_PAGE;
350         if (is_spread_slab(cs))
351                 tsk->flags |= PF_SPREAD_SLAB;
352         else
353                 tsk->flags &= ~PF_SPREAD_SLAB;
354 }
355 
356 /*
357  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
358  *
359  * One cpuset is a subset of another if all its allowed CPUs and
360  * Memory Nodes are a subset of the other, and its exclusive flags
361  * are only set if the other's are set.  Call holding cpuset_mutex.
362  */
363 
364 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
365 {
366         return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
367                 nodes_subset(p->mems_allowed, q->mems_allowed) &&
368                 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
369                 is_mem_exclusive(p) <= is_mem_exclusive(q);
370 }
371 
372 /**
373  * alloc_trial_cpuset - allocate a trial cpuset
374  * @cs: the cpuset that the trial cpuset duplicates
375  */
376 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
377 {
378         struct cpuset *trial;
379 
380         trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
381         if (!trial)
382                 return NULL;
383 
384         if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
385                 kfree(trial);
386                 return NULL;
387         }
388         cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
389 
390         return trial;
391 }
392 
393 /**
394  * free_trial_cpuset - free the trial cpuset
395  * @trial: the trial cpuset to be freed
396  */
397 static void free_trial_cpuset(struct cpuset *trial)
398 {
399         free_cpumask_var(trial->cpus_allowed);
400         kfree(trial);
401 }
402 
403 /*
404  * validate_change() - Used to validate that any proposed cpuset change
405  *                     follows the structural rules for cpusets.
406  *
407  * If we replaced the flag and mask values of the current cpuset
408  * (cur) with those values in the trial cpuset (trial), would
409  * our various subset and exclusive rules still be valid?  Presumes
410  * cpuset_mutex held.
411  *
412  * 'cur' is the address of an actual, in-use cpuset.  Operations
413  * such as list traversal that depend on the actual address of the
414  * cpuset in the list must use cur below, not trial.
415  *
416  * 'trial' is the address of bulk structure copy of cur, with
417  * perhaps one or more of the fields cpus_allowed, mems_allowed,
418  * or flags changed to new, trial values.
419  *
420  * Return 0 if valid, -errno if not.
421  */
422 
423 static int validate_change(struct cpuset *cur, struct cpuset *trial)
424 {
425         struct cgroup_subsys_state *css;
426         struct cpuset *c, *par;
427         int ret;
428 
429         rcu_read_lock();
430 
431         /* Each of our child cpusets must be a subset of us */
432         ret = -EBUSY;
433         cpuset_for_each_child(c, css, cur)
434                 if (!is_cpuset_subset(c, trial))
435                         goto out;
436 
437         /* Remaining checks don't apply to root cpuset */
438         ret = 0;
439         if (cur == &top_cpuset)
440                 goto out;
441 
442         par = parent_cs(cur);
443 
444         /* We must be a subset of our parent cpuset */
445         ret = -EACCES;
446         if (!is_cpuset_subset(trial, par))
447                 goto out;
448 
449         /*
450          * If either I or some sibling (!= me) is exclusive, we can't
451          * overlap
452          */
453         ret = -EINVAL;
454         cpuset_for_each_child(c, css, par) {
455                 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
456                     c != cur &&
457                     cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
458                         goto out;
459                 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
460                     c != cur &&
461                     nodes_intersects(trial->mems_allowed, c->mems_allowed))
462                         goto out;
463         }
464 
465         /*
466          * Cpusets with tasks - existing or newly being attached - can't
467          * be changed to have empty cpus_allowed or mems_allowed.
468          */
469         ret = -ENOSPC;
470         if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
471                 if (!cpumask_empty(cur->cpus_allowed) &&
472                     cpumask_empty(trial->cpus_allowed))
473                         goto out;
474                 if (!nodes_empty(cur->mems_allowed) &&
475                     nodes_empty(trial->mems_allowed))
476                         goto out;
477         }
478 
479         ret = 0;
480 out:
481         rcu_read_unlock();
482         return ret;
483 }
484 
485 #ifdef CONFIG_SMP
486 /*
487  * Helper routine for generate_sched_domains().
488  * Do cpusets a, b have overlapping cpus_allowed masks?
489  */
490 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
491 {
492         return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
493 }
494 
495 static void
496 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
497 {
498         if (dattr->relax_domain_level < c->relax_domain_level)
499                 dattr->relax_domain_level = c->relax_domain_level;
500         return;
501 }
502 
503 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
504                                     struct cpuset *root_cs)
505 {
506         struct cpuset *cp;
507         struct cgroup_subsys_state *pos_css;
508 
509         rcu_read_lock();
510         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
511                 if (cp == root_cs)
512                         continue;
513 
514                 /* skip the whole subtree if @cp doesn't have any CPU */
515                 if (cpumask_empty(cp->cpus_allowed)) {
516                         pos_css = css_rightmost_descendant(pos_css);
517                         continue;
518                 }
519 
520                 if (is_sched_load_balance(cp))
521                         update_domain_attr(dattr, cp);
522         }
523         rcu_read_unlock();
524 }
525 
526 /*
527  * generate_sched_domains()
528  *
529  * This function builds a partial partition of the systems CPUs
530  * A 'partial partition' is a set of non-overlapping subsets whose
531  * union is a subset of that set.
532  * The output of this function needs to be passed to kernel/sched/core.c
533  * partition_sched_domains() routine, which will rebuild the scheduler's
534  * load balancing domains (sched domains) as specified by that partial
535  * partition.
536  *
537  * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
538  * for a background explanation of this.
539  *
540  * Does not return errors, on the theory that the callers of this
541  * routine would rather not worry about failures to rebuild sched
542  * domains when operating in the severe memory shortage situations
543  * that could cause allocation failures below.
544  *
545  * Must be called with cpuset_mutex held.
546  *
547  * The three key local variables below are:
548  *    q  - a linked-list queue of cpuset pointers, used to implement a
549  *         top-down scan of all cpusets.  This scan loads a pointer
550  *         to each cpuset marked is_sched_load_balance into the
551  *         array 'csa'.  For our purposes, rebuilding the schedulers
552  *         sched domains, we can ignore !is_sched_load_balance cpusets.
553  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
554  *         that need to be load balanced, for convenient iterative
555  *         access by the subsequent code that finds the best partition,
556  *         i.e the set of domains (subsets) of CPUs such that the
557  *         cpus_allowed of every cpuset marked is_sched_load_balance
558  *         is a subset of one of these domains, while there are as
559  *         many such domains as possible, each as small as possible.
560  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
561  *         the kernel/sched/core.c routine partition_sched_domains() in a
562  *         convenient format, that can be easily compared to the prior
563  *         value to determine what partition elements (sched domains)
564  *         were changed (added or removed.)
565  *
566  * Finding the best partition (set of domains):
567  *      The triple nested loops below over i, j, k scan over the
568  *      load balanced cpusets (using the array of cpuset pointers in
569  *      csa[]) looking for pairs of cpusets that have overlapping
570  *      cpus_allowed, but which don't have the same 'pn' partition
571  *      number and gives them in the same partition number.  It keeps
572  *      looping on the 'restart' label until it can no longer find
573  *      any such pairs.
574  *
575  *      The union of the cpus_allowed masks from the set of
576  *      all cpusets having the same 'pn' value then form the one
577  *      element of the partition (one sched domain) to be passed to
578  *      partition_sched_domains().
579  */
580 static int generate_sched_domains(cpumask_var_t **domains,
581                         struct sched_domain_attr **attributes)
582 {
583         struct cpuset *cp;      /* scans q */
584         struct cpuset **csa;    /* array of all cpuset ptrs */
585         int csn;                /* how many cpuset ptrs in csa so far */
586         int i, j, k;            /* indices for partition finding loops */
587         cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
588         struct sched_domain_attr *dattr;  /* attributes for custom domains */
589         int ndoms = 0;          /* number of sched domains in result */
590         int nslot;              /* next empty doms[] struct cpumask slot */
591         struct cgroup_subsys_state *pos_css;
592 
593         doms = NULL;
594         dattr = NULL;
595         csa = NULL;
596 
597         /* Special case for the 99% of systems with one, full, sched domain */
598         if (is_sched_load_balance(&top_cpuset)) {
599                 ndoms = 1;
600                 doms = alloc_sched_domains(ndoms);
601                 if (!doms)
602                         goto done;
603 
604                 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
605                 if (dattr) {
606                         *dattr = SD_ATTR_INIT;
607                         update_domain_attr_tree(dattr, &top_cpuset);
608                 }
609                 cpumask_copy(doms[0], top_cpuset.cpus_allowed);
610 
611                 goto done;
612         }
613 
614         csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
615         if (!csa)
616                 goto done;
617         csn = 0;
618 
619         rcu_read_lock();
620         cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
621                 if (cp == &top_cpuset)
622                         continue;
623                 /*
624                  * Continue traversing beyond @cp iff @cp has some CPUs and
625                  * isn't load balancing.  The former is obvious.  The
626                  * latter: All child cpusets contain a subset of the
627                  * parent's cpus, so just skip them, and then we call
628                  * update_domain_attr_tree() to calc relax_domain_level of
629                  * the corresponding sched domain.
630                  */
631                 if (!cpumask_empty(cp->cpus_allowed) &&
632                     !is_sched_load_balance(cp))
633                         continue;
634 
635                 if (is_sched_load_balance(cp))
636                         csa[csn++] = cp;
637 
638                 /* skip @cp's subtree */
639                 pos_css = css_rightmost_descendant(pos_css);
640         }
641         rcu_read_unlock();
642 
643         for (i = 0; i < csn; i++)
644                 csa[i]->pn = i;
645         ndoms = csn;
646 
647 restart:
648         /* Find the best partition (set of sched domains) */
649         for (i = 0; i < csn; i++) {
650                 struct cpuset *a = csa[i];
651                 int apn = a->pn;
652 
653                 for (j = 0; j < csn; j++) {
654                         struct cpuset *b = csa[j];
655                         int bpn = b->pn;
656 
657                         if (apn != bpn && cpusets_overlap(a, b)) {
658                                 for (k = 0; k < csn; k++) {
659                                         struct cpuset *c = csa[k];
660 
661                                         if (c->pn == bpn)
662                                                 c->pn = apn;
663                                 }
664                                 ndoms--;        /* one less element */
665                                 goto restart;
666                         }
667                 }
668         }
669 
670         /*
671          * Now we know how many domains to create.
672          * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
673          */
674         doms = alloc_sched_domains(ndoms);
675         if (!doms)
676                 goto done;
677 
678         /*
679          * The rest of the code, including the scheduler, can deal with
680          * dattr==NULL case. No need to abort if alloc fails.
681          */
682         dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
683 
684         for (nslot = 0, i = 0; i < csn; i++) {
685                 struct cpuset *a = csa[i];
686                 struct cpumask *dp;
687                 int apn = a->pn;
688 
689                 if (apn < 0) {
690                         /* Skip completed partitions */
691                         continue;
692                 }
693 
694                 dp = doms[nslot];
695 
696                 if (nslot == ndoms) {
697                         static int warnings = 10;
698                         if (warnings) {
699                                 printk(KERN_WARNING
700                                  "rebuild_sched_domains confused:"
701                                   " nslot %d, ndoms %d, csn %d, i %d,"
702                                   " apn %d\n",
703                                   nslot, ndoms, csn, i, apn);
704                                 warnings--;
705                         }
706                         continue;
707                 }
708 
709                 cpumask_clear(dp);
710                 if (dattr)
711                         *(dattr + nslot) = SD_ATTR_INIT;
712                 for (j = i; j < csn; j++) {
713                         struct cpuset *b = csa[j];
714 
715                         if (apn == b->pn) {
716                                 cpumask_or(dp, dp, b->cpus_allowed);
717                                 if (dattr)
718                                         update_domain_attr_tree(dattr + nslot, b);
719 
720                                 /* Done with this partition */
721                                 b->pn = -1;
722                         }
723                 }
724                 nslot++;
725         }
726         BUG_ON(nslot != ndoms);
727 
728 done:
729         kfree(csa);
730 
731         /*
732          * Fallback to the default domain if kmalloc() failed.
733          * See comments in partition_sched_domains().
734          */
735         if (doms == NULL)
736                 ndoms = 1;
737 
738         *domains    = doms;
739         *attributes = dattr;
740         return ndoms;
741 }
742 
743 /*
744  * Rebuild scheduler domains.
745  *
746  * If the flag 'sched_load_balance' of any cpuset with non-empty
747  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
748  * which has that flag enabled, or if any cpuset with a non-empty
749  * 'cpus' is removed, then call this routine to rebuild the
750  * scheduler's dynamic sched domains.
751  *
752  * Call with cpuset_mutex held.  Takes get_online_cpus().
753  */
754 static void rebuild_sched_domains_locked(void)
755 {
756         struct sched_domain_attr *attr;
757         cpumask_var_t *doms;
758         int ndoms;
759 
760         lockdep_assert_held(&cpuset_mutex);
761         get_online_cpus();
762 
763         /*
764          * We have raced with CPU hotplug. Don't do anything to avoid
765          * passing doms with offlined cpu to partition_sched_domains().
766          * Anyways, hotplug work item will rebuild sched domains.
767          */
768         if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
769                 goto out;
770 
771         /* Generate domain masks and attrs */
772         ndoms = generate_sched_domains(&doms, &attr);
773 
774         /* Have scheduler rebuild the domains */
775         partition_sched_domains(ndoms, doms, attr);
776 out:
777         put_online_cpus();
778 }
779 #else /* !CONFIG_SMP */
780 static void rebuild_sched_domains_locked(void)
781 {
782 }
783 #endif /* CONFIG_SMP */
784 
785 void rebuild_sched_domains(void)
786 {
787         mutex_lock(&cpuset_mutex);
788         rebuild_sched_domains_locked();
789         mutex_unlock(&cpuset_mutex);
790 }
791 
792 /*
793  * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
794  * @cs: the cpuset in interest
795  *
796  * A cpuset's effective cpumask is the cpumask of the nearest ancestor
797  * with non-empty cpus. We use effective cpumask whenever:
798  * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
799  *   if the cpuset they reside in has no cpus)
800  * - we want to retrieve task_cs(tsk)'s cpus_allowed.
801  *
802  * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
803  * exception. See comments there.
804  */
805 static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
806 {
807         while (cpumask_empty(cs->cpus_allowed))
808                 cs = parent_cs(cs);
809         return cs;
810 }
811 
812 /*
813  * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
814  * @cs: the cpuset in interest
815  *
816  * A cpuset's effective nodemask is the nodemask of the nearest ancestor
817  * with non-empty memss. We use effective nodemask whenever:
818  * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
819  *   if the cpuset they reside in has no mems)
820  * - we want to retrieve task_cs(tsk)'s mems_allowed.
821  *
822  * Called with cpuset_mutex held.
823  */
824 static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
825 {
826         while (nodes_empty(cs->mems_allowed))
827                 cs = parent_cs(cs);
828         return cs;
829 }
830 
831 /**
832  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
833  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
834  *
835  * Iterate through each task of @cs updating its cpus_allowed to the
836  * effective cpuset's.  As this function is called with cpuset_mutex held,
837  * cpuset membership stays stable.
838  */
839 static void update_tasks_cpumask(struct cpuset *cs)
840 {
841         struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
842         struct css_task_iter it;
843         struct task_struct *task;
844 
845         css_task_iter_start(&cs->css, &it);
846         while ((task = css_task_iter_next(&it)))
847                 set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed);
848         css_task_iter_end(&it);
849 }
850 
851 /*
852  * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
853  * @root_cs: the root cpuset of the hierarchy
854  * @update_root: update root cpuset or not?
855  *
856  * This will update cpumasks of tasks in @root_cs and all other empty cpusets
857  * which take on cpumask of @root_cs.
858  *
859  * Called with cpuset_mutex held
860  */
861 static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
862 {
863         struct cpuset *cp;
864         struct cgroup_subsys_state *pos_css;
865 
866         rcu_read_lock();
867         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
868                 if (cp == root_cs) {
869                         if (!update_root)
870                                 continue;
871                 } else {
872                         /* skip the whole subtree if @cp have some CPU */
873                         if (!cpumask_empty(cp->cpus_allowed)) {
874                                 pos_css = css_rightmost_descendant(pos_css);
875                                 continue;
876                         }
877                 }
878                 if (!css_tryget(&cp->css))
879                         continue;
880                 rcu_read_unlock();
881 
882                 update_tasks_cpumask(cp);
883 
884                 rcu_read_lock();
885                 css_put(&cp->css);
886         }
887         rcu_read_unlock();
888 }
889 
890 /**
891  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
892  * @cs: the cpuset to consider
893  * @buf: buffer of cpu numbers written to this cpuset
894  */
895 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
896                           const char *buf)
897 {
898         int retval;
899         int is_load_balanced;
900 
901         /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
902         if (cs == &top_cpuset)
903                 return -EACCES;
904 
905         /*
906          * An empty cpus_allowed is ok only if the cpuset has no tasks.
907          * Since cpulist_parse() fails on an empty mask, we special case
908          * that parsing.  The validate_change() call ensures that cpusets
909          * with tasks have cpus.
910          */
911         if (!*buf) {
912                 cpumask_clear(trialcs->cpus_allowed);
913         } else {
914                 retval = cpulist_parse(buf, trialcs->cpus_allowed);
915                 if (retval < 0)
916                         return retval;
917 
918                 if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
919                         return -EINVAL;
920         }
921 
922         /* Nothing to do if the cpus didn't change */
923         if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
924                 return 0;
925 
926         retval = validate_change(cs, trialcs);
927         if (retval < 0)
928                 return retval;
929 
930         is_load_balanced = is_sched_load_balance(trialcs);
931 
932         mutex_lock(&callback_mutex);
933         cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
934         mutex_unlock(&callback_mutex);
935 
936         update_tasks_cpumask_hier(cs, true);
937 
938         if (is_load_balanced)
939                 rebuild_sched_domains_locked();
940         return 0;
941 }
942 
943 /*
944  * cpuset_migrate_mm
945  *
946  *    Migrate memory region from one set of nodes to another.
947  *
948  *    Temporarilly set tasks mems_allowed to target nodes of migration,
949  *    so that the migration code can allocate pages on these nodes.
950  *
951  *    While the mm_struct we are migrating is typically from some
952  *    other task, the task_struct mems_allowed that we are hacking
953  *    is for our current task, which must allocate new pages for that
954  *    migrating memory region.
955  */
956 
957 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
958                                                         const nodemask_t *to)
959 {
960         struct task_struct *tsk = current;
961         struct cpuset *mems_cs;
962 
963         tsk->mems_allowed = *to;
964 
965         do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
966 
967         rcu_read_lock();
968         mems_cs = effective_nodemask_cpuset(task_cs(tsk));
969         guarantee_online_mems(mems_cs, &tsk->mems_allowed);
970         rcu_read_unlock();
971 }
972 
973 /*
974  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
975  * @tsk: the task to change
976  * @newmems: new nodes that the task will be set
977  *
978  * In order to avoid seeing no nodes if the old and new nodes are disjoint,
979  * we structure updates as setting all new allowed nodes, then clearing newly
980  * disallowed ones.
981  */
982 static void cpuset_change_task_nodemask(struct task_struct *tsk,
983                                         nodemask_t *newmems)
984 {
985         bool need_loop;
986 
987         /*
988          * Allow tasks that have access to memory reserves because they have
989          * been OOM killed to get memory anywhere.
990          */
991         if (unlikely(test_thread_flag(TIF_MEMDIE)))
992                 return;
993         if (current->flags & PF_EXITING) /* Let dying task have memory */
994                 return;
995 
996         task_lock(tsk);
997         /*
998          * Determine if a loop is necessary if another thread is doing
999          * read_mems_allowed_begin().  If at least one node remains unchanged and
1000          * tsk does not have a mempolicy, then an empty nodemask will not be
1001          * possible when mems_allowed is larger than a word.
1002          */
1003         need_loop = task_has_mempolicy(tsk) ||
1004                         !nodes_intersects(*newmems, tsk->mems_allowed);
1005 
1006         if (need_loop) {
1007                 local_irq_disable();
1008                 write_seqcount_begin(&tsk->mems_allowed_seq);
1009         }
1010 
1011         nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1012         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
1013 
1014         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1015         tsk->mems_allowed = *newmems;
1016 
1017         if (need_loop) {
1018                 write_seqcount_end(&tsk->mems_allowed_seq);
1019                 local_irq_enable();
1020         }
1021 
1022         task_unlock(tsk);
1023 }
1024 
1025 static void *cpuset_being_rebound;
1026 
1027 /**
1028  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1029  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1030  *
1031  * Iterate through each task of @cs updating its mems_allowed to the
1032  * effective cpuset's.  As this function is called with cpuset_mutex held,
1033  * cpuset membership stays stable.
1034  */
1035 static void update_tasks_nodemask(struct cpuset *cs)
1036 {
1037         static nodemask_t newmems;      /* protected by cpuset_mutex */
1038         struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
1039         struct css_task_iter it;
1040         struct task_struct *task;
1041 
1042         cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
1043 
1044         guarantee_online_mems(mems_cs, &newmems);
1045 
1046         /*
1047          * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1048          * take while holding tasklist_lock.  Forks can happen - the
1049          * mpol_dup() cpuset_being_rebound check will catch such forks,
1050          * and rebind their vma mempolicies too.  Because we still hold
1051          * the global cpuset_mutex, we know that no other rebind effort
1052          * will be contending for the global variable cpuset_being_rebound.
1053          * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1054          * is idempotent.  Also migrate pages in each mm to new nodes.
1055          */
1056         css_task_iter_start(&cs->css, &it);
1057         while ((task = css_task_iter_next(&it))) {
1058                 struct mm_struct *mm;
1059                 bool migrate;
1060 
1061                 cpuset_change_task_nodemask(task, &newmems);
1062 
1063                 mm = get_task_mm(task);
1064                 if (!mm)
1065                         continue;
1066 
1067                 migrate = is_memory_migrate(cs);
1068 
1069                 mpol_rebind_mm(mm, &cs->mems_allowed);
1070                 if (migrate)
1071                         cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1072                 mmput(mm);
1073         }
1074         css_task_iter_end(&it);
1075 
1076         /*
1077          * All the tasks' nodemasks have been updated, update
1078          * cs->old_mems_allowed.
1079          */
1080         cs->old_mems_allowed = newmems;
1081 
1082         /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1083         cpuset_being_rebound = NULL;
1084 }
1085 
1086 /*
1087  * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
1088  * @cs: the root cpuset of the hierarchy
1089  * @update_root: update the root cpuset or not?
1090  *
1091  * This will update nodemasks of tasks in @root_cs and all other empty cpusets
1092  * which take on nodemask of @root_cs.
1093  *
1094  * Called with cpuset_mutex held
1095  */
1096 static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
1097 {
1098         struct cpuset *cp;
1099         struct cgroup_subsys_state *pos_css;
1100 
1101         rcu_read_lock();
1102         cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
1103                 if (cp == root_cs) {
1104                         if (!update_root)
1105                                 continue;
1106                 } else {
1107                         /* skip the whole subtree if @cp have some CPU */
1108                         if (!nodes_empty(cp->mems_allowed)) {
1109                                 pos_css = css_rightmost_descendant(pos_css);
1110                                 continue;
1111                         }
1112                 }
1113                 if (!css_tryget(&cp->css))
1114                         continue;
1115                 rcu_read_unlock();
1116 
1117                 update_tasks_nodemask(cp);
1118 
1119                 rcu_read_lock();
1120                 css_put(&cp->css);
1121         }
1122         rcu_read_unlock();
1123 }
1124 
1125 /*
1126  * Handle user request to change the 'mems' memory placement
1127  * of a cpuset.  Needs to validate the request, update the
1128  * cpusets mems_allowed, and for each task in the cpuset,
1129  * update mems_allowed and rebind task's mempolicy and any vma
1130  * mempolicies and if the cpuset is marked 'memory_migrate',
1131  * migrate the tasks pages to the new memory.
1132  *
1133  * Call with cpuset_mutex held.  May take callback_mutex during call.
1134  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1135  * lock each such tasks mm->mmap_sem, scan its vma's and rebind
1136  * their mempolicies to the cpusets new mems_allowed.
1137  */
1138 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1139                            const char *buf)
1140 {
1141         int retval;
1142 
1143         /*
1144          * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
1145          * it's read-only
1146          */
1147         if (cs == &top_cpuset) {
1148                 retval = -EACCES;
1149                 goto done;
1150         }
1151 
1152         /*
1153          * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1154          * Since nodelist_parse() fails on an empty mask, we special case
1155          * that parsing.  The validate_change() call ensures that cpusets
1156          * with tasks have memory.
1157          */
1158         if (!*buf) {
1159                 nodes_clear(trialcs->mems_allowed);
1160         } else {
1161                 retval = nodelist_parse(buf, trialcs->mems_allowed);
1162                 if (retval < 0)
1163                         goto done;
1164 
1165                 if (!nodes_subset(trialcs->mems_allowed,
1166                                 node_states[N_MEMORY])) {
1167                         retval =  -EINVAL;
1168                         goto done;
1169                 }
1170         }
1171 
1172         if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1173                 retval = 0;             /* Too easy - nothing to do */
1174                 goto done;
1175         }
1176         retval = validate_change(cs, trialcs);
1177         if (retval < 0)
1178                 goto done;
1179 
1180         mutex_lock(&callback_mutex);
1181         cs->mems_allowed = trialcs->mems_allowed;
1182         mutex_unlock(&callback_mutex);
1183 
1184         update_tasks_nodemask_hier(cs, true);
1185 done:
1186         return retval;
1187 }
1188 
1189 int current_cpuset_is_being_rebound(void)
1190 {
1191         return task_cs(current) == cpuset_being_rebound;
1192 }
1193 
1194 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1195 {
1196 #ifdef CONFIG_SMP
1197         if (val < -1 || val >= sched_domain_level_max)
1198                 return -EINVAL;
1199 #endif
1200 
1201         if (val != cs->relax_domain_level) {
1202                 cs->relax_domain_level = val;
1203                 if (!cpumask_empty(cs->cpus_allowed) &&
1204                     is_sched_load_balance(cs))
1205                         rebuild_sched_domains_locked();
1206         }
1207 
1208         return 0;
1209 }
1210 
1211 /**
1212  * update_tasks_flags - update the spread flags of tasks in the cpuset.
1213  * @cs: the cpuset in which each task's spread flags needs to be changed
1214  *
1215  * Iterate through each task of @cs updating its spread flags.  As this
1216  * function is called with cpuset_mutex held, cpuset membership stays
1217  * stable.
1218  */
1219 static void update_tasks_flags(struct cpuset *cs)
1220 {
1221         struct css_task_iter it;
1222         struct task_struct *task;
1223 
1224         css_task_iter_start(&cs->css, &it);
1225         while ((task = css_task_iter_next(&it)))
1226                 cpuset_update_task_spread_flag(cs, task);
1227         css_task_iter_end(&it);
1228 }
1229 
1230 /*
1231  * update_flag - read a 0 or a 1 in a file and update associated flag
1232  * bit:         the bit to update (see cpuset_flagbits_t)
1233  * cs:          the cpuset to update
1234  * turning_on:  whether the flag is being set or cleared
1235  *
1236  * Call with cpuset_mutex held.
1237  */
1238 
1239 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1240                        int turning_on)
1241 {
1242         struct cpuset *trialcs;
1243         int balance_flag_changed;
1244         int spread_flag_changed;
1245         int err;
1246 
1247         trialcs = alloc_trial_cpuset(cs);
1248         if (!trialcs)
1249                 return -ENOMEM;
1250 
1251         if (turning_on)
1252                 set_bit(bit, &trialcs->flags);
1253         else
1254                 clear_bit(bit, &trialcs->flags);
1255 
1256         err = validate_change(cs, trialcs);
1257         if (err < 0)
1258                 goto out;
1259 
1260         balance_flag_changed = (is_sched_load_balance(cs) !=
1261                                 is_sched_load_balance(trialcs));
1262 
1263         spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1264                         || (is_spread_page(cs) != is_spread_page(trialcs)));
1265 
1266         mutex_lock(&callback_mutex);
1267         cs->flags = trialcs->flags;
1268         mutex_unlock(&callback_mutex);
1269 
1270         if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1271                 rebuild_sched_domains_locked();
1272 
1273         if (spread_flag_changed)
1274                 update_tasks_flags(cs);
1275 out:
1276         free_trial_cpuset(trialcs);
1277         return err;
1278 }
1279 
1280 /*
1281  * Frequency meter - How fast is some event occurring?
1282  *
1283  * These routines manage a digitally filtered, constant time based,
1284  * event frequency meter.  There are four routines:
1285  *   fmeter_init() - initialize a frequency meter.
1286  *   fmeter_markevent() - called each time the event happens.
1287  *   fmeter_getrate() - returns the recent rate of such events.
1288  *   fmeter_update() - internal routine used to update fmeter.
1289  *
1290  * A common data structure is passed to each of these routines,
1291  * which is used to keep track of the state required to manage the
1292  * frequency meter and its digital filter.
1293  *
1294  * The filter works on the number of events marked per unit time.
1295  * The filter is single-pole low-pass recursive (IIR).  The time unit
1296  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1297  * simulate 3 decimal digits of precision (multiplied by 1000).
1298  *
1299  * With an FM_COEF of 933, and a time base of 1 second, the filter
1300  * has a half-life of 10 seconds, meaning that if the events quit
1301  * happening, then the rate returned from the fmeter_getrate()
1302  * will be cut in half each 10 seconds, until it converges to zero.
1303  *
1304  * It is not worth doing a real infinitely recursive filter.  If more
1305  * than FM_MAXTICKS ticks have elapsed since the last filter event,
1306  * just compute FM_MAXTICKS ticks worth, by which point the level
1307  * will be stable.
1308  *
1309  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1310  * arithmetic overflow in the fmeter_update() routine.
1311  *
1312  * Given the simple 32 bit integer arithmetic used, this meter works
1313  * best for reporting rates between one per millisecond (msec) and
1314  * one per 32 (approx) seconds.  At constant rates faster than one
1315  * per msec it maxes out at values just under 1,000,000.  At constant
1316  * rates between one per msec, and one per second it will stabilize
1317  * to a value N*1000, where N is the rate of events per second.
1318  * At constant rates between one per second and one per 32 seconds,
1319  * it will be choppy, moving up on the seconds that have an event,
1320  * and then decaying until the next event.  At rates slower than
1321  * about one in 32 seconds, it decays all the way back to zero between
1322  * each event.
1323  */
1324 
1325 #define FM_COEF 933             /* coefficient for half-life of 10 secs */
1326 #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1327 #define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
1328 #define FM_SCALE 1000           /* faux fixed point scale */
1329 
1330 /* Initialize a frequency meter */
1331 static void fmeter_init(struct fmeter *fmp)
1332 {
1333         fmp->cnt = 0;
1334         fmp->val = 0;
1335         fmp->time = 0;
1336         spin_lock_init(&fmp->lock);
1337 }
1338 
1339 /* Internal meter update - process cnt events and update value */
1340 static void fmeter_update(struct fmeter *fmp)
1341 {
1342         time_t now = get_seconds();
1343         time_t ticks = now - fmp->time;
1344 
1345         if (ticks == 0)
1346                 return;
1347 
1348         ticks = min(FM_MAXTICKS, ticks);
1349         while (ticks-- > 0)
1350                 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1351         fmp->time = now;
1352 
1353         fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1354         fmp->cnt = 0;
1355 }
1356 
1357 /* Process any previous ticks, then bump cnt by one (times scale). */
1358 static void fmeter_markevent(struct fmeter *fmp)
1359 {
1360         spin_lock(&fmp->lock);
1361         fmeter_update(fmp);
1362         fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1363         spin_unlock(&fmp->lock);
1364 }
1365 
1366 /* Process any previous ticks, then return current value. */
1367 static int fmeter_getrate(struct fmeter *fmp)
1368 {
1369         int val;
1370 
1371         spin_lock(&fmp->lock);
1372         fmeter_update(fmp);
1373         val = fmp->val;
1374         spin_unlock(&fmp->lock);
1375         return val;
1376 }
1377 
1378 static struct cpuset *cpuset_attach_old_cs;
1379 
1380 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1381 static int cpuset_can_attach(struct cgroup_subsys_state *css,
1382                              struct cgroup_taskset *tset)
1383 {
1384         struct cpuset *cs = css_cs(css);
1385         struct task_struct *task;
1386         int ret;
1387 
1388         /* used later by cpuset_attach() */
1389         cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
1390 
1391         mutex_lock(&cpuset_mutex);
1392 
1393         /*
1394          * We allow to move tasks into an empty cpuset if sane_behavior
1395          * flag is set.
1396          */
1397         ret = -ENOSPC;
1398         if (!cgroup_sane_behavior(css->cgroup) &&
1399             (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1400                 goto out_unlock;
1401 
1402         cgroup_taskset_for_each(task, tset) {
1403                 /*
1404                  * Kthreads which disallow setaffinity shouldn't be moved
1405                  * to a new cpuset; we don't want to change their cpu
1406                  * affinity and isolating such threads by their set of
1407                  * allowed nodes is unnecessary.  Thus, cpusets are not
1408                  * applicable for such threads.  This prevents checking for
1409                  * success of set_cpus_allowed_ptr() on all attached tasks
1410                  * before cpus_allowed may be changed.
1411                  */
1412                 ret = -EINVAL;
1413                 if (task->flags & PF_NO_SETAFFINITY)
1414                         goto out_unlock;
1415                 ret = security_task_setscheduler(task);
1416                 if (ret)
1417                         goto out_unlock;
1418         }
1419 
1420         /*
1421          * Mark attach is in progress.  This makes validate_change() fail
1422          * changes which zero cpus/mems_allowed.
1423          */
1424         cs->attach_in_progress++;
1425         ret = 0;
1426 out_unlock:
1427         mutex_unlock(&cpuset_mutex);
1428         return ret;
1429 }
1430 
1431 static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
1432                                  struct cgroup_taskset *tset)
1433 {
1434         mutex_lock(&cpuset_mutex);
1435         css_cs(css)->attach_in_progress--;
1436         mutex_unlock(&cpuset_mutex);
1437 }
1438 
1439 /*
1440  * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
1441  * but we can't allocate it dynamically there.  Define it global and
1442  * allocate from cpuset_init().
1443  */
1444 static cpumask_var_t cpus_attach;
1445 
1446 static void cpuset_attach(struct cgroup_subsys_state *css,
1447                           struct cgroup_taskset *tset)
1448 {
1449         /* static buf protected by cpuset_mutex */
1450         static nodemask_t cpuset_attach_nodemask_to;
1451         struct mm_struct *mm;
1452         struct task_struct *task;
1453         struct task_struct *leader = cgroup_taskset_first(tset);
1454         struct cpuset *cs = css_cs(css);
1455         struct cpuset *oldcs = cpuset_attach_old_cs;
1456         struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
1457         struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
1458 
1459         mutex_lock(&cpuset_mutex);
1460 
1461         /* prepare for attach */
1462         if (cs == &top_cpuset)
1463                 cpumask_copy(cpus_attach, cpu_possible_mask);
1464         else
1465                 guarantee_online_cpus(cpus_cs, cpus_attach);
1466 
1467         guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
1468 
1469         cgroup_taskset_for_each(task, tset) {
1470                 /*
1471                  * can_attach beforehand should guarantee that this doesn't
1472                  * fail.  TODO: have a better way to handle failure here
1473                  */
1474                 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1475 
1476                 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1477                 cpuset_update_task_spread_flag(cs, task);
1478         }
1479 
1480         /*
1481          * Change mm, possibly for multiple threads in a threadgroup. This is
1482          * expensive and may sleep.
1483          */
1484         cpuset_attach_nodemask_to = cs->mems_allowed;
1485         mm = get_task_mm(leader);
1486         if (mm) {
1487                 struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);
1488 
1489                 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1490 
1491                 /*
1492                  * old_mems_allowed is the same with mems_allowed here, except
1493                  * if this task is being moved automatically due to hotplug.
1494                  * In that case @mems_allowed has been updated and is empty,
1495                  * so @old_mems_allowed is the right nodesets that we migrate
1496                  * mm from.
1497                  */
1498                 if (is_memory_migrate(cs)) {
1499                         cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
1500                                           &cpuset_attach_nodemask_to);
1501                 }
1502                 mmput(mm);
1503         }
1504 
1505         cs->old_mems_allowed = cpuset_attach_nodemask_to;
1506 
1507         cs->attach_in_progress--;
1508         if (!cs->attach_in_progress)
1509                 wake_up(&cpuset_attach_wq);
1510 
1511         mutex_unlock(&cpuset_mutex);
1512 }
1513 
1514 /* The various types of files and directories in a cpuset file system */
1515 
1516 typedef enum {
1517         FILE_MEMORY_MIGRATE,
1518         FILE_CPULIST,
1519         FILE_MEMLIST,
1520         FILE_CPU_EXCLUSIVE,
1521         FILE_MEM_EXCLUSIVE,
1522         FILE_MEM_HARDWALL,
1523         FILE_SCHED_LOAD_BALANCE,
1524         FILE_SCHED_RELAX_DOMAIN_LEVEL,
1525         FILE_MEMORY_PRESSURE_ENABLED,
1526         FILE_MEMORY_PRESSURE,
1527         FILE_SPREAD_PAGE,
1528         FILE_SPREAD_SLAB,
1529 } cpuset_filetype_t;
1530 
1531 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
1532                             u64 val)
1533 {
1534         struct cpuset *cs = css_cs(css);
1535         cpuset_filetype_t type = cft->private;
1536         int retval = 0;
1537 
1538         mutex_lock(&cpuset_mutex);
1539         if (!is_cpuset_online(cs)) {
1540                 retval = -ENODEV;
1541                 goto out_unlock;
1542         }
1543 
1544         switch (type) {
1545         case FILE_CPU_EXCLUSIVE:
1546                 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1547                 break;
1548         case FILE_MEM_EXCLUSIVE:
1549                 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1550                 break;
1551         case FILE_MEM_HARDWALL:
1552                 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1553                 break;
1554         case FILE_SCHED_LOAD_BALANCE:
1555                 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1556                 break;
1557         case FILE_MEMORY_MIGRATE:
1558                 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1559                 break;
1560         case FILE_MEMORY_PRESSURE_ENABLED:
1561                 cpuset_memory_pressure_enabled = !!val;
1562                 break;
1563         case FILE_MEMORY_PRESSURE:
1564                 retval = -EACCES;
1565                 break;
1566         case FILE_SPREAD_PAGE:
1567                 retval = update_flag(CS_SPREAD_PAGE, cs, val);
1568                 break;
1569         case FILE_SPREAD_SLAB:
1570                 retval = update_flag(CS_SPREAD_SLAB, cs, val);
1571                 break;
1572         default:
1573                 retval = -EINVAL;
1574                 break;
1575         }
1576 out_unlock:
1577         mutex_unlock(&cpuset_mutex);
1578         return retval;
1579 }
1580 
1581 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
1582                             s64 val)
1583 {
1584         struct cpuset *cs = css_cs(css);
1585         cpuset_filetype_t type = cft->private;
1586         int retval = -ENODEV;
1587 
1588         mutex_lock(&cpuset_mutex);
1589         if (!is_cpuset_online(cs))
1590                 goto out_unlock;
1591 
1592         switch (type) {
1593         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1594                 retval = update_relax_domain_level(cs, val);
1595                 break;
1596         default:
1597                 retval = -EINVAL;
1598                 break;
1599         }
1600 out_unlock:
1601         mutex_unlock(&cpuset_mutex);
1602         return retval;
1603 }
1604 
1605 /*
1606  * Common handling for a write to a "cpus" or "mems" file.
1607  */
1608 static int cpuset_write_resmask(struct cgroup_subsys_state *css,
1609                                 struct cftype *cft, char *buf)
1610 {
1611         struct cpuset *cs = css_cs(css);
1612         struct cpuset *trialcs;
1613         int retval = -ENODEV;
1614 
1615         /*
1616          * CPU or memory hotunplug may leave @cs w/o any execution
1617          * resources, in which case the hotplug code asynchronously updates
1618          * configuration and transfers all tasks to the nearest ancestor
1619          * which can execute.
1620          *
1621          * As writes to "cpus" or "mems" may restore @cs's execution
1622          * resources, wait for the previously scheduled operations before
1623          * proceeding, so that we don't end up keep removing tasks added
1624          * after execution capability is restored.
1625          */
1626         flush_work(&cpuset_hotplug_work);
1627 
1628         mutex_lock(&cpuset_mutex);
1629         if (!is_cpuset_online(cs))
1630                 goto out_unlock;
1631 
1632         trialcs = alloc_trial_cpuset(cs);
1633         if (!trialcs) {
1634                 retval = -ENOMEM;
1635                 goto out_unlock;
1636         }
1637 
1638         switch (cft->private) {
1639         case FILE_CPULIST:
1640                 retval = update_cpumask(cs, trialcs, buf);
1641                 break;
1642         case FILE_MEMLIST:
1643                 retval = update_nodemask(cs, trialcs, buf);
1644                 break;
1645         default:
1646                 retval = -EINVAL;
1647                 break;
1648         }
1649 
1650         free_trial_cpuset(trialcs);
1651 out_unlock:
1652         mutex_unlock(&cpuset_mutex);
1653         return retval;
1654 }
1655 
1656 /*
1657  * These ascii lists should be read in a single call, by using a user
1658  * buffer large enough to hold the entire map.  If read in smaller
1659  * chunks, there is no guarantee of atomicity.  Since the display format
1660  * used, list of ranges of sequential numbers, is variable length,
1661  * and since these maps can change value dynamically, one could read
1662  * gibberish by doing partial reads while a list was changing.
1663  */
1664 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1665 {
1666         struct cpuset *cs = css_cs(seq_css(sf));
1667         cpuset_filetype_t type = seq_cft(sf)->private;
1668         ssize_t count;
1669         char *buf, *s;
1670         int ret = 0;
1671 
1672         count = seq_get_buf(sf, &buf);
1673         s = buf;
1674 
1675         mutex_lock(&callback_mutex);
1676 
1677         switch (type) {
1678         case FILE_CPULIST:
1679                 s += cpulist_scnprintf(s, count, cs->cpus_allowed);
1680                 break;
1681         case FILE_MEMLIST:
1682                 s += nodelist_scnprintf(s, count, cs->mems_allowed);
1683                 break;
1684         default:
1685                 ret = -EINVAL;
1686                 goto out_unlock;
1687         }
1688 
1689         if (s < buf + count - 1) {
1690                 *s++ = '\n';
1691                 seq_commit(sf, s - buf);
1692         } else {
1693                 seq_commit(sf, -1);
1694         }
1695 out_unlock:
1696         mutex_unlock(&callback_mutex);
1697         return ret;
1698 }
1699 
1700 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
1701 {
1702         struct cpuset *cs = css_cs(css);
1703         cpuset_filetype_t type = cft->private;
1704         switch (type) {
1705         case FILE_CPU_EXCLUSIVE:
1706                 return is_cpu_exclusive(cs);
1707         case FILE_MEM_EXCLUSIVE:
1708                 return is_mem_exclusive(cs);
1709         case FILE_MEM_HARDWALL:
1710                 return is_mem_hardwall(cs);
1711         case FILE_SCHED_LOAD_BALANCE:
1712                 return is_sched_load_balance(cs);
1713         case FILE_MEMORY_MIGRATE:
1714                 return is_memory_migrate(cs);
1715         case FILE_MEMORY_PRESSURE_ENABLED:
1716                 return cpuset_memory_pressure_enabled;
1717         case FILE_MEMORY_PRESSURE:
1718                 return fmeter_getrate(&cs->fmeter);
1719         case FILE_SPREAD_PAGE:
1720                 return is_spread_page(cs);
1721         case FILE_SPREAD_SLAB:
1722                 return is_spread_slab(cs);
1723         default:
1724                 BUG();
1725         }
1726 
1727         /* Unreachable but makes gcc happy */
1728         return 0;
1729 }
1730 
1731 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
1732 {
1733         struct cpuset *cs = css_cs(css);
1734         cpuset_filetype_t type = cft->private;
1735         switch (type) {
1736         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1737                 return cs->relax_domain_level;
1738         default:
1739                 BUG();
1740         }
1741 
1742         /* Unrechable but makes gcc happy */
1743         return 0;
1744 }
1745 
1746 
1747 /*
1748  * for the common functions, 'private' gives the type of file
1749  */
1750 
1751 static struct cftype files[] = {
1752         {
1753                 .name = "cpus",
1754                 .seq_show = cpuset_common_seq_show,
1755                 .write_string = cpuset_write_resmask,
1756                 .max_write_len = (100U + 6 * NR_CPUS),
1757                 .private = FILE_CPULIST,
1758         },
1759 
1760         {
1761                 .name = "mems",
1762                 .seq_show = cpuset_common_seq_show,
1763                 .write_string = cpuset_write_resmask,
1764                 .max_write_len = (100U + 6 * MAX_NUMNODES),
1765                 .private = FILE_MEMLIST,
1766         },
1767 
1768         {
1769                 .name = "cpu_exclusive",
1770                 .read_u64 = cpuset_read_u64,
1771                 .write_u64 = cpuset_write_u64,
1772                 .private = FILE_CPU_EXCLUSIVE,
1773         },
1774 
1775         {
1776                 .name = "mem_exclusive",
1777                 .read_u64 = cpuset_read_u64,
1778                 .write_u64 = cpuset_write_u64,
1779                 .private = FILE_MEM_EXCLUSIVE,
1780         },
1781 
1782         {
1783                 .name = "mem_hardwall",
1784                 .read_u64 = cpuset_read_u64,
1785                 .write_u64 = cpuset_write_u64,
1786                 .private = FILE_MEM_HARDWALL,
1787         },
1788 
1789         {
1790                 .name = "sched_load_balance",
1791                 .read_u64 = cpuset_read_u64,
1792                 .write_u64 = cpuset_write_u64,
1793                 .private = FILE_SCHED_LOAD_BALANCE,
1794         },
1795 
1796         {
1797                 .name = "sched_relax_domain_level",
1798                 .read_s64 = cpuset_read_s64,
1799                 .write_s64 = cpuset_write_s64,
1800                 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1801         },
1802 
1803         {
1804                 .name = "memory_migrate",
1805                 .read_u64 = cpuset_read_u64,
1806                 .write_u64 = cpuset_write_u64,
1807                 .private = FILE_MEMORY_MIGRATE,
1808         },
1809 
1810         {
1811                 .name = "memory_pressure",
1812                 .read_u64 = cpuset_read_u64,
1813                 .write_u64 = cpuset_write_u64,
1814                 .private = FILE_MEMORY_PRESSURE,
1815                 .mode = S_IRUGO,
1816         },
1817 
1818         {
1819                 .name = "memory_spread_page",
1820                 .read_u64 = cpuset_read_u64,
1821                 .write_u64 = cpuset_write_u64,
1822                 .private = FILE_SPREAD_PAGE,
1823         },
1824 
1825         {
1826                 .name = "memory_spread_slab",
1827                 .read_u64 = cpuset_read_u64,
1828                 .write_u64 = cpuset_write_u64,
1829                 .private = FILE_SPREAD_SLAB,
1830         },
1831 
1832         {
1833                 .name = "memory_pressure_enabled",
1834                 .flags = CFTYPE_ONLY_ON_ROOT,
1835                 .read_u64 = cpuset_read_u64,
1836                 .write_u64 = cpuset_write_u64,
1837                 .private = FILE_MEMORY_PRESSURE_ENABLED,
1838         },
1839 
1840         { }     /* terminate */
1841 };
1842 
1843 /*
1844  *      cpuset_css_alloc - allocate a cpuset css
1845  *      cgrp:   control group that the new cpuset will be part of
1846  */
1847 
1848 static struct cgroup_subsys_state *
1849 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
1850 {
1851         struct cpuset *cs;
1852 
1853         if (!parent_css)
1854                 return &top_cpuset.css;
1855 
1856         cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1857         if (!cs)
1858                 return ERR_PTR(-ENOMEM);
1859         if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1860                 kfree(cs);
1861                 return ERR_PTR(-ENOMEM);
1862         }
1863 
1864         set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1865         cpumask_clear(cs->cpus_allowed);
1866         nodes_clear(cs->mems_allowed);
1867         fmeter_init(&cs->fmeter);
1868         cs->relax_domain_level = -1;
1869 
1870         return &cs->css;
1871 }
1872 
1873 static int cpuset_css_online(struct cgroup_subsys_state *css)
1874 {
1875         struct cpuset *cs = css_cs(css);
1876         struct cpuset *parent = parent_cs(cs);
1877         struct cpuset *tmp_cs;
1878         struct cgroup_subsys_state *pos_css;
1879 
1880         if (!parent)
1881                 return 0;
1882 
1883         mutex_lock(&cpuset_mutex);
1884 
1885         set_bit(CS_ONLINE, &cs->flags);
1886         if (is_spread_page(parent))
1887                 set_bit(CS_SPREAD_PAGE, &cs->flags);
1888         if (is_spread_slab(parent))
1889                 set_bit(CS_SPREAD_SLAB, &cs->flags);
1890 
1891         number_of_cpusets++;
1892 
1893         if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1894                 goto out_unlock;
1895 
1896         /*
1897          * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
1898          * set.  This flag handling is implemented in cgroup core for
1899          * histrical reasons - the flag may be specified during mount.
1900          *
1901          * Currently, if any sibling cpusets have exclusive cpus or mem, we
1902          * refuse to clone the configuration - thereby refusing the task to
1903          * be entered, and as a result refusing the sys_unshare() or
1904          * clone() which initiated it.  If this becomes a problem for some
1905          * users who wish to allow that scenario, then this could be
1906          * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1907          * (and likewise for mems) to the new cgroup.
1908          */
1909         rcu_read_lock();
1910         cpuset_for_each_child(tmp_cs, pos_css, parent) {
1911                 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
1912                         rcu_read_unlock();
1913                         goto out_unlock;
1914                 }
1915         }
1916         rcu_read_unlock();
1917 
1918         mutex_lock(&callback_mutex);
1919         cs->mems_allowed = parent->mems_allowed;
1920         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1921         mutex_unlock(&callback_mutex);
1922 out_unlock:
1923         mutex_unlock(&cpuset_mutex);
1924         return 0;
1925 }
1926 
1927 /*
1928  * If the cpuset being removed has its flag 'sched_load_balance'
1929  * enabled, then simulate turning sched_load_balance off, which
1930  * will call rebuild_sched_domains_locked().
1931  */
1932 
1933 static void cpuset_css_offline(struct cgroup_subsys_state *css)
1934 {
1935         struct cpuset *cs = css_cs(css);
1936 
1937         mutex_lock(&cpuset_mutex);
1938 
1939         if (is_sched_load_balance(cs))
1940                 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1941 
1942         number_of_cpusets--;
1943         clear_bit(CS_ONLINE, &cs->flags);
1944 
1945         mutex_unlock(&cpuset_mutex);
1946 }
1947 
1948 static void cpuset_css_free(struct cgroup_subsys_state *css)
1949 {
1950         struct cpuset *cs = css_cs(css);
1951 
1952         free_cpumask_var(cs->cpus_allowed);
1953         kfree(cs);
1954 }
1955 
1956 struct cgroup_subsys cpuset_cgrp_subsys = {
1957         .css_alloc = cpuset_css_alloc,
1958         .css_online = cpuset_css_online,
1959         .css_offline = cpuset_css_offline,
1960         .css_free = cpuset_css_free,
1961         .can_attach = cpuset_can_attach,
1962         .cancel_attach = cpuset_cancel_attach,
1963         .attach = cpuset_attach,
1964         .base_cftypes = files,
1965         .early_init = 1,
1966 };
1967 
1968 /**
1969  * cpuset_init - initialize cpusets at system boot
1970  *
1971  * Description: Initialize top_cpuset and the cpuset internal file system,
1972  **/
1973 
1974 int __init cpuset_init(void)
1975 {
1976         int err = 0;
1977 
1978         if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1979                 BUG();
1980 
1981         cpumask_setall(top_cpuset.cpus_allowed);
1982         nodes_setall(top_cpuset.mems_allowed);
1983 
1984         fmeter_init(&top_cpuset.fmeter);
1985         set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1986         top_cpuset.relax_domain_level = -1;
1987 
1988         err = register_filesystem(&cpuset_fs_type);
1989         if (err < 0)
1990                 return err;
1991 
1992         if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1993                 BUG();
1994 
1995         number_of_cpusets = 1;
1996         return 0;
1997 }
1998 
1999 /*
2000  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
2001  * or memory nodes, we need to walk over the cpuset hierarchy,
2002  * removing that CPU or node from all cpusets.  If this removes the
2003  * last CPU or node from a cpuset, then move the tasks in the empty
2004  * cpuset to its next-highest non-empty parent.
2005  */
2006 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2007 {
2008         struct cpuset *parent;
2009 
2010         /*
2011          * Find its next-highest non-empty parent, (top cpuset
2012          * has online cpus, so can't be empty).
2013          */
2014         parent = parent_cs(cs);
2015         while (cpumask_empty(parent->cpus_allowed) ||
2016                         nodes_empty(parent->mems_allowed))
2017                 parent = parent_cs(parent);
2018 
2019         if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2020                 printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset ");
2021                 pr_cont_cgroup_name(cs->css.cgroup);
2022                 pr_cont("\n");
2023         }
2024 }
2025 
2026 /**
2027  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
2028  * @cs: cpuset in interest
2029  *
2030  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2031  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
2032  * all its tasks are moved to the nearest ancestor with both resources.
2033  */
2034 static void cpuset_hotplug_update_tasks(struct cpuset *cs)
2035 {
2036         static cpumask_t off_cpus;
2037         static nodemask_t off_mems;
2038         bool is_empty;
2039         bool sane = cgroup_sane_behavior(cs->css.cgroup);
2040 
2041 retry:
2042         wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
2043 
2044         mutex_lock(&cpuset_mutex);
2045 
2046         /*
2047          * We have raced with task attaching. We wait until attaching
2048          * is finished, so we won't attach a task to an empty cpuset.
2049          */
2050         if (cs->attach_in_progress) {
2051                 mutex_unlock(&cpuset_mutex);
2052                 goto retry;
2053         }
2054 
2055         cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2056         nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
2057 
2058         mutex_lock(&callback_mutex);
2059         cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2060         mutex_unlock(&callback_mutex);
2061 
2062         /*
2063          * If sane_behavior flag is set, we need to update tasks' cpumask
2064          * for empty cpuset to take on ancestor's cpumask. Otherwise, don't
2065          * call update_tasks_cpumask() if the cpuset becomes empty, as
2066          * the tasks in it will be migrated to an ancestor.
2067          */
2068         if ((sane && cpumask_empty(cs->cpus_allowed)) ||
2069             (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
2070                 update_tasks_cpumask(cs);
2071 
2072         mutex_lock(&callback_mutex);
2073         nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2074         mutex_unlock(&callback_mutex);
2075 
2076         /*
2077          * If sane_behavior flag is set, we need to update tasks' nodemask
2078          * for empty cpuset to take on ancestor's nodemask. Otherwise, don't
2079          * call update_tasks_nodemask() if the cpuset becomes empty, as
2080          * the tasks in it will be migratd to an ancestor.
2081          */
2082         if ((sane && nodes_empty(cs->mems_allowed)) ||
2083             (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
2084                 update_tasks_nodemask(cs);
2085 
2086         is_empty = cpumask_empty(cs->cpus_allowed) ||
2087                 nodes_empty(cs->mems_allowed);
2088 
2089         mutex_unlock(&cpuset_mutex);
2090 
2091         /*
2092          * If sane_behavior flag is set, we'll keep tasks in empty cpusets.
2093          *
2094          * Otherwise move tasks to the nearest ancestor with execution
2095          * resources.  This is full cgroup operation which will
2096          * also call back into cpuset.  Should be done outside any lock.
2097          */
2098         if (!sane && is_empty)
2099                 remove_tasks_in_empty_cpuset(cs);
2100 }
2101 
2102 /**
2103  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2104  *
2105  * This function is called after either CPU or memory configuration has
2106  * changed and updates cpuset accordingly.  The top_cpuset is always
2107  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2108  * order to make cpusets transparent (of no affect) on systems that are
2109  * actively using CPU hotplug but making no active use of cpusets.
2110  *
2111  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
2112  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
2113  * all descendants.
2114  *
2115  * Note that CPU offlining during suspend is ignored.  We don't modify
2116  * cpusets across suspend/resume cycles at all.
2117  */
2118 static void cpuset_hotplug_workfn(struct work_struct *work)
2119 {
2120         static cpumask_t new_cpus;
2121         static nodemask_t new_mems;
2122         bool cpus_updated, mems_updated;
2123 
2124         mutex_lock(&cpuset_mutex);
2125 
2126         /* fetch the available cpus/mems and find out which changed how */
2127         cpumask_copy(&new_cpus, cpu_active_mask);
2128         new_mems = node_states[N_MEMORY];
2129 
2130         cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
2131         mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
2132 
2133         /* synchronize cpus_allowed to cpu_active_mask */
2134         if (cpus_updated) {
2135                 mutex_lock(&callback_mutex);
2136                 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2137                 mutex_unlock(&callback_mutex);
2138                 /* we don't mess with cpumasks of tasks in top_cpuset */
2139         }
2140 
2141         /* synchronize mems_allowed to N_MEMORY */
2142         if (mems_updated) {
2143                 mutex_lock(&callback_mutex);
2144                 top_cpuset.mems_allowed = new_mems;
2145                 mutex_unlock(&callback_mutex);
2146                 update_tasks_nodemask(&top_cpuset);
2147         }
2148 
2149         mutex_unlock(&cpuset_mutex);
2150 
2151         /* if cpus or mems changed, we need to propagate to descendants */
2152         if (cpus_updated || mems_updated) {
2153                 struct cpuset *cs;
2154                 struct cgroup_subsys_state *pos_css;
2155 
2156                 rcu_read_lock();
2157                 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
2158                         if (cs == &top_cpuset || !css_tryget(&cs->css))
2159                                 continue;
2160                         rcu_read_unlock();
2161 
2162                         cpuset_hotplug_update_tasks(cs);
2163 
2164                         rcu_read_lock();
2165                         css_put(&cs->css);
2166                 }
2167                 rcu_read_unlock();
2168         }
2169 
2170         /* rebuild sched domains if cpus_allowed has changed */
2171         if (cpus_updated)
2172                 rebuild_sched_domains();
2173 }
2174 
2175 void cpuset_update_active_cpus(bool cpu_online)
2176 {
2177         /*
2178          * We're inside cpu hotplug critical region which usually nests
2179          * inside cgroup synchronization.  Bounce actual hotplug processing
2180          * to a work item to avoid reverse locking order.
2181          *
2182          * We still need to do partition_sched_domains() synchronously;
2183          * otherwise, the scheduler will get confused and put tasks to the
2184          * dead CPU.  Fall back to the default single domain.
2185          * cpuset_hotplug_workfn() will rebuild it as necessary.
2186          */
2187         partition_sched_domains(1, NULL, NULL);
2188         schedule_work(&cpuset_hotplug_work);
2189 }
2190 
2191 /*
2192  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2193  * Call this routine anytime after node_states[N_MEMORY] changes.
2194  * See cpuset_update_active_cpus() for CPU hotplug handling.
2195  */
2196 static int cpuset_track_online_nodes(struct notifier_block *self,
2197                                 unsigned long action, void *arg)
2198 {
2199         schedule_work(&cpuset_hotplug_work);
2200         return NOTIFY_OK;
2201 }
2202 
2203 static struct notifier_block cpuset_track_online_nodes_nb = {
2204         .notifier_call = cpuset_track_online_nodes,
2205         .priority = 10,         /* ??! */
2206 };
2207 
2208 /**
2209  * cpuset_init_smp - initialize cpus_allowed
2210  *
2211  * Description: Finish top cpuset after cpu, node maps are initialized
2212  */
2213 void __init cpuset_init_smp(void)
2214 {
2215         cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2216         top_cpuset.mems_allowed = node_states[N_MEMORY];
2217         top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
2218 
2219         register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
2220 }
2221 
2222 /**
2223  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2224  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2225  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2226  *
2227  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2228  * attached to the specified @tsk.  Guaranteed to return some non-empty
2229  * subset of cpu_online_mask, even if this means going outside the
2230  * tasks cpuset.
2231  **/
2232 
2233 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2234 {
2235         struct cpuset *cpus_cs;
2236 
2237         mutex_lock(&callback_mutex);
2238         rcu_read_lock();
2239         cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2240         guarantee_online_cpus(cpus_cs, pmask);
2241         rcu_read_unlock();
2242         mutex_unlock(&callback_mutex);
2243 }
2244 
2245 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2246 {
2247         struct cpuset *cpus_cs;
2248 
2249         rcu_read_lock();
2250         cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2251         do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
2252         rcu_read_unlock();
2253 
2254         /*
2255          * We own tsk->cpus_allowed, nobody can change it under us.
2256          *
2257          * But we used cs && cs->cpus_allowed lockless and thus can
2258          * race with cgroup_attach_task() or update_cpumask() and get
2259          * the wrong tsk->cpus_allowed. However, both cases imply the
2260          * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2261          * which takes task_rq_lock().
2262          *
2263          * If we are called after it dropped the lock we must see all
2264          * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
2265          * set any mask even if it is not right from task_cs() pov,
2266          * the pending set_cpus_allowed_ptr() will fix things.
2267          *
2268          * select_fallback_rq() will fix things ups and set cpu_possible_mask
2269          * if required.
2270          */
2271 }
2272 
2273 void cpuset_init_current_mems_allowed(void)
2274 {
2275         nodes_setall(current->mems_allowed);
2276 }
2277 
2278 /**
2279  * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2280  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2281  *
2282  * Description: Returns the nodemask_t mems_allowed of the cpuset
2283  * attached to the specified @tsk.  Guaranteed to return some non-empty
2284  * subset of node_states[N_MEMORY], even if this means going outside the
2285  * tasks cpuset.
2286  **/
2287 
2288 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2289 {
2290         struct cpuset *mems_cs;
2291         nodemask_t mask;
2292 
2293         mutex_lock(&callback_mutex);
2294         rcu_read_lock();
2295         mems_cs = effective_nodemask_cpuset(task_cs(tsk));
2296         guarantee_online_mems(mems_cs, &mask);
2297         rcu_read_unlock();
2298         mutex_unlock(&callback_mutex);
2299 
2300         return mask;
2301 }
2302 
2303 /**
2304  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
2305  * @nodemask: the nodemask to be checked
2306  *
2307  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2308  */
2309 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2310 {
2311         return nodes_intersects(*nodemask, current->mems_allowed);
2312 }
2313 
2314 /*
2315  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2316  * mem_hardwall ancestor to the specified cpuset.  Call holding
2317  * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2318  * (an unusual configuration), then returns the root cpuset.
2319  */
2320 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2321 {
2322         while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2323                 cs = parent_cs(cs);
2324         return cs;
2325 }
2326 
2327 /**
2328  * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2329  * @node: is this an allowed node?
2330  * @gfp_mask: memory allocation flags
2331  *
2332  * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2333  * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2334  * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2335  * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2336  * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2337  * flag, yes.
2338  * Otherwise, no.
2339  *
2340  * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2341  * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2342  * might sleep, and might allow a node from an enclosing cpuset.
2343  *
2344  * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2345  * cpusets, and never sleeps.
2346  *
2347  * The __GFP_THISNODE placement logic is really handled elsewhere,
2348  * by forcibly using a zonelist starting at a specified node, and by
2349  * (in get_page_from_freelist()) refusing to consider the zones for
2350  * any node on the zonelist except the first.  By the time any such
2351  * calls get to this routine, we should just shut up and say 'yes'.
2352  *
2353  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2354  * and do not allow allocations outside the current tasks cpuset
2355  * unless the task has been OOM killed as is marked TIF_MEMDIE.
2356  * GFP_KERNEL allocations are not so marked, so can escape to the
2357  * nearest enclosing hardwalled ancestor cpuset.
2358  *
2359  * Scanning up parent cpusets requires callback_mutex.  The
2360  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2361  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2362  * current tasks mems_allowed came up empty on the first pass over
2363  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2364  * cpuset are short of memory, might require taking the callback_mutex
2365  * mutex.
2366  *
2367  * The first call here from mm/page_alloc:get_page_from_freelist()
2368  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2369  * so no allocation on a node outside the cpuset is allowed (unless
2370  * in interrupt, of course).
2371  *
2372  * The second pass through get_page_from_freelist() doesn't even call
2373  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2374  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2375  * in alloc_flags.  That logic and the checks below have the combined
2376  * affect that:
2377  *      in_interrupt - any node ok (current task context irrelevant)
2378  *      GFP_ATOMIC   - any node ok
2379  *      TIF_MEMDIE   - any node ok
2380  *      GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2381  *      GFP_USER     - only nodes in current tasks mems allowed ok.
2382  *
2383  * Rule:
2384  *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2385  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2386  *    the code that might scan up ancestor cpusets and sleep.
2387  */
2388 int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2389 {
2390         struct cpuset *cs;              /* current cpuset ancestors */
2391         int allowed;                    /* is allocation in zone z allowed? */
2392 
2393         if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2394                 return 1;
2395         might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2396         if (node_isset(node, current->mems_allowed))
2397                 return 1;
2398         /*
2399          * Allow tasks that have access to memory reserves because they have
2400          * been OOM killed to get memory anywhere.
2401          */
2402         if (unlikely(test_thread_flag(TIF_MEMDIE)))
2403                 return 1;
2404         if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
2405                 return 0;
2406 
2407         if (current->flags & PF_EXITING) /* Let dying task have memory */
2408                 return 1;
2409 
2410         /* Not hardwall and node outside mems_allowed: scan up cpusets */
2411         mutex_lock(&callback_mutex);
2412 
2413         rcu_read_lock();
2414         cs = nearest_hardwall_ancestor(task_cs(current));
2415         allowed = node_isset(node, cs->mems_allowed);
2416         rcu_read_unlock();
2417 
2418         mutex_unlock(&callback_mutex);
2419         return allowed;
2420 }
2421 
2422 /*
2423  * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2424  * @node: is this an allowed node?
2425  * @gfp_mask: memory allocation flags
2426  *
2427  * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2428  * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2429  * yes.  If the task has been OOM killed and has access to memory reserves as
2430  * specified by the TIF_MEMDIE flag, yes.
2431  * Otherwise, no.
2432  *
2433  * The __GFP_THISNODE placement logic is really handled elsewhere,
2434  * by forcibly using a zonelist starting at a specified node, and by
2435  * (in get_page_from_freelist()) refusing to consider the zones for
2436  * any node on the zonelist except the first.  By the time any such
2437  * calls get to this routine, we should just shut up and say 'yes'.
2438  *
2439  * Unlike the cpuset_node_allowed_softwall() variant, above,
2440  * this variant requires that the node be in the current task's
2441  * mems_allowed or that we're in interrupt.  It does not scan up the
2442  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2443  * It never sleeps.
2444  */
2445 int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2446 {
2447         if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2448                 return 1;
2449         if (node_isset(node, current->mems_allowed))
2450                 return 1;
2451         /*
2452          * Allow tasks that have access to memory reserves because they have
2453          * been OOM killed to get memory anywhere.
2454          */
2455         if (unlikely(test_thread_flag(TIF_MEMDIE)))
2456                 return 1;
2457         return 0;
2458 }
2459 
2460 /**
2461  * cpuset_mem_spread_node() - On which node to begin search for a file page
2462  * cpuset_slab_spread_node() - On which node to begin search for a slab page
2463  *
2464  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2465  * tasks in a cpuset with is_spread_page or is_spread_slab set),
2466  * and if the memory allocation used cpuset_mem_spread_node()
2467  * to determine on which node to start looking, as it will for
2468  * certain page cache or slab cache pages such as used for file
2469  * system buffers and inode caches, then instead of starting on the
2470  * local node to look for a free page, rather spread the starting
2471  * node around the tasks mems_allowed nodes.
2472  *
2473  * We don't have to worry about the returned node being offline
2474  * because "it can't happen", and even if it did, it would be ok.
2475  *
2476  * The routines calling guarantee_online_mems() are careful to
2477  * only set nodes in task->mems_allowed that are online.  So it
2478  * should not be possible for the following code to return an
2479  * offline node.  But if it did, that would be ok, as this routine
2480  * is not returning the node where the allocation must be, only
2481  * the node where the search should start.  The zonelist passed to
2482  * __alloc_pages() will include all nodes.  If the slab allocator
2483  * is passed an offline node, it will fall back to the local node.
2484  * See kmem_cache_alloc_node().
2485  */
2486 
2487 static int cpuset_spread_node(int *rotor)
2488 {
2489         int node;
2490 
2491         node = next_node(*rotor, current->mems_allowed);
2492         if (node == MAX_NUMNODES)
2493                 node = first_node(current->mems_allowed);
2494         *rotor = node;
2495         return node;
2496 }
2497 
2498 int cpuset_mem_spread_node(void)
2499 {
2500         if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2501                 current->cpuset_mem_spread_rotor =
2502                         node_random(&current->mems_allowed);
2503 
2504         return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2505 }
2506 
2507 int cpuset_slab_spread_node(void)
2508 {
2509         if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2510                 current->cpuset_slab_spread_rotor =
2511                         node_random(&current->mems_allowed);
2512 
2513         return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2514 }
2515 
2516 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2517 
2518 /**
2519  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2520  * @tsk1: pointer to task_struct of some task.
2521  * @tsk2: pointer to task_struct of some other task.
2522  *
2523  * Description: Return true if @tsk1's mems_allowed intersects the
2524  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2525  * one of the task's memory usage might impact the memory available
2526  * to the other.
2527  **/
2528 
2529 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2530                                    const struct task_struct *tsk2)
2531 {
2532         return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2533 }
2534 
2535 #define CPUSET_NODELIST_LEN     (256)
2536 
2537 /**
2538  * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2539  * @task: pointer to task_struct of some task.
2540  *
2541  * Description: Prints @task's name, cpuset name, and cached copy of its
2542  * mems_allowed to the kernel log.
2543  */
2544 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2545 {
2546          /* Statically allocated to prevent using excess stack. */
2547         static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2548         static DEFINE_SPINLOCK(cpuset_buffer_lock);
2549         struct cgroup *cgrp;
2550 
2551         spin_lock(&cpuset_buffer_lock);
2552         rcu_read_lock();
2553 
2554         cgrp = task_cs(tsk)->css.cgroup;
2555         nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2556                            tsk->mems_allowed);
2557         printk(KERN_INFO "%s cpuset=", tsk->comm);
2558         pr_cont_cgroup_name(cgrp);
2559         pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
2560 
2561         rcu_read_unlock();
2562         spin_unlock(&cpuset_buffer_lock);
2563 }
2564 
2565 /*
2566  * Collection of memory_pressure is suppressed unless
2567  * this flag is enabled by writing "1" to the special
2568  * cpuset file 'memory_pressure_enabled' in the root cpuset.
2569  */
2570 
2571 int cpuset_memory_pressure_enabled __read_mostly;
2572 
2573 /**
2574  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2575  *
2576  * Keep a running average of the rate of synchronous (direct)
2577  * page reclaim efforts initiated by tasks in each cpuset.
2578  *
2579  * This represents the rate at which some task in the cpuset
2580  * ran low on memory on all nodes it was allowed to use, and
2581  * had to enter the kernels page reclaim code in an effort to
2582  * create more free memory by tossing clean pages or swapping
2583  * or writing dirty pages.
2584  *
2585  * Display to user space in the per-cpuset read-only file
2586  * "memory_pressure".  Value displayed is an integer
2587  * representing the recent rate of entry into the synchronous
2588  * (direct) page reclaim by any task attached to the cpuset.
2589  **/
2590 
2591 void __cpuset_memory_pressure_bump(void)
2592 {
2593         rcu_read_lock();
2594         fmeter_markevent(&task_cs(current)->fmeter);
2595         rcu_read_unlock();
2596 }
2597 
2598 #ifdef CONFIG_PROC_PID_CPUSET
2599 /*
2600  * proc_cpuset_show()
2601  *  - Print tasks cpuset path into seq_file.
2602  *  - Used for /proc/<pid>/cpuset.
2603  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2604  *    doesn't really matter if tsk->cpuset changes after we read it,
2605  *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
2606  *    anyway.
2607  */
2608 int proc_cpuset_show(struct seq_file *m, void *unused_v)
2609 {
2610         struct pid *pid;
2611         struct task_struct *tsk;
2612         char *buf, *p;
2613         struct cgroup_subsys_state *css;
2614         int retval;
2615 
2616         retval = -ENOMEM;
2617         buf = kmalloc(PATH_MAX, GFP_KERNEL);
2618         if (!buf)
2619                 goto out;
2620 
2621         retval = -ESRCH;
2622         pid = m->private;
2623         tsk = get_pid_task(pid, PIDTYPE_PID);
2624         if (!tsk)
2625                 goto out_free;
2626 
2627         retval = -ENAMETOOLONG;
2628         rcu_read_lock();
2629         css = task_css(tsk, cpuset_cgrp_id);
2630         p = cgroup_path(css->cgroup, buf, PATH_MAX);
2631         rcu_read_unlock();
2632         if (!p)
2633                 goto out_put_task;
2634         seq_puts(m, p);
2635         seq_putc(m, '\n');
2636         retval = 0;
2637 out_put_task:
2638         put_task_struct(tsk);
2639 out_free:
2640         kfree(buf);
2641 out:
2642         return retval;
2643 }
2644 #endif /* CONFIG_PROC_PID_CPUSET */
2645 
2646 /* Display task mems_allowed in /proc/<pid>/status file. */
2647 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2648 {
2649         seq_printf(m, "Mems_allowed:\t");
2650         seq_nodemask(m, &task->mems_allowed);
2651         seq_printf(m, "\n");
2652         seq_printf(m, "Mems_allowed_list:\t");
2653         seq_nodemask_list(m, &task->mems_allowed);
2654         seq_printf(m, "\n");
2655 }
2656 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us