
Linux/include/linux/mempolicy.h

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be associated either with a process or with a VMA.
 * For VMA-related allocations the VMA policy is preferred; otherwise
 * the process policy is used.  Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state.  All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy is freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* policy mode, see MPOL_* in <uapi/linux/mempolicy.h> */
        unsigned short flags;   /* see set_mempolicy() MPOL_F_* in <uapi/linux/mempolicy.h> */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
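
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * which member of the 'v' union is meaningful depends on 'mode'.  Code
 * inspecting a policy typically dispatches on the mode, roughly:
 *
 *      switch (pol->mode) {
 *      case MPOL_PREFERRED:
 *              node = pol->v.preferred_node;
 *              break;
 *      case MPOL_INTERLEAVE:
 *      case MPOL_BIND:
 *              nodes = pol->v.nodes;
 *              break;
 *      case MPOL_DEFAULT:
 *              break;
 *      }
 */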

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies; see the usage sketch below.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
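
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * mpol_cond_put() pairs with lookups that may return a shared policy
 * carrying an extra reference, e.g. a per-VMA lookup:
 *
 *      struct mempolicy *pol = __get_vma_policy(vma, addr);
 *
 *      if (!pol)
 *              pol = get_task_policy(current);
 *      ... allocate using pol ...
 *      mpol_cond_put(pol);
 *
 * Only shared policies (MPOL_F_SHARED) are actually unreferenced here;
 * task and plain VMA policies are left untouched.
 */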

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}
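
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * a typical lifecycle when a policy must outlive its source, e.g. when
 * copying a task policy at fork time:
 *
 *      struct mempolicy *new = mpol_dup(old);
 *
 *      if (IS_ERR(new))
 *              return PTR_ERR(new);
 *      ... 'new' starts with a reference count of 1 ...
 *      mpol_put(new);
 *
 * mpol_dup() returns ERR_PTR(-ENOMEM) when allocation fails, so the
 * result is checked with IS_ERR() rather than against NULL.
 */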

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}
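
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * mpol_equal() is the kind of check VMA merging relies on: two adjacent
 * VMAs are merge candidates only if their policies are interchangeable:
 *
 *      if (mpol_equal(vma_policy(vma), vma_policy(next)))
 *              ... candidates for merging ...
 */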

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas.  The vmas
 * carry the policy.  As a special twist the pseudo mm is indexed in
 * pages, not bytes, so that shared memory segments larger than an
 * unsigned long's worth of bytes can still be handled.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
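
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * because the tree is indexed in pages, a lookup takes a page offset
 * rather than a byte address.  shmem-style usage looks roughly like:
 *
 *      struct mempolicy *pol;
 *
 *      pol = mpol_shared_policy_lookup(&info->policy, index);
 *      ... allocate the page at 'index' according to pol ...
 *      mpol_cond_put(pol);
 *
 * The returned policy, if any, carries MPOL_F_SHARED and an extra
 * reference, which is why mpol_cond_put() pairs with this lookup.
 */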

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                                enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
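
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * mpol_parse_str() understands the tmpfs mount-option syntax, e.g.
 * "interleave:0-3"; mpol_to_str() formats a policy back into the same
 * syntax.  A round trip might look like:
 *
 *      struct mempolicy *mpol;
 *      char buf[64];
 *      char str[] = "interleave:0-3";
 *
 *      if (!mpol_parse_str(str, &mpol)) {
 *              mpol_to_str(buf, sizeof(buf), mpol);
 *              mpol_put(mpol);
 *      }
 *
 * mpol_parse_str() returns 0 on success and modifies 'str' in place,
 * which is why a writable copy is used above.
 */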

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        if (vma->vm_flags & VM_HUGETLB)
                return false;
#endif

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                                < policy_zone)
                        return false;
        return true;
}
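
/*
 * Example (editor's illustrative sketch, not part of the kernel source):
 * callers walking an address space typically filter on vma_migratable()
 * before queueing pages for node-to-node migration, roughly:
 *
 *      for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *              if (!vma_migratable(vma))
 *                      continue;
 *              ... queue this VMA's pages for migration ...
 *      }
 */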

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else /* !CONFIG_NUMA */

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new,
                                enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif /* _LINUX_MEMPOLICY_H */
