
Linux/include/linux/mm.h

  1 #ifndef _LINUX_MM_H
  2 #define _LINUX_MM_H
  3 
  4 #include <linux/errno.h>
  5 
  6 #ifdef __KERNEL__
  7 
  8 #include <linux/mmdebug.h>
  9 #include <linux/gfp.h>
 10 #include <linux/bug.h>
 11 #include <linux/list.h>
 12 #include <linux/mmzone.h>
 13 #include <linux/rbtree.h>
 14 #include <linux/atomic.h>
 15 #include <linux/debug_locks.h>
 16 #include <linux/mm_types.h>
 17 #include <linux/range.h>
 18 #include <linux/pfn.h>
 19 #include <linux/bit_spinlock.h>
 20 #include <linux/shrinker.h>
 21 
 22 struct mempolicy;
 23 struct anon_vma;
 24 struct anon_vma_chain;
 25 struct file_ra_state;
 26 struct user_struct;
 27 struct writeback_control;
 28 
 29 #ifndef CONFIG_NEED_MULTIPLE_NODES      /* Don't use mapnrs, do it properly */
 30 extern unsigned long max_mapnr;
 31 
 32 static inline void set_max_mapnr(unsigned long limit)
 33 {
 34         max_mapnr = limit;
 35 }
 36 #else
 37 static inline void set_max_mapnr(unsigned long limit) { }
 38 #endif
 39 
 40 extern unsigned long totalram_pages;
 41 extern void * high_memory;
 42 extern int page_cluster;
 43 
 44 #ifdef CONFIG_SYSCTL
 45 extern int sysctl_legacy_va_layout;
 46 #else
 47 #define sysctl_legacy_va_layout 0
 48 #endif
 49 
 50 #include <asm/page.h>
 51 #include <asm/pgtable.h>
 52 #include <asm/processor.h>
 53 
 54 #ifndef __pa_symbol
 55 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 56 #endif
 57 
 58 extern unsigned long sysctl_user_reserve_kbytes;
 59 extern unsigned long sysctl_admin_reserve_kbytes;
 60 
 61 extern int sysctl_overcommit_memory;
 62 extern int sysctl_overcommit_ratio;
 63 extern unsigned long sysctl_overcommit_kbytes;
 64 
 65 extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
 66                                     size_t *, loff_t *);
 67 extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 68                                     size_t *, loff_t *);
 69 
 70 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 71 
 72 /* to align the pointer to the (next) page boundary */
 73 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 74 
 75 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 76 #define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
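
/*
 * A minimal usage sketch (not part of this header): round a caller-supplied
 * buffer up to whole pages.  "buf" and "len" are hypothetical parameters of
 * a driver that only accepts page-aligned buffers.
 */
static inline unsigned long user_buffer_pages(const void *buf, unsigned long len)
{
        if (!PAGE_ALIGNED(buf))
                return 0;                       /* caller must pass an aligned base */
        return PAGE_ALIGN(len) >> PAGE_SHIFT;   /* whole pages needed to cover len */
}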
 77 
 78 /*
 79  * Linux kernel virtual memory manager primitives.
 80  * The idea being to have a "virtual" mm in the same way
 81  * we have a virtual fs - giving a cleaner interface to the
 82  * mm details, and allowing different kinds of memory mappings
 83  * (from shared memory to executable loading to arbitrary
 84  * mmap() functions).
 85  */
 86 
 87 extern struct kmem_cache *vm_area_cachep;
 88 
 89 #ifndef CONFIG_MMU
 90 extern struct rb_root nommu_region_tree;
 91 extern struct rw_semaphore nommu_region_sem;
 92 
 93 extern unsigned int kobjsize(const void *objp);
 94 #endif
 95 
 96 /*
 97  * vm_flags in vm_area_struct, see mm_types.h.
 98  */
 99 #define VM_NONE         0x00000000
100 
101 #define VM_READ         0x00000001      /* currently active flags */
102 #define VM_WRITE        0x00000002
103 #define VM_EXEC         0x00000004
104 #define VM_SHARED       0x00000008
105 
106 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
107 #define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
108 #define VM_MAYWRITE     0x00000020
109 #define VM_MAYEXEC      0x00000040
110 #define VM_MAYSHARE     0x00000080
111 
112 #define VM_GROWSDOWN    0x00000100      /* general info on the segment */
113 #define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
114 #define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */
115 
116 #define VM_LOCKED       0x00002000
117 #define VM_IO           0x00004000      /* Memory mapped I/O or similar */
118 
119                                         /* Used by sys_madvise() */
120 #define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
121 #define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
122 
123 #define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
124 #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
125 #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
126 #define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
127 #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
128 #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
129 #define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
130 #define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
131 
132 #ifdef CONFIG_MEM_SOFT_DIRTY
133 # define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
134 #else
135 # define VM_SOFTDIRTY   0
136 #endif
137 
138 #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
139 #define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
140 #define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
141 #define VM_MERGEABLE    0x80000000      /* KSM may merge identical pages */
142 
143 #if defined(CONFIG_X86)
144 # define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
145 #elif defined(CONFIG_PPC)
146 # define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
147 #elif defined(CONFIG_PARISC)
148 # define VM_GROWSUP     VM_ARCH_1
149 #elif defined(CONFIG_METAG)
150 # define VM_GROWSUP     VM_ARCH_1
151 #elif defined(CONFIG_IA64)
152 # define VM_GROWSUP     VM_ARCH_1
153 #elif !defined(CONFIG_MMU)
154 # define VM_MAPPED_COPY VM_ARCH_1       /* T if mapped copy of data (nommu mmap) */
155 #endif
156 
157 #ifndef VM_GROWSUP
158 # define VM_GROWSUP     VM_NONE
159 #endif
160 
161 /* Bits set in the VMA until the stack is in its final location */
162 #define VM_STACK_INCOMPLETE_SETUP       (VM_RAND_READ | VM_SEQ_READ)
163 
164 #ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
165 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
166 #endif
167 
168 #ifdef CONFIG_STACK_GROWSUP
169 #define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
170 #else
171 #define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
172 #endif
173 
174 /*
175  * Special vmas that are non-mergeable, non-mlock()able.
176  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
177  */
178 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
179 
180 /* This mask defines which mm->def_flags a process can inherit from its parent */
181 #define VM_INIT_DEF_MASK        VM_NOHUGEPAGE
182 
183 /*
184  * mapping from the currently active vm_flags protection bits (the
185  * low four bits) to a page protection mask.
186  */
187 extern pgprot_t protection_map[16];
188 
189 #define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
190 #define FAULT_FLAG_NONLINEAR    0x02    /* Fault was via a nonlinear mapping */
191 #define FAULT_FLAG_MKWRITE      0x04    /* Fault was mkwrite of existing pte */
192 #define FAULT_FLAG_ALLOW_RETRY  0x08    /* Retry fault if blocking */
193 #define FAULT_FLAG_RETRY_NOWAIT 0x10    /* Don't drop mmap_sem and wait when retrying */
194 #define FAULT_FLAG_KILLABLE     0x20    /* The fault task is in SIGKILL killable region */
195 #define FAULT_FLAG_TRIED        0x40    /* second try */
196 #define FAULT_FLAG_USER         0x80    /* The fault originated in userspace */
197 
198 /*
199  * vm_fault is filled by the pagefault handler and passed to the vma's
200  * ->fault function. The vma's ->fault is responsible for returning a bitmask
201  * of VM_FAULT_xxx flags that give details about how the fault was handled.
202  *
203  * pgoff should be used in favour of virtual_address, if possible. If pgoff
204  * is used, one may implement ->remap_pages to get nonlinear mapping support.
205  */
206 struct vm_fault {
207         unsigned int flags;             /* FAULT_FLAG_xxx flags */
208         pgoff_t pgoff;                  /* Logical page offset based on vma */
209         void __user *virtual_address;   /* Faulting virtual address */
210 
211         struct page *page;              /* ->fault handlers should return a
212                                          * page here, unless VM_FAULT_NOPAGE
213                                          * is set (which is also implied by
214                                          * VM_FAULT_ERROR).
215                                          */
216         /* for ->map_pages() only */
217         pgoff_t max_pgoff;              /* map pages for offset from pgoff till
218                                          * max_pgoff inclusive */
219         pte_t *pte;                     /* pte entry associated with ->pgoff */
220 };
221 
222 /*
223  * These are the virtual MM functions - opening of an area, closing and
224  * unmapping it (needed to keep files on disk up-to-date etc), pointer
225  * to the functions called when a no-page or a wp-page exception occurs. 
226  */
227 struct vm_operations_struct {
228         void (*open)(struct vm_area_struct * area);
229         void (*close)(struct vm_area_struct * area);
230         int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
231         void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
232 
233         /* notification that a previously read-only page is about to become
234          * writable, if an error is returned it will cause a SIGBUS */
235         int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
236 
237         /* called by access_process_vm when get_user_pages() fails, typically
238          * for use by special VMAs that can switch between memory and hardware
239          */
240         int (*access)(struct vm_area_struct *vma, unsigned long addr,
241                       void *buf, int len, int write);
242 #ifdef CONFIG_NUMA
243         /*
244          * set_policy() op must add a reference to any non-NULL @new mempolicy
245          * to hold the policy upon return.  Caller should pass NULL @new to
246          * remove a policy and fall back to surrounding context--i.e. do not
247          * install a MPOL_DEFAULT policy, nor the task or system default
248          * mempolicy.
249          */
250         int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
251 
252         /*
253          * get_policy() op must add reference [mpol_get()] to any policy at
254          * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
255          * in mm/mempolicy.c will do this automatically.
256          * get_policy() must NOT add a ref if the policy at (vma,addr) is not
257          * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
258          * If no [shared/vma] mempolicy exists at the addr, get_policy() op
259          * must return NULL--i.e., do not "fallback" to task or system default
260          * policy.
261          */
262         struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
263                                         unsigned long addr);
264         int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
265                 const nodemask_t *to, unsigned long flags);
266 #endif
267         /* called by sys_remap_file_pages() to populate non-linear mapping */
268         int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
269                            unsigned long size, pgoff_t pgoff);
270 };
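
/*
 * A minimal sketch (not part of this header) of a driver ->fault method:
 * every fault in the VMA is backed by one page the driver allocated at
 * probe time.  "my_dev_page" is hypothetical; get_page() and the
 * VM_FAULT_* return codes are declared further down in this file.  The
 * handler must hand the core VM a referenced page via vmf->page.
 */
static struct page *my_dev_page;        /* hypothetical, set up elsewhere */

static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        get_page(my_dev_page);          /* reference handed to the core VM */
        vmf->page = my_dev_page;
        return 0;                       /* success, no VM_FAULT_* bits */
}

static const struct vm_operations_struct my_dev_vm_ops = {
        .fault = my_dev_fault,
};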
271 
272 struct mmu_gather;
273 struct inode;
274 
275 #define page_private(page)              ((page)->private)
276 #define set_page_private(page, v)       ((page)->private = (v))
277 
278 /* Only valid while the page is in the free path or on a free_list */
279 static inline void set_freepage_migratetype(struct page *page, int migratetype)
280 {
281         page->index = migratetype;
282 }
283 
284 /* Only valid while the page is in the free path or on a free_list */
285 static inline int get_freepage_migratetype(struct page *page)
286 {
287         return page->index;
288 }
289 
290 /*
291  * FIXME: take this include out, include page-flags.h in
292  * files which need it (119 of them)
293  */
294 #include <linux/page-flags.h>
295 #include <linux/huge_mm.h>
296 
297 /*
298  * Methods to modify the page usage count.
299  *
300  * What counts for a page usage:
301  * - cache mapping   (page->mapping)
302  * - private data    (page->private)
303  * - page mapped in a task's page tables, each mapping
304  *   is counted separately
305  *
306  * Also, many kernel routines increase the page count before a critical
307  * routine so they can be sure the page doesn't go away from under them.
308  */
309 
310 /*
311  * Drop a ref, return true if the refcount fell to zero (the page has no users)
312  */
313 static inline int put_page_testzero(struct page *page)
314 {
315         VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
316         return atomic_dec_and_test(&page->_count);
317 }
318 
319 /*
320  * Try to grab a ref unless the page has a refcount of zero, return false if
321  * that is the case.
322  * This can be called when MMU is off so it must not access
323  * any of the virtual mappings.
324  */
325 static inline int get_page_unless_zero(struct page *page)
326 {
327         return atomic_inc_not_zero(&page->_count);
328 }
329 
330 /*
331  * Try to drop a ref unless the page has a refcount of one, return false if
332  * that is the case.
333  * This is to make sure that the refcount won't become zero after this drop.
334  * This can be called when MMU is off so it must not access
335  * any of the virtual mappings.
336  */
337 static inline int put_page_unless_one(struct page *page)
338 {
339         return atomic_add_unless(&page->_count, -1, 1);
340 }
341 
342 extern int page_is_ram(unsigned long pfn);
343 
344 /* Support for virtually mapped pages */
345 struct page *vmalloc_to_page(const void *addr);
346 unsigned long vmalloc_to_pfn(const void *addr);
347 
348 /*
349  * Determine if an address is within the vmalloc range
350  *
351  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
352  * is no special casing required.
353  */
354 static inline int is_vmalloc_addr(const void *x)
355 {
356 #ifdef CONFIG_MMU
357         unsigned long addr = (unsigned long)x;
358 
359         return addr >= VMALLOC_START && addr < VMALLOC_END;
360 #else
361         return 0;
362 #endif
363 }
364 #ifdef CONFIG_MMU
365 extern int is_vmalloc_or_module_addr(const void *x);
366 #else
367 static inline int is_vmalloc_or_module_addr(const void *x)
368 {
369         return 0;
370 }
371 #endif
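
/*
 * Sketch (not part of this header): translate a directly addressable kernel
 * virtual address to its backing struct page, picking vmalloc_to_page() for
 * vmalloc space and virt_to_page() (from asm/page.h) for lowmem.
 */
static inline struct page *kernel_addr_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        return virt_to_page(addr);
}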
372 
373 extern void kvfree(const void *addr);
374 
375 static inline void compound_lock(struct page *page)
376 {
377 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
378         VM_BUG_ON_PAGE(PageSlab(page), page);
379         bit_spin_lock(PG_compound_lock, &page->flags);
380 #endif
381 }
382 
383 static inline void compound_unlock(struct page *page)
384 {
385 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
386         VM_BUG_ON_PAGE(PageSlab(page), page);
387         bit_spin_unlock(PG_compound_lock, &page->flags);
388 #endif
389 }
390 
391 static inline unsigned long compound_lock_irqsave(struct page *page)
392 {
393         unsigned long uninitialized_var(flags);
394 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
395         local_irq_save(flags);
396         compound_lock(page);
397 #endif
398         return flags;
399 }
400 
401 static inline void compound_unlock_irqrestore(struct page *page,
402                                               unsigned long flags)
403 {
404 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
405         compound_unlock(page);
406         local_irq_restore(flags);
407 #endif
408 }
409 
410 static inline struct page *compound_head(struct page *page)
411 {
412         if (unlikely(PageTail(page))) {
413                 struct page *head = page->first_page;
414 
415                 /*
416                  * page->first_page may be a dangling pointer to an old
417                  * compound page, so recheck that it is still a tail
418                  * page before returning.
419                  */
420                 smp_rmb();
421                 if (likely(PageTail(page)))
422                         return head;
423         }
424         return page;
425 }
426 
427 /*
428  * The atomic page->_mapcount starts from -1: so that transitions
429  * both from it and to it can be tracked, using atomic_inc_and_test
430  * and atomic_add_negative(-1).
431  */
432 static inline void page_mapcount_reset(struct page *page)
433 {
434         atomic_set(&(page)->_mapcount, -1);
435 }
436 
437 static inline int page_mapcount(struct page *page)
438 {
439         return atomic_read(&(page)->_mapcount) + 1;
440 }
441 
442 static inline int page_count(struct page *page)
443 {
444         return atomic_read(&compound_head(page)->_count);
445 }
446 
447 #ifdef CONFIG_HUGETLB_PAGE
448 extern int PageHeadHuge(struct page *page_head);
449 #else /* CONFIG_HUGETLB_PAGE */
450 static inline int PageHeadHuge(struct page *page_head)
451 {
452         return 0;
453 }
454 #endif /* CONFIG_HUGETLB_PAGE */
455 
456 static inline bool __compound_tail_refcounted(struct page *page)
457 {
458         return !PageSlab(page) && !PageHeadHuge(page);
459 }
460 
461 /*
462  * This takes a head page as parameter and tells if the
463  * tail page reference counting can be skipped.
464  *
465  * For this to be safe, PageSlab and PageHeadHuge must remain true on
466  * any given page where they return true here, until all tail pins
467  * have been released.
468  */
469 static inline bool compound_tail_refcounted(struct page *page)
470 {
471         VM_BUG_ON_PAGE(!PageHead(page), page);
472         return __compound_tail_refcounted(page);
473 }
474 
475 static inline void get_huge_page_tail(struct page *page)
476 {
477         /*
478          * __split_huge_page_refcount() cannot run from under us.
479          */
480         VM_BUG_ON_PAGE(!PageTail(page), page);
481         VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
482         VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
483         if (compound_tail_refcounted(page->first_page))
484                 atomic_inc(&page->_mapcount);
485 }
486 
487 extern bool __get_page_tail(struct page *page);
488 
489 static inline void get_page(struct page *page)
490 {
491         if (unlikely(PageTail(page)))
492                 if (likely(__get_page_tail(page)))
493                         return;
494         /*
495          * Getting a normal page or the head of a compound page
496          * requires an already elevated page->_count.
497          */
498         VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
499         atomic_inc(&page->_count);
500 }
501 
502 static inline struct page *virt_to_head_page(const void *x)
503 {
504         struct page *page = virt_to_page(x);
505         return compound_head(page);
506 }
507 
508 /*
509  * Set up the page count before the page is freed into the page allocator
510  * for the first time (boot or memory hotplug).
511  */
512 static inline void init_page_count(struct page *page)
513 {
514         atomic_set(&page->_count, 1);
515 }
516 
517 /*
518  * PageBuddy() indicates that the page is free and in the buddy system
519  * (see mm/page_alloc.c).
520  *
521  * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
522  * -2 so that an underflow of the page_mapcount() won't be mistaken
523  * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
524  * efficiently by most CPU architectures.
525  */
526 #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
527 
528 static inline int PageBuddy(struct page *page)
529 {
530         return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
531 }
532 
533 static inline void __SetPageBuddy(struct page *page)
534 {
535         VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
536         atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
537 }
538 
539 static inline void __ClearPageBuddy(struct page *page)
540 {
541         VM_BUG_ON_PAGE(!PageBuddy(page), page);
542         atomic_set(&page->_mapcount, -1);
543 }
544 
545 void put_page(struct page *page);
546 void put_pages_list(struct list_head *pages);
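
/*
 * Sketch (not part of this header) of the common speculative-reference
 * pattern built on these helpers: take a reference only if the page still
 * has users, do the work, then drop the reference with put_page().
 */
static inline bool inspect_page_safely(struct page *page)
{
        if (!get_page_unless_zero(page))
                return false;   /* page was already headed back to the allocator */
        /* ... the page cannot be freed underneath us here ... */
        put_page(page);
        return true;
}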
547 
548 void split_page(struct page *page, unsigned int order);
549 int split_free_page(struct page *page);
550 
551 /*
552  * Compound pages have a destructor function.  Provide a
553  * prototype for that function and accessor functions.
554  * These are _only_ valid on the head of a PG_compound page.
555  */
556 typedef void compound_page_dtor(struct page *);
557 
558 static inline void set_compound_page_dtor(struct page *page,
559                                                 compound_page_dtor *dtor)
560 {
561         page[1].lru.next = (void *)dtor;
562 }
563 
564 static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
565 {
566         return (compound_page_dtor *)page[1].lru.next;
567 }
568 
569 static inline int compound_order(struct page *page)
570 {
571         if (!PageHead(page))
572                 return 0;
573         return (unsigned long)page[1].lru.prev;
574 }
575 
576 static inline void set_compound_order(struct page *page, unsigned long order)
577 {
578         page[1].lru.prev = (void *)order;
579 }
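
/*
 * Sketch (not part of this header): given any page, possibly a tail of a
 * compound page, report how many base pages its allocation spans.
 */
static inline unsigned long compound_span(struct page *page)
{
        struct page *head = compound_head(page);

        return 1UL << compound_order(head);    /* 1 for an ordinary page */
}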
580 
581 #ifdef CONFIG_MMU
582 /*
583  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
584  * servicing faults for write access.  In the normal case we do want
585  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
586  * that do not have writing enabled, when used by access_process_vm.
587  */
588 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
589 {
590         if (likely(vma->vm_flags & VM_WRITE))
591                 pte = pte_mkwrite(pte);
592         return pte;
593 }
594 
595 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
596                 struct page *page, pte_t *pte, bool write, bool anon);
597 #endif
598 
599 /*
600  * Multiple processes may "see" the same page. E.g. for untouched
601  * mappings of /dev/null, all processes see the same page full of
602  * zeroes, and text pages of executables and shared libraries have
603  * only one copy in memory, at most, normally.
604  *
605  * For the non-reserved pages, page_count(page) denotes a reference count.
606  *   page_count() == 0 means the page is free. page->lru is then used for
607  *   freelist management in the buddy allocator.
608  *   page_count() > 0  means the page has been allocated.
609  *
610  * Pages are allocated by the slab allocator in order to provide memory
611  * to kmalloc and kmem_cache_alloc. In this case, the management of the
612  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
613  * unless a particular usage is carefully commented. (the responsibility of
614  * freeing the kmalloc memory is the caller's, of course).
615  *
616  * A page may be used by anyone else who does a __get_free_page().
617  * In this case, page_count still tracks the references, and should only
618  * be used through the normal accessor functions. The top bits of page->flags
619  * and page->virtual store page management information, but all other fields
620  * are unused and could be used privately, carefully. The management of this
621  * page is the responsibility of the one who allocated it, and those who have
622  * subsequently been given references to it.
623  *
624  * The other pages (we may call them "pagecache pages") are completely
625  * managed by the Linux memory manager: I/O, buffers, swapping etc.
626  * The following discussion applies only to them.
627  *
628  * A pagecache page contains an opaque `private' member, which belongs to the
629  * page's address_space. Usually, this is the address of a circular list of
630  * the page's disk buffers. PG_private must be set to tell the VM to call
631  * into the filesystem to release these pages.
632  *
633  * A page may belong to an inode's memory mapping. In this case, page->mapping
634  * is the pointer to the inode, and page->index is the file offset of the page,
635  * in units of PAGE_CACHE_SIZE.
636  *
637  * If pagecache pages are not associated with an inode, they are said to be
638  * anonymous pages. These may become associated with the swapcache, and in that
639  * case PG_swapcache is set, and page->private is an offset into the swapcache.
640  *
641  * In either case (swapcache or inode backed), the pagecache itself holds one
642  * reference to the page. Setting PG_private should also increment the
643  * refcount. Each user mapping also has a reference to the page.
644  *
645  * The pagecache pages are stored in a per-mapping radix tree, which is
646  * rooted at mapping->page_tree, and indexed by offset.
647  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
648  * lists, we instead now tag pages as dirty/writeback in the radix tree.
649  *
650  * All pagecache pages may be subject to I/O:
651  * - inode pages may need to be read from disk,
652  * - inode pages which have been modified and are MAP_SHARED may need
653  *   to be written back to the inode on disk,
654  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
655  *   modified may need to be swapped out to swap space and (later) to be read
656  *   back into memory.
657  */
658 
659 /*
660  * The zone field is never updated after free_area_init_core()
661  * sets it, so none of the operations on it need to be atomic.
662  */
663 
664 /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
665 #define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
666 #define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
667 #define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
668 #define LAST_CPUPID_PGOFF       (ZONES_PGOFF - LAST_CPUPID_WIDTH)
669 
670 /*
671  * Define the bit shifts to access each section.  For non-existent
672  * sections we define the shift as 0; that plus a 0 mask ensures
673  * the compiler will optimise away references to them.
674  */
675 #define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
676 #define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
677 #define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
678 #define LAST_CPUPID_PGSHIFT     (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
679 
680 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
681 #ifdef NODE_NOT_IN_PAGE_FLAGS
682 #define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
683 #define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
684                                                 SECTIONS_PGOFF : ZONES_PGOFF)
685 #else
686 #define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
687 #define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
688                                                 NODES_PGOFF : ZONES_PGOFF)
689 #endif
690 
691 #define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
692 
693 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
694 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
695 #endif
696 
697 #define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
698 #define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
699 #define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
700 #define LAST_CPUPID_MASK        ((1UL << LAST_CPUPID_SHIFT) - 1)
701 #define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
702 
703 static inline enum zone_type page_zonenum(const struct page *page)
704 {
705         return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
706 }
707 
708 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
709 #define SECTION_IN_PAGE_FLAGS
710 #endif
711 
712 /*
713  * The identification function is mainly used by the buddy allocator for
714  * determining if two pages could be buddies. We are not really identifying
715  * the zone since we could be using the section number id if we do not have
716  * node id available in page flags.
717  * We only guarantee that it will return the same value for two combinable
718  * pages in a zone.
719  */
720 static inline int page_zone_id(struct page *page)
721 {
722         return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
723 }
724 
725 static inline int zone_to_nid(struct zone *zone)
726 {
727 #ifdef CONFIG_NUMA
728         return zone->node;
729 #else
730         return 0;
731 #endif
732 }
733 
734 #ifdef NODE_NOT_IN_PAGE_FLAGS
735 extern int page_to_nid(const struct page *page);
736 #else
737 static inline int page_to_nid(const struct page *page)
738 {
739         return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
740 }
741 #endif
742 
743 #ifdef CONFIG_NUMA_BALANCING
744 static inline int cpu_pid_to_cpupid(int cpu, int pid)
745 {
746         return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
747 }
748 
749 static inline int cpupid_to_pid(int cpupid)
750 {
751         return cpupid & LAST__PID_MASK;
752 }
753 
754 static inline int cpupid_to_cpu(int cpupid)
755 {
756         return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
757 }
758 
759 static inline int cpupid_to_nid(int cpupid)
760 {
761         return cpu_to_node(cpupid_to_cpu(cpupid));
762 }
763 
764 static inline bool cpupid_pid_unset(int cpupid)
765 {
766         return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
767 }
768 
769 static inline bool cpupid_cpu_unset(int cpupid)
770 {
771         return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
772 }
773 
774 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
775 {
776         return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
777 }
778 
779 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
780 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
781 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
782 {
783         return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
784 }
785 
786 static inline int page_cpupid_last(struct page *page)
787 {
788         return page->_last_cpupid;
789 }
790 static inline void page_cpupid_reset_last(struct page *page)
791 {
792         page->_last_cpupid = -1 & LAST_CPUPID_MASK;
793 }
794 #else
795 static inline int page_cpupid_last(struct page *page)
796 {
797         return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
798 }
799 
800 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
801 
802 static inline void page_cpupid_reset_last(struct page *page)
803 {
804         int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
805 
806         page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
807         page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
808 }
809 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
810 #else /* !CONFIG_NUMA_BALANCING */
811 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
812 {
813         return page_to_nid(page); /* XXX */
814 }
815 
816 static inline int page_cpupid_last(struct page *page)
817 {
818         return page_to_nid(page); /* XXX */
819 }
820 
821 static inline int cpupid_to_nid(int cpupid)
822 {
823         return -1;
824 }
825 
826 static inline int cpupid_to_pid(int cpupid)
827 {
828         return -1;
829 }
830 
831 static inline int cpupid_to_cpu(int cpupid)
832 {
833         return -1;
834 }
835 
836 static inline int cpu_pid_to_cpupid(int nid, int pid)
837 {
838         return -1;
839 }
840 
841 static inline bool cpupid_pid_unset(int cpupid)
842 {
843         return 1;
844 }
845 
846 static inline void page_cpupid_reset_last(struct page *page)
847 {
848 }
849 
850 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
851 {
852         return false;
853 }
854 #endif /* CONFIG_NUMA_BALANCING */
855 
856 static inline struct zone *page_zone(const struct page *page)
857 {
858         return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
859 }
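
/*
 * Sketch (not part of this header): placement information is read straight
 * out of page->flags by the helpers above; no locking is needed because the
 * zone/node/section bits are set once at init and never change.
 */
static inline bool pages_on_same_node_zone(struct page *a, struct page *b)
{
        return page_to_nid(a) == page_to_nid(b) &&
               page_zonenum(a) == page_zonenum(b);
}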
860 
861 #ifdef SECTION_IN_PAGE_FLAGS
862 static inline void set_page_section(struct page *page, unsigned long section)
863 {
864         page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
865         page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
866 }
867 
868 static inline unsigned long page_to_section(const struct page *page)
869 {
870         return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
871 }
872 #endif
873 
874 static inline void set_page_zone(struct page *page, enum zone_type zone)
875 {
876         page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
877         page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
878 }
879 
880 static inline void set_page_node(struct page *page, unsigned long node)
881 {
882         page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
883         page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
884 }
885 
886 static inline void set_page_links(struct page *page, enum zone_type zone,
887         unsigned long node, unsigned long pfn)
888 {
889         set_page_zone(page, zone);
890         set_page_node(page, node);
891 #ifdef SECTION_IN_PAGE_FLAGS
892         set_page_section(page, pfn_to_section_nr(pfn));
893 #endif
894 }
895 
896 /*
897  * Some inline functions in vmstat.h depend on page_zone()
898  */
899 #include <linux/vmstat.h>
900 
901 static __always_inline void *lowmem_page_address(const struct page *page)
902 {
903         return __va(PFN_PHYS(page_to_pfn(page)));
904 }
905 
906 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
907 #define HASHED_PAGE_VIRTUAL
908 #endif
909 
910 #if defined(WANT_PAGE_VIRTUAL)
911 static inline void *page_address(const struct page *page)
912 {
913         return page->virtual;
914 }
915 static inline void set_page_address(struct page *page, void *address)
916 {
917         page->virtual = address;
918 }
919 #define page_address_init()  do { } while(0)
920 #endif
921 
922 #if defined(HASHED_PAGE_VIRTUAL)
923 void *page_address(const struct page *page);
924 void set_page_address(struct page *page, void *virtual);
925 void page_address_init(void);
926 #endif
927 
928 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
929 #define page_address(page) lowmem_page_address(page)
930 #define set_page_address(page, address)  do { } while(0)
931 #define page_address_init()  do { } while(0)
932 #endif
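
/*
 * Sketch (not part of this header): page_address() only works for pages with
 * a permanent kernel mapping; a highmem page must be mapped first.  Assumes
 * <linux/highmem.h> for kmap()/kunmap() and <linux/string.h> for memset().
 */
static inline void zero_page_contents(struct page *page)
{
        void *addr = kmap(page);        /* falls back to page_address() for lowmem */

        memset(addr, 0, PAGE_SIZE);
        kunmap(page);
}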
933 
934 /*
935  * On an anonymous page mapped into a user virtual memory area,
936  * page->mapping points to its anon_vma, not to a struct address_space;
937  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
938  *
939  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
940  * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
941  * and then page->mapping points, not to an anon_vma, but to a private
942  * structure which KSM associates with that merged page.  See ksm.h.
943  *
944  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
945  *
946  * Please note that, confusingly, "page_mapping" refers to the inode
947  * address_space which maps the page from disk; whereas "page_mapped"
948  * refers to user virtual address space into which the page is mapped.
949  */
950 #define PAGE_MAPPING_ANON       1
951 #define PAGE_MAPPING_KSM        2
952 #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
953 
954 extern struct address_space *page_mapping(struct page *page);
955 
956 /* Neutral page->mapping pointer to address_space or anon_vma or other */
957 static inline void *page_rmapping(struct page *page)
958 {
959         return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
960 }
961 
962 extern struct address_space *__page_file_mapping(struct page *);
963 
964 static inline
965 struct address_space *page_file_mapping(struct page *page)
966 {
967         if (unlikely(PageSwapCache(page)))
968                 return __page_file_mapping(page);
969 
970         return page->mapping;
971 }
972 
973 static inline int PageAnon(struct page *page)
974 {
975         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
976 }
977 
978 /*
979  * Return the pagecache index of the passed page.  Regular pagecache pages
980  * use ->index whereas swapcache pages use ->private
981  */
982 static inline pgoff_t page_index(struct page *page)
983 {
984         if (unlikely(PageSwapCache(page)))
985                 return page_private(page);
986         return page->index;
987 }
988 
989 extern pgoff_t __page_file_index(struct page *page);
990 
991 /*
992  * Return the file index of the page. Regular pagecache pages use ->index
993  * whereas swapcache pages use swp_offset(->private)
994  */
995 static inline pgoff_t page_file_index(struct page *page)
996 {
997         if (unlikely(PageSwapCache(page)))
998                 return __page_file_index(page);
999 
1000         return page->index;
1001 }
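
/*
 * Sketch (not part of this header): for a file-backed pagecache page return
 * its offset in the backing file, otherwise (anonymous or unmapped) -1.
 * Purely illustrative; a real caller also needs a reference on the page.
 */
static inline long pagecache_offset(struct page *page)
{
        if (PageAnon(page) || !page_mapping(page))
                return -1;
        return (long)page_file_index(page);
}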
1002 
1003 /*
1004  * Return true if this page is mapped into pagetables.
1005  */
1006 static inline int page_mapped(struct page *page)
1007 {
1008         return atomic_read(&(page)->_mapcount) >= 0;
1009 }
1010 
1011 /*
1012  * Different kinds of faults, as returned by handle_mm_fault().
1013  * Used to decide whether a process gets delivered SIGBUS or
1014  * just gets major/minor fault counters bumped up.
1015  */
1016 
1017 #define VM_FAULT_MINOR  0 /* For backwards compat. Remove me quickly. */
1018 
1019 #define VM_FAULT_OOM    0x0001
1020 #define VM_FAULT_SIGBUS 0x0002
1021 #define VM_FAULT_MAJOR  0x0004
1022 #define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
1023 #define VM_FAULT_HWPOISON 0x0010        /* Hit poisoned small page */
1024 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1025 
1026 #define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, not return page */
1027 #define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
1028 #define VM_FAULT_RETRY  0x0400  /* ->fault blocked, must retry */
1029 #define VM_FAULT_FALLBACK 0x0800        /* huge page fault failed, fall back to small */
1030 
1031 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1032 
1033 #define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
1034                          VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
1035 
1036 /* Encode hstate index for a hwpoisoned large page */
1037 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1038 #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1039 
1040 /*
1041  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1042  */
1043 extern void pagefault_out_of_memory(void);
1044 
1045 #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
1046 
1047 /*
1048  * Flags passed to show_mem() and show_free_areas() to suppress output in
1049  * various contexts.
1050  */
1051 #define SHOW_MEM_FILTER_NODES           (0x0001u)       /* disallowed nodes */
1052 
1053 extern void show_free_areas(unsigned int flags);
1054 extern bool skip_free_areas_node(unsigned int flags, int nid);
1055 
1056 int shmem_zero_setup(struct vm_area_struct *);
1057 #ifdef CONFIG_SHMEM
1058 bool shmem_mapping(struct address_space *mapping);
1059 #else
1060 static inline bool shmem_mapping(struct address_space *mapping)
1061 {
1062         return false;
1063 }
1064 #endif
1065 
1066 extern int can_do_mlock(void);
1067 extern int user_shm_lock(size_t, struct user_struct *);
1068 extern void user_shm_unlock(size_t, struct user_struct *);
1069 
1070 /*
1071  * Parameter block passed down to zap_pte_range in exceptional cases.
1072  */
1073 struct zap_details {
1074         struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
1075         struct address_space *check_mapping;    /* Check page->mapping if set */
1076         pgoff_t first_index;                    /* Lowest page->index to unmap */
1077         pgoff_t last_index;                     /* Highest page->index to unmap */
1078 };
1079 
1080 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1081                 pte_t pte);
1082 
1083 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1084                 unsigned long size);
1085 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1086                 unsigned long size, struct zap_details *);
1087 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1088                 unsigned long start, unsigned long end);
1089 
1090 /**
1091  * mm_walk - callbacks for walk_page_range
1092  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
1093  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1094  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1095  *             this handler is required to be able to handle
1096  *             pmd_trans_huge() pmds; it may simply choose to call
1097  *             split_huge_page() instead of handling them explicitly.
1098  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1099  * @pte_hole: if set, called for each hole at all levels
1100  * @hugetlb_entry: if set, called for each hugetlb entry
1101  *                 *Caution*: The caller must hold mmap_sem if @hugetlb_entry
1102  *                            is used.
1103  *
1104  * (see walk_page_range for more details)
1105  */
1106 struct mm_walk {
1107         int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
1108                          unsigned long next, struct mm_walk *walk);
1109         int (*pud_entry)(pud_t *pud, unsigned long addr,
1110                          unsigned long next, struct mm_walk *walk);
1111         int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1112                          unsigned long next, struct mm_walk *walk);
1113         int (*pte_entry)(pte_t *pte, unsigned long addr,
1114                          unsigned long next, struct mm_walk *walk);
1115         int (*pte_hole)(unsigned long addr, unsigned long next,
1116                         struct mm_walk *walk);
1117         int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1118                              unsigned long addr, unsigned long next,
1119                              struct mm_walk *walk);
1120         struct mm_struct *mm;
1121         void *private;
1122 };
1123 
1124 int walk_page_range(unsigned long addr, unsigned long end,
1125                 struct mm_walk *walk);
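
/*
 * Sketch (not part of this header): count present PTEs in a range of an mm
 * with walk_page_range().  The caller must hold mm->mmap_sem for reading.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static inline unsigned long count_present_ptes(struct mm_struct *mm,
                                               unsigned long start,
                                               unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_pte_entry,
                .mm             = mm,
                .private        = &count,
        };

        walk_page_range(start, end, &walk);
        return count;
}
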
1126 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1127                 unsigned long end, unsigned long floor, unsigned long ceiling);
1128 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1129                         struct vm_area_struct *vma);
1130 void unmap_mapping_range(struct address_space *mapping,
1131                 loff_t const holebegin, loff_t const holelen, int even_cows);
1132 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1133         unsigned long *pfn);
1134 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1135                 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1136 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1137                         void *buf, int len, int write);
1138 
1139 static inline void unmap_shared_mapping_range(struct address_space *mapping,
1140                 loff_t const holebegin, loff_t const holelen)
1141 {
1142         unmap_mapping_range(mapping, holebegin, holelen, 0);
1143 }
1144 
1145 extern void truncate_pagecache(struct inode *inode, loff_t new);
1146 extern void truncate_setsize(struct inode *inode, loff_t newsize);
1147 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1148 int truncate_inode_page(struct address_space *mapping, struct page *page);
1149 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1150 int invalidate_inode_page(struct page *page);
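
/*
 * Sketch (not part of this header): the usual order of operations when a
 * filesystem shrinks a file from its ->setattr() method.  truncate_setsize()
 * updates i_size and evicts stale pagecache and user mappings; freeing the
 * on-disk blocks is filesystem-specific and only hinted at here.
 */
static inline void example_fs_shrink(struct inode *inode, loff_t newsize)
{
        truncate_setsize(inode, newsize);       /* i_size + pagecache + mmaps */
        /* ... filesystem-specific block freeing would follow ... */
}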
1151 
1152 #ifdef CONFIG_MMU
1153 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1154                         unsigned long address, unsigned int flags);
1155 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1156                             unsigned long address, unsigned int fault_flags);
1157 #else
1158 static inline int handle_mm_fault(struct mm_struct *mm,
1159                         struct vm_area_struct *vma, unsigned long address,
1160                         unsigned int flags)
1161 {
1162         /* should never happen if there's no MMU */
1163         BUG();
1164         return VM_FAULT_SIGBUS;
1165 }
1166 static inline int fixup_user_fault(struct task_struct *tsk,
1167                 struct mm_struct *mm, unsigned long address,
1168                 unsigned int fault_flags)
1169 {
1170         /* should never happen if there's no MMU */
1171         BUG();
1172         return -EFAULT;
1173 }
1174 #endif
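
/*
 * Sketch (not part of this header) of the heart of an arch page-fault
 * handler: mmap_sem is assumed held for read and "vma" already looked up.
 * FAULT_FLAG_ALLOW_RETRY is deliberately not requested, so VM_FAULT_RETRY
 * handling (where handle_mm_fault() drops mmap_sem) can be left out.
 */
static inline int fault_in_address(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long address, bool write)
{
        unsigned int flags = FAULT_FLAG_KILLABLE |
                             (write ? FAULT_FLAG_WRITE : 0);
        int ret = handle_mm_fault(mm, vma, address, flags);

        if (ret & VM_FAULT_ERROR)
                return (ret & VM_FAULT_OOM) ? -ENOMEM : -EFAULT;
        return 0;
}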
1175 
1176 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1177 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1178                 void *buf, int len, int write);
1179 
1180 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1181                       unsigned long start, unsigned long nr_pages,
1182                       unsigned int foll_flags, struct page **pages,
1183                       struct vm_area_struct **vmas, int *nonblocking);
1184 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1185                     unsigned long start, unsigned long nr_pages,
1186                     int write, int force, struct page **pages,
1187                     struct vm_area_struct **vmas);
1188 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1189                         struct page **pages);
1190 struct kvec;
1191 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1192                         struct page **pages);
1193 int get_kernel_page(unsigned long start, int write, struct page **pages);
1194 struct page *get_dump_page(unsigned long addr);
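
/*
 * Sketch (not part of this header): pin one page of the current process at
 * user address "uaddr" for writing.  Assumes <linux/sched.h> for "current";
 * the caller must later call set_page_dirty_lock() and put_page() on the
 * returned page.
 */
static inline int pin_user_page_for_write(unsigned long uaddr,
                                          struct page **pagep)
{
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr, 1,
                             1 /* write */, 0 /* force */, pagep, NULL);
        up_read(&current->mm->mmap_sem);

        return ret == 1 ? 0 : -EFAULT;
}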
1195 
1196 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1197 extern void do_invalidatepage(struct page *page, unsigned int offset,
1198                               unsigned int length);
1199 
1200 int __set_page_dirty_nobuffers(struct page *page);
1201 int __set_page_dirty_no_writeback(struct page *page);
1202 int redirty_page_for_writepage(struct writeback_control *wbc,
1203                                 struct page *page);
1204 void account_page_dirtied(struct page *page, struct address_space *mapping);
1205 void account_page_writeback(struct page *page);
1206 int set_page_dirty(struct page *page);
1207 int set_page_dirty_lock(struct page *page);
1208 int clear_page_dirty_for_io(struct page *page);
1209 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1210 
1211 /* Is the vma a continuation of the stack vma above it? */
1212 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1213 {
1214         return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1215 }
1216 
1217 static inline int stack_guard_page_start(struct vm_area_struct *vma,
1218                                              unsigned long addr)
1219 {
1220         return (vma->vm_flags & VM_GROWSDOWN) &&
1221                 (vma->vm_start == addr) &&
1222                 !vma_growsdown(vma->vm_prev, addr);
1223 }
1224 
1225 /* Is the vma a continuation of the stack vma below it? */
1226 static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1227 {
1228         return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1229 }
1230 
1231 static inline int stack_guard_page_end(struct vm_area_struct *vma,
1232                                            unsigned long addr)
1233 {
1234         return (vma->vm_flags & VM_GROWSUP) &&
1235                 (vma->vm_end == addr) &&
1236                 !vma_growsup(vma->vm_next, addr);
1237 }
1238 
1239 extern pid_t
1240 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
1241 
1242 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1243                 unsigned long old_addr, struct vm_area_struct *new_vma,
1244                 unsigned long new_addr, unsigned long len,
1245                 bool need_rmap_locks);
1246 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1247                               unsigned long end, pgprot_t newprot,
1248                               int dirty_accountable, int prot_numa);
1249 extern int mprotect_fixup(struct vm_area_struct *vma,
1250                           struct vm_area_struct **pprev, unsigned long start,
1251                           unsigned long end, unsigned long newflags);
1252 
1253 /*
1254  * Doesn't attempt to fault pages in; may return fewer pages than requested.
1255  */
1256 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1257                           struct page **pages);
1258 /*
1259  * per-process(per-mm_struct) statistics.
1260  */
1261 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1262 {
1263         long val = atomic_long_read(&mm->rss_stat.count[member]);
1264 
1265 #ifdef SPLIT_RSS_COUNTING
1266         /*
1267          * The counter is updated asynchronously and may temporarily go
1268          * negative, which callers never expect, so clamp it at zero.
1269          */
1270         if (val < 0)
1271                 val = 0;
1272 #endif
1273         return (unsigned long)val;
1274 }
1275 
1276 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1277 {
1278         atomic_long_add(value, &mm->rss_stat.count[member]);
1279 }
1280 
1281 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1282 {
1283         atomic_long_inc(&mm->rss_stat.count[member]);
1284 }
1285 
1286 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1287 {
1288         atomic_long_dec(&mm->rss_stat.count[member]);
1289 }
1290 
1291 static inline unsigned long get_mm_rss(struct mm_struct *mm)
1292 {
1293         return get_mm_counter(mm, MM_FILEPAGES) +
1294                 get_mm_counter(mm, MM_ANONPAGES);
1295 }
1296 
1297 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1298 {
1299         return max(mm->hiwater_rss, get_mm_rss(mm));
1300 }
1301 
1302 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1303 {
1304         return max(mm->hiwater_vm, mm->total_vm);
1305 }
1306 
1307 static inline void update_hiwater_rss(struct mm_struct *mm)
1308 {
1309         unsigned long _rss = get_mm_rss(mm);
1310 
1311         if ((mm)->hiwater_rss < _rss)
1312                 (mm)->hiwater_rss = _rss;
1313 }
1314 
1315 static inline void update_hiwater_vm(struct mm_struct *mm)
1316 {
1317         if (mm->hiwater_vm < mm->total_vm)
1318                 mm->hiwater_vm = mm->total_vm;
1319 }
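
/*
 * Sketch (not part of this header): how a path that replaces one file page
 * with one anonymous page would keep the per-mm RSS statistics and the
 * high-water mark consistent.
 */
static inline void account_anon_for_file_swap(struct mm_struct *mm)
{
        inc_mm_counter(mm, MM_ANONPAGES);
        dec_mm_counter(mm, MM_FILEPAGES);
        update_hiwater_rss(mm);         /* refresh the peak-RSS watermark */
}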
1320 
1321 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1322                                          struct mm_struct *mm)
1323 {
1324         unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1325 
1326         if (*maxrss < hiwater_rss)
1327                 *maxrss = hiwater_rss;
1328 }
1329 
1330 #if defined(SPLIT_RSS_COUNTING)
1331 void sync_mm_rss(struct mm_struct *mm);
1332 #else
1333 static inline void sync_mm_rss(struct mm_struct *mm)
1334 {
1335 }
1336 #endif
1337 
1338 int vma_wants_writenotify(struct vm_area_struct *vma);
1339 
1340 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1341                                spinlock_t **ptl);
1342 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1343                                     spinlock_t **ptl)
1344 {
1345         pte_t *ptep;
1346         __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1347         return ptep;
1348 }
1349 
1350 #ifdef __PAGETABLE_PUD_FOLDED
1351 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1352                                                 unsigned long address)
1353 {
1354         return 0;
1355 }
1356 #else
1357 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1358 #endif
1359 
1360 #ifdef __PAGETABLE_PMD_FOLDED
1361 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1362                                                 unsigned long address)
1363 {
1364         return 0;
1365 }
1366 #else
1367 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1368 #endif
1369 
1370 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1371                 pmd_t *pmd, unsigned long address);
1372 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1373 
1374 /*
1375  * The following ifdef needed to get the 4level-fixup.h header to work.
1376  * Remove it when 4level-fixup.h has been removed.
1377  */
1378 #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1379 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1380 {
1381         return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1382                 NULL: pud_offset(pgd, address);
1383 }
1384 
1385 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1386 {
1387         return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1388                 NULL: pmd_offset(pud, address);
1389 }
1390 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1391 
1392 #if USE_SPLIT_PTE_PTLOCKS
1393 #if ALLOC_SPLIT_PTLOCKS
1394 void __init ptlock_cache_init(void);
1395 extern bool ptlock_alloc(struct page *page);
1396 extern void ptlock_free(struct page *page);
1397 
1398 static inline spinlock_t *ptlock_ptr(struct page *page)
1399 {
1400         return page->ptl;
1401 }
1402 #else /* ALLOC_SPLIT_PTLOCKS */
1403 static inline void ptlock_cache_init(void)
1404 {
1405 }
1406 
1407 static inline bool ptlock_alloc(struct page *page)
1408 {
1409         return true;
1410 }
1411 
1412 static inline void ptlock_free(struct page *page)
1413 {
1414 }
1415 
1416 static inline spinlock_t *ptlock_ptr(struct page *page)
1417 {
1418         return &page->ptl;
1419 }
1420 #endif /* ALLOC_SPLIT_PTLOCKS */
1421 
1422 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1423 {
1424         return ptlock_ptr(pmd_page(*pmd));
1425 }
1426 
1427 static inline bool ptlock_init(struct page *page)
1428 {
1429         /*
1430          * prep_new_page() initializes page->private (and therefore page->ptl)
1431          * to 0.  Make sure nobody took it into use in the meantime.
1432          *
1433          * That can happen if an arch tries to use slab for page table
1434          * allocation: slab code uses page->slab_cache and page->first_page
1435          * (for tail pages), which share storage with page->ptl.
1436          */
1437         VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1438         if (!ptlock_alloc(page))
1439                 return false;
1440         spin_lock_init(ptlock_ptr(page));
1441         return true;
1442 }
1443 
1444 /* Reset page->mapping so free_pages_check won't complain. */
1445 static inline void pte_lock_deinit(struct page *page)
1446 {
1447         page->mapping = NULL;
1448         ptlock_free(page);
1449 }
1450 
1451 #else   /* !USE_SPLIT_PTE_PTLOCKS */
1452 /*
1453  * We use mm->page_table_lock to guard all pagetable pages of the mm.
1454  */
1455 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1456 {
1457         return &mm->page_table_lock;
1458 }
1459 static inline void ptlock_cache_init(void) {}
1460 static inline bool ptlock_init(struct page *page) { return true; }
1461 static inline void pte_lock_deinit(struct page *page) {}
1462 #endif /* USE_SPLIT_PTE_PTLOCKS */
1463 
1464 static inline void pgtable_init(void)
1465 {
1466         ptlock_cache_init();
1467         pgtable_cache_init();
1468 }
1469 
1470 static inline bool pgtable_page_ctor(struct page *page)
1471 {
1472         inc_zone_page_state(page, NR_PAGETABLE);
1473         return ptlock_init(page);
1474 }
1475 
1476 static inline void pgtable_page_dtor(struct page *page)
1477 {
1478         pte_lock_deinit(page);
1479         dec_zone_page_state(page, NR_PAGETABLE);
1480 }
1481 
1482 #define pte_offset_map_lock(mm, pmd, address, ptlp)     \
1483 ({                                                      \
1484         spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
1485         pte_t *__pte = pte_offset_map(pmd, address);    \
1486         *(ptlp) = __ptl;                                \
1487         spin_lock(__ptl);                               \
1488         __pte;                                          \
1489 })
1490 
1491 #define pte_unmap_unlock(pte, ptl)      do {            \
1492         spin_unlock(ptl);                               \
1493         pte_unmap(pte);                                 \
1494 } while (0)
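
/*
 * Illustrative sketch (not part of mm.h): a caller typically pairs
 * pte_offset_map_lock() with pte_unmap_unlock() to examine one pte under
 * its page table lock. example_pte_present() is a hypothetical helper;
 * the caller is assumed to hold mm->mmap_sem and to have a populated pmd
 * for @address.
 */
static inline bool example_pte_present(struct mm_struct *mm, pmd_t *pmd,
                                       unsigned long address)
{
        spinlock_t *ptl;
        pte_t *pte;
        bool present;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        present = pte_present(*pte);
        pte_unmap_unlock(pte, ptl);
        return present;
}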
1495 
1496 #define pte_alloc_map(mm, vma, pmd, address)                            \
1497         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,    \
1498                                                         pmd, address))? \
1499          NULL: pte_offset_map(pmd, address))
1500 
1501 #define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
1502         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,   \
1503                                                         pmd, address))? \
1504                 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1505 
1506 #define pte_alloc_kernel(pmd, address)                  \
1507         ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1508                 NULL: pte_offset_kernel(pmd, address))
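
/*
 * Illustrative sketch (not part of mm.h), assuming the usual kernel page
 * table layout: pud_alloc(), pmd_alloc() and pte_alloc_kernel() above can
 * be chained to install a single kernel mapping in init_mm.
 * example_map_kernel_page() and its pfn/prot arguments are hypothetical.
 */
static int example_map_kernel_page(unsigned long addr, unsigned long pfn,
                                   pgprot_t prot)
{
        pgd_t *pgd = pgd_offset_k(addr);
        pud_t *pud = pud_alloc(&init_mm, pgd, addr);
        pmd_t *pmd;
        pte_t *pte;

        if (!pud)
                return -ENOMEM;
        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
        return 0;
}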
1509 
1510 #if USE_SPLIT_PMD_PTLOCKS
1511 
1512 static struct page *pmd_to_page(pmd_t *pmd)
1513 {
1514         unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1515         return virt_to_page((void *)((unsigned long) pmd & mask));
1516 }
1517 
1518 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1519 {
1520         return ptlock_ptr(pmd_to_page(pmd));
1521 }
1522 
1523 static inline bool pgtable_pmd_page_ctor(struct page *page)
1524 {
1525 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1526         page->pmd_huge_pte = NULL;
1527 #endif
1528         return ptlock_init(page);
1529 }
1530 
1531 static inline void pgtable_pmd_page_dtor(struct page *page)
1532 {
1533 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1534         VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1535 #endif
1536         ptlock_free(page);
1537 }
1538 
1539 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1540 
1541 #else
1542 
1543 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1544 {
1545         return &mm->page_table_lock;
1546 }
1547 
1548 static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1549 static inline void pgtable_pmd_page_dtor(struct page *page) {}
1550 
1551 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1552 
1553 #endif
1554 
1555 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1556 {
1557         spinlock_t *ptl = pmd_lockptr(mm, pmd);
1558         spin_lock(ptl);
1559         return ptl;
1560 }
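
/*
 * Illustrative sketch (not part of mm.h): pmd_lock() returns the spinlock
 * it took, so the caller unlocks that same pointer once the pmd entry has
 * been examined or updated. example_pmd_is_huge() is a hypothetical helper.
 */
static inline bool example_pmd_is_huge(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);
        bool huge = pmd_trans_huge(*pmd);

        spin_unlock(ptl);
        return huge;
}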
1561 
1562 extern void free_area_init(unsigned long * zones_size);
1563 extern void free_area_init_node(int nid, unsigned long * zones_size,
1564                 unsigned long zone_start_pfn, unsigned long *zholes_size);
1565 extern void free_initmem(void);
1566 
1567 /*
1568  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1569  * into the buddy system. The freed pages will be poisoned with the pattern
1570  * "poison" if that value is within the range [0, UCHAR_MAX].
1571  * Returns the number of pages freed into the buddy system.
1572  */
1573 extern unsigned long free_reserved_area(void *start, void *end,
1574                                         int poison, char *s);
1575 
1576 #ifdef  CONFIG_HIGHMEM
1577 /*
1578  * Free a highmem page into the buddy system, adjusting totalhigh_pages
1579  * and totalram_pages.
1580  */
1581 extern void free_highmem_page(struct page *page);
1582 #endif
1583 
1584 extern void adjust_managed_page_count(struct page *page, long count);
1585 extern void mem_init_print_info(const char *str);
1586 
1587 /* Free the reserved page into the buddy system, so it gets managed. */
1588 static inline void __free_reserved_page(struct page *page)
1589 {
1590         ClearPageReserved(page);
1591         init_page_count(page);
1592         __free_page(page);
1593 }
1594 
1595 static inline void free_reserved_page(struct page *page)
1596 {
1597         __free_reserved_page(page);
1598         adjust_managed_page_count(page, 1);
1599 }
1600 
1601 static inline void mark_page_reserved(struct page *page)
1602 {
1603         SetPageReserved(page);
1604         adjust_managed_page_count(page, -1);
1605 }
1606 
1607 /*
1608  * Default method to free all the __init memory into the buddy system.
1609  * The freed pages will be poisoned with the pattern "poison" if that value
1610  * is within the range [0, UCHAR_MAX].
1611  * Returns the number of pages freed into the buddy system.
1612  */
1613 static inline unsigned long free_initmem_default(int poison)
1614 {
1615         extern char __init_begin[], __init_end[];
1616 
1617         return free_reserved_area(&__init_begin, &__init_end,
1618                                   poison, "unused kernel");
1619 }
1620 
1621 static inline unsigned long get_num_physpages(void)
1622 {
1623         int nid;
1624         unsigned long phys_pages = 0;
1625 
1626         for_each_online_node(nid)
1627                 phys_pages += node_present_pages(nid);
1628 
1629         return phys_pages;
1630 }
1631 
1632 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1633 /*
1634  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1635  * zones, allocate the backing mem_map and account for memory holes in a more
1636  * architecture-independent manner. This is a substitute for creating the
1637  * zone_sizes[] and zholes_size[] arrays and passing them to
1638  * free_area_init_node().
1639  *
1640  * An architecture is expected to register the ranges of page frames backed
1641  * by physical memory with memblock_add[_node]() before calling
1642  * free_area_init_nodes(), passing in the PFN at which each zone ends. In
1643  * basic usage, an architecture is expected to do something like:
1644  *
1645  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1646  *                                                       max_highmem_pfn};
1647  * for_each_valid_physical_page_range()
1648  *      memblock_add_node(base, size, nid)
1649  * free_area_init_nodes(max_zone_pfns);
1650  *
1651  * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1652  * registered physical page range.  Similarly
1653  * sparse_memory_present_with_active_regions() calls memory_present() for
1654  * each range when SPARSEMEM is enabled.
1655  *
1656  * See mm/page_alloc.c for more information on each function exposed by
1657  * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1658  */
1659 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1660 unsigned long node_map_pfn_alignment(void);
1661 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1662                                                 unsigned long end_pfn);
1663 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1664                                                 unsigned long end_pfn);
1665 extern void get_pfn_range_for_nid(unsigned int nid,
1666                         unsigned long *start_pfn, unsigned long *end_pfn);
1667 extern unsigned long find_min_pfn_with_active_regions(void);
1668 extern void free_bootmem_with_active_regions(int nid,
1669                                                 unsigned long max_low_pfn);
1670 extern void sparse_memory_present_with_active_regions(int nid);
1671 
1672 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
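
/*
 * Illustrative sketch (not part of mm.h) of the usage outlined in the
 * comment above: an architecture registers its RAM with memblock (needs
 * <linux/memblock.h>) and then hands the zone end PFNs to
 * free_area_init_nodes(). The function name, the base/size values and the
 * use of max_low_pfn are hypothetical placeholders for whatever the
 * architecture discovers from firmware.
 */
void __init example_arch_zone_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

        /* one range of RAM on node 0; real code loops over firmware ranges */
        memblock_add_node(0x0, 256UL << 20, 0);

        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}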
1673 
1674 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1675     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1676 static inline int __early_pfn_to_nid(unsigned long pfn)
1677 {
1678         return 0;
1679 }
1680 #else
1681 /* please see mm/page_alloc.c */
1682 extern int __meminit early_pfn_to_nid(unsigned long pfn);
1683 /* there is a per-arch backend function. */
1684 extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1685 #endif
1686 
1687 extern void set_dma_reserve(unsigned long new_dma_reserve);
1688 extern void memmap_init_zone(unsigned long, int, unsigned long,
1689                                 unsigned long, enum memmap_context);
1690 extern void setup_per_zone_wmarks(void);
1691 extern int __meminit init_per_zone_wmark_min(void);
1692 extern void mem_init(void);
1693 extern void __init mmap_init(void);
1694 extern void show_mem(unsigned int flags);
1695 extern void si_meminfo(struct sysinfo * val);
1696 extern void si_meminfo_node(struct sysinfo *val, int nid);
1697 
1698 extern __printf(3, 4)
1699 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1700 
1701 extern void setup_per_cpu_pageset(void);
1702 
1703 extern void zone_pcp_update(struct zone *zone);
1704 extern void zone_pcp_reset(struct zone *zone);
1705 
1706 /* page_alloc.c */
1707 extern int min_free_kbytes;
1708 
1709 /* nommu.c */
1710 extern atomic_long_t mmap_pages_allocated;
1711 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1712 
1713 /* interval_tree.c */
1714 void vma_interval_tree_insert(struct vm_area_struct *node,
1715                               struct rb_root *root);
1716 void vma_interval_tree_insert_after(struct vm_area_struct *node,
1717                                     struct vm_area_struct *prev,
1718                                     struct rb_root *root);
1719 void vma_interval_tree_remove(struct vm_area_struct *node,
1720                               struct rb_root *root);
1721 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1722                                 unsigned long start, unsigned long last);
1723 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1724                                 unsigned long start, unsigned long last);
1725 
1726 #define vma_interval_tree_foreach(vma, root, start, last)               \
1727         for (vma = vma_interval_tree_iter_first(root, start, last);     \
1728              vma; vma = vma_interval_tree_iter_next(vma, start, last))
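
/*
 * Illustrative sketch (not part of mm.h): walking every VMA that maps a
 * given page range of a file via the address_space interval tree, in the
 * style of mm/rmap.c. example_walk_file_vmas() is hypothetical and assumes
 * <linux/fs.h> for struct address_space and a kernel where the tree is
 * still protected by i_mmap_mutex.
 */
static void example_walk_file_vmas(struct address_space *mapping,
                                   pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;

        mutex_lock(&mapping->i_mmap_mutex);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                /* vma maps at least one page of [first, last] in the file */
        }
        mutex_unlock(&mapping->i_mmap_mutex);
}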
1729 
1730 static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1731                                         struct list_head *list)
1732 {
1733         list_add_tail(&vma->shared.nonlinear, list);
1734 }
1735 
1736 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1737                                    struct rb_root *root);
1738 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1739                                    struct rb_root *root);
1740 struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1741         struct rb_root *root, unsigned long start, unsigned long last);
1742 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1743         struct anon_vma_chain *node, unsigned long start, unsigned long last);
1744 #ifdef CONFIG_DEBUG_VM_RB
1745 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1746 #endif
1747 
1748 #define anon_vma_interval_tree_foreach(avc, root, start, last)           \
1749         for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1750              avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1751 
1752 /* mmap.c */
1753 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1754 extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1755         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1756 extern struct vm_area_struct *vma_merge(struct mm_struct *,
1757         struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1758         unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1759         struct mempolicy *);
1760 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1761 extern int split_vma(struct mm_struct *,
1762         struct vm_area_struct *, unsigned long addr, int new_below);
1763 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1764 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1765         struct rb_node **, struct rb_node *);
1766 extern void unlink_file_vma(struct vm_area_struct *);
1767 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1768         unsigned long addr, unsigned long len, pgoff_t pgoff,
1769         bool *need_rmap_locks);
1770 extern void exit_mmap(struct mm_struct *);
1771 
1772 extern int mm_take_all_locks(struct mm_struct *mm);
1773 extern void mm_drop_all_locks(struct mm_struct *mm);
1774 
1775 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1776 extern struct file *get_mm_exe_file(struct mm_struct *mm);
1777 
1778 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1779 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
1780                                    unsigned long addr, unsigned long len,
1781                                    unsigned long flags, struct page **pages);
1782 extern int install_special_mapping(struct mm_struct *mm,
1783                                    unsigned long addr, unsigned long len,
1784                                    unsigned long flags, struct page **pages);
1785 
1786 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1787 
1788 extern unsigned long mmap_region(struct file *file, unsigned long addr,
1789         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1790 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1791         unsigned long len, unsigned long prot, unsigned long flags,
1792         unsigned long pgoff, unsigned long *populate);
1793 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1794 
1795 #ifdef CONFIG_MMU
1796 extern int __mm_populate(unsigned long addr, unsigned long len,
1797                          int ignore_errors);
1798 static inline void mm_populate(unsigned long addr, unsigned long len)
1799 {
1800         /* Ignore errors */
1801         (void) __mm_populate(addr, len, 1);
1802 }
1803 #else
1804 static inline void mm_populate(unsigned long addr, unsigned long len) {}
1805 #endif
1806 
1807 /* These take the mm semaphore themselves */
1808 extern unsigned long vm_brk(unsigned long, unsigned long);
1809 extern int vm_munmap(unsigned long, size_t);
1810 extern unsigned long vm_mmap(struct file *, unsigned long,
1811         unsigned long, unsigned long,
1812         unsigned long, unsigned long);
1813 
1814 struct vm_unmapped_area_info {
1815 #define VM_UNMAPPED_AREA_TOPDOWN 1
1816         unsigned long flags;
1817         unsigned long length;
1818         unsigned long low_limit;
1819         unsigned long high_limit;
1820         unsigned long align_mask;
1821         unsigned long align_offset;
1822 };
1823 
1824 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
1825 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
1826 
1827 /*
1828  * Search for an unmapped address range.
1829  *
1830  * We are looking for a range that:
1831  * - does not intersect with any VMA;
1832  * - is contained within the [low_limit, high_limit) interval;
1833  * - is at least the desired size;
1834  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
1835  */
1836 static inline unsigned long
1837 vm_unmapped_area(struct vm_unmapped_area_info *info)
1838 {
1839         if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
1840                 return unmapped_area(info);
1841         else
1842                 return unmapped_area_topdown(info);
1843 }
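
/*
 * Illustrative sketch (not part of mm.h): an arch_get_unmapped_area()
 * style helper fills in vm_unmapped_area_info and lets vm_unmapped_area()
 * do the search. example_get_unmapped_area() is hypothetical;
 * TASK_UNMAPPED_BASE and TASK_SIZE are the usual per-arch limits.
 */
static unsigned long example_get_unmapped_area(unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;                         /* bottom-up search */
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = 0;                    /* no special alignment */
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}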
1844 
1845 /* truncate.c */
1846 extern void truncate_inode_pages(struct address_space *, loff_t);
1847 extern void truncate_inode_pages_range(struct address_space *,
1848                                        loff_t lstart, loff_t lend);
1849 extern void truncate_inode_pages_final(struct address_space *);
1850 
1851 /* generic vm_area_ops exported for stackable file systems */
1852 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1853 extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1854 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1855 
1856 /* mm/page-writeback.c */
1857 int write_one_page(struct page *page, int wait);
1858 void task_dirty_inc(struct task_struct *tsk);
1859 
1860 /* readahead.c */
1861 #define VM_MAX_READAHEAD        128     /* kbytes */
1862 #define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
1863 
1864 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1865                         pgoff_t offset, unsigned long nr_to_read);
1866 
1867 void page_cache_sync_readahead(struct address_space *mapping,
1868                                struct file_ra_state *ra,
1869                                struct file *filp,
1870                                pgoff_t offset,
1871                                unsigned long size);
1872 
1873 void page_cache_async_readahead(struct address_space *mapping,
1874                                 struct file_ra_state *ra,
1875                                 struct file *filp,
1876                                 struct page *pg,
1877                                 pgoff_t offset,
1878                                 unsigned long size);
1879 
1880 unsigned long max_sane_readahead(unsigned long nr);
1881 
1882 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
1883 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1884 
1885 /* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
1886 extern int expand_downwards(struct vm_area_struct *vma,
1887                 unsigned long address);
1888 #if VM_GROWSUP
1889 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1890 #else
1891   #define expand_upwards(vma, address) do { } while (0)
1892 #endif
1893 
1894 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1895 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1896 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1897                                              struct vm_area_struct **pprev);
1898 
1899 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
1900    NULL if none.  Assume start_addr < end_addr. */
1901 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
1902 {
1903         struct vm_area_struct * vma = find_vma(mm,start_addr);
1904 
1905         if (vma && end_addr <= vma->vm_start)
1906                 vma = NULL;
1907         return vma;
1908 }
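
/*
 * Illustrative sketch (not part of mm.h): find_vma() only guarantees
 * addr < vm_end, so callers also check vm_start before trusting the hit.
 * example_addr_is_mapped() is a hypothetical helper; mmap_sem must be
 * held across the lookup, as usual.
 */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        mapped = vma && vma->vm_start <= addr;
        up_read(&mm->mmap_sem);
        return mapped;
}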
1909 
1910 static inline unsigned long vma_pages(struct vm_area_struct *vma)
1911 {
1912         return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1913 }
1914 
1915 /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
1916 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1917                                 unsigned long vm_start, unsigned long vm_end)
1918 {
1919         struct vm_area_struct *vma = find_vma(mm, vm_start);
1920 
1921         if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
1922                 vma = NULL;
1923 
1924         return vma;
1925 }
1926 
1927 #ifdef CONFIG_MMU
1928 pgprot_t vm_get_page_prot(unsigned long vm_flags);
1929 #else
1930 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1931 {
1932         return __pgprot(0);
1933 }
1934 #endif
1935 
1936 #ifdef CONFIG_NUMA_BALANCING
1937 unsigned long change_prot_numa(struct vm_area_struct *vma,
1938                         unsigned long start, unsigned long end);
1939 #endif
1940 
1941 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
1942 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1943                         unsigned long pfn, unsigned long size, pgprot_t);
1944 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1945 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1946                         unsigned long pfn);
1947 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1948                         unsigned long pfn);
1949 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
1950 
1951 
1952 struct page *follow_page_mask(struct vm_area_struct *vma,
1953                               unsigned long address, unsigned int foll_flags,
1954                               unsigned int *page_mask);
1955 
1956 static inline struct page *follow_page(struct vm_area_struct *vma,
1957                 unsigned long address, unsigned int foll_flags)
1958 {
1959         unsigned int unused_page_mask;
1960         return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
1961 }
1962 
1963 #define FOLL_WRITE      0x01    /* check pte is writable */
1964 #define FOLL_TOUCH      0x02    /* mark page accessed */
1965 #define FOLL_GET        0x04    /* do get_page on page */
1966 #define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
1967 #define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
1968 #define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
1969                                  * and return without waiting upon it */
1970 #define FOLL_MLOCK      0x40    /* mark page as mlocked */
1971 #define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
1972 #define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
1973 #define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
1974 #define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
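
/*
 * Illustrative sketch (not part of mm.h): FOLL_* flags combine to tell
 * follow_page() what the caller needs. example_grab_page() is
 * hypothetical; with FOLL_GET a successfully returned page carries a
 * reference the caller must drop with put_page(). mmap_sem must be held.
 */
static struct page *example_grab_page(struct vm_area_struct *vma,
                                      unsigned long address)
{
        return follow_page(vma, address, FOLL_GET | FOLL_TOUCH);
}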
1975 
1976 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1977                         void *data);
1978 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
1979                                unsigned long size, pte_fn_t fn, void *data);
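
/*
 * Illustrative sketch (not part of mm.h): apply_to_page_range() invokes a
 * pte_fn_t callback for every pte in the range, allocating page tables as
 * it walks. example_count_present() and the counter passed via @data are
 * hypothetical.
 */
static int example_count_present(pte_t *pte, pgtable_t token,
                                 unsigned long addr, void *data)
{
        unsigned long *count = data;

        if (pte_present(*pte))
                (*count)++;
        return 0;               /* returning non-zero aborts the walk */
}
/*
 * Usage: unsigned long n = 0;
 *        apply_to_page_range(mm, start, size, example_count_present, &n);
 */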
1980 
1981 #ifdef CONFIG_PROC_FS
1982 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
1983 #else
1984 static inline void vm_stat_account(struct mm_struct *mm,
1985                         unsigned long flags, struct file *file, long pages)
1986 {
1987         mm->total_vm += pages;
1988 }
1989 #endif /* CONFIG_PROC_FS */
1990 
1991 #ifdef CONFIG_DEBUG_PAGEALLOC
1992 extern void kernel_map_pages(struct page *page, int numpages, int enable);
1993 #ifdef CONFIG_HIBERNATION
1994 extern bool kernel_page_present(struct page *page);
1995 #endif /* CONFIG_HIBERNATION */
1996 #else
1997 static inline void
1998 kernel_map_pages(struct page *page, int numpages, int enable) {}
1999 #ifdef CONFIG_HIBERNATION
2000 static inline bool kernel_page_present(struct page *page) { return true; }
2001 #endif /* CONFIG_HIBERNATION */
2002 #endif
2003 
2004 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2005 #ifdef  __HAVE_ARCH_GATE_AREA
2006 int in_gate_area_no_mm(unsigned long addr);
2007 int in_gate_area(struct mm_struct *mm, unsigned long addr);
2008 #else
2009 int in_gate_area_no_mm(unsigned long addr);
2010 #define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
2011 #endif  /* __HAVE_ARCH_GATE_AREA */
2012 
2013 #ifdef CONFIG_SYSCTL
2014 extern int sysctl_drop_caches;
2015 int drop_caches_sysctl_handler(struct ctl_table *, int,
2016                                         void __user *, size_t *, loff_t *);
2017 #endif
2018 
2019 unsigned long shrink_slab(struct shrink_control *shrink,
2020                           unsigned long nr_pages_scanned,
2021                           unsigned long lru_pages);
2022 
2023 #ifndef CONFIG_MMU
2024 #define randomize_va_space 0
2025 #else
2026 extern int randomize_va_space;
2027 #endif
2028 
2029 const char * arch_vma_name(struct vm_area_struct *vma);
2030 void print_vma_addr(char *prefix, unsigned long rip);
2031 
2032 void sparse_mem_maps_populate_node(struct page **map_map,
2033                                    unsigned long pnum_begin,
2034                                    unsigned long pnum_end,
2035                                    unsigned long map_count,
2036                                    int nodeid);
2037 
2038 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2039 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2040 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
2041 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2042 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2043 void *vmemmap_alloc_block(unsigned long size, int node);
2044 void *vmemmap_alloc_block_buf(unsigned long size, int node);
2045 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2046 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2047                                int node);
2048 int vmemmap_populate(unsigned long start, unsigned long end, int node);
2049 void vmemmap_populate_print_last(void);
2050 #ifdef CONFIG_MEMORY_HOTPLUG
2051 void vmemmap_free(unsigned long start, unsigned long end);
2052 #endif
2053 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2054                                   unsigned long size);
2055 
2056 enum mf_flags {
2057         MF_COUNT_INCREASED = 1 << 0,
2058         MF_ACTION_REQUIRED = 1 << 1,
2059         MF_MUST_KILL = 1 << 2,
2060         MF_SOFT_OFFLINE = 1 << 3,
2061 };
2062 extern int memory_failure(unsigned long pfn, int trapno, int flags);
2063 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2064 extern int unpoison_memory(unsigned long pfn);
2065 extern int sysctl_memory_failure_early_kill;
2066 extern int sysctl_memory_failure_recovery;
2067 extern void shake_page(struct page *p, int access);
2068 extern atomic_long_t num_poisoned_pages;
2069 extern int soft_offline_page(struct page *page, int flags);
2070 
2071 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2072 extern void clear_huge_page(struct page *page,
2073                             unsigned long addr,
2074                             unsigned int pages_per_huge_page);
2075 extern void copy_user_huge_page(struct page *dst, struct page *src,
2076                                 unsigned long addr, struct vm_area_struct *vma,
2077                                 unsigned int pages_per_huge_page);
2078 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2079 
2080 #ifdef CONFIG_DEBUG_PAGEALLOC
2081 extern unsigned int _debug_guardpage_minorder;
2082 
2083 static inline unsigned int debug_guardpage_minorder(void)
2084 {
2085         return _debug_guardpage_minorder;
2086 }
2087 
2088 static inline bool page_is_guard(struct page *page)
2089 {
2090         return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
2091 }
2092 #else
2093 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2094 static inline bool page_is_guard(struct page *page) { return false; }
2095 #endif /* CONFIG_DEBUG_PAGEALLOC */
2096 
2097 #if MAX_NUMNODES > 1
2098 void __init setup_nr_node_ids(void);
2099 #else
2100 static inline void setup_nr_node_ids(void) {}
2101 #endif
2102 
2103 #endif /* __KERNEL__ */
2104 #endif /* _LINUX_MM_H */
2105 
