Linux/include/linux/mm.h

  1 #ifndef _LINUX_MM_H
  2 #define _LINUX_MM_H
  3 
  4 #include <linux/errno.h>
  5 
  6 #ifdef __KERNEL__
  7 
  8 #include <linux/mmdebug.h>
  9 #include <linux/gfp.h>
 10 #include <linux/bug.h>
 11 #include <linux/list.h>
 12 #include <linux/mmzone.h>
 13 #include <linux/rbtree.h>
 14 #include <linux/atomic.h>
 15 #include <linux/debug_locks.h>
 16 #include <linux/mm_types.h>
 17 #include <linux/range.h>
 18 #include <linux/pfn.h>
 19 #include <linux/bit_spinlock.h>
 20 #include <linux/shrinker.h>
 21 #include <linux/resource.h>
 22 #include <linux/page_ext.h>
 23 
 24 struct mempolicy;
 25 struct anon_vma;
 26 struct anon_vma_chain;
 27 struct file_ra_state;
 28 struct user_struct;
 29 struct writeback_control;
 30 
 31 #ifndef CONFIG_NEED_MULTIPLE_NODES      /* Don't use mapnrs, do it properly */
 32 extern unsigned long max_mapnr;
 33 
 34 static inline void set_max_mapnr(unsigned long limit)
 35 {
 36         max_mapnr = limit;
 37 }
 38 #else
 39 static inline void set_max_mapnr(unsigned long limit) { }
 40 #endif
 41 
 42 extern unsigned long totalram_pages;
 43 extern void * high_memory;
 44 extern int page_cluster;
 45 
 46 #ifdef CONFIG_SYSCTL
 47 extern int sysctl_legacy_va_layout;
 48 #else
 49 #define sysctl_legacy_va_layout 0
 50 #endif
 51 
 52 #include <asm/page.h>
 53 #include <asm/pgtable.h>
 54 #include <asm/processor.h>
 55 
 56 #ifndef __pa_symbol
 57 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 58 #endif
 59 
 60 /*
 61  * Used to prevent the common memory management code from establishing
 62  * a zero page mapping on a read fault.
 63  * This macro should be defined within <asm/pgtable.h>.
 64  * s390 does this to prevent multiplexing of hardware bits
 65  * related to the physical page in case of virtualization.
 66  */
 67 #ifndef mm_forbids_zeropage
 68 #define mm_forbids_zeropage(X)  (0)
 69 #endif
 70 
 71 extern unsigned long sysctl_user_reserve_kbytes;
 72 extern unsigned long sysctl_admin_reserve_kbytes;
 73 
 74 extern int sysctl_overcommit_memory;
 75 extern int sysctl_overcommit_ratio;
 76 extern unsigned long sysctl_overcommit_kbytes;
 77 
 78 extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
 79                                     size_t *, loff_t *);
 80 extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 81                                     size_t *, loff_t *);
 82 
 83 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 84 
 85 /* to align the pointer to the (next) page boundary */
 86 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 87 
 88 /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 89 #define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
 90 
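Example (sketch, not part of mm.h): how PAGE_ALIGN(), PAGE_ALIGNED() and nth_page() are typically combined to walk the pages of a physically contiguous buffer. touch_buffer_pages() and its parameters are illustrative names only.

    /* Illustrative only: round a byte length up to a page boundary and visit
     * every page of a contiguous buffer whose first page is @first. */
    static void touch_buffer_pages(struct page *first, void *addr, size_t len)
    {
            unsigned long nr = PAGE_ALIGN(len) >> PAGE_SHIFT; /* pages needed for len bytes */
            unsigned long i;

            WARN_ON(!PAGE_ALIGNED(addr));   /* caller is expected to pass a page-aligned address */
            for (i = 0; i < nr; i++) {
                    struct page *page = nth_page(first, i);

                    /* ... operate on page ... */
            }
    }
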
 91 /*
 92  * Linux kernel virtual memory manager primitives.
 93  * The idea being to have a "virtual" mm in the same way
 94  * we have a virtual fs - giving a cleaner interface to the
 95  * mm details, and allowing different kinds of memory mappings
 96  * (from shared memory to executable loading to arbitrary
 97  * mmap() functions).
 98  */
 99 
100 extern struct kmem_cache *vm_area_cachep;
101 
102 #ifndef CONFIG_MMU
103 extern struct rb_root nommu_region_tree;
104 extern struct rw_semaphore nommu_region_sem;
105 
106 extern unsigned int kobjsize(const void *objp);
107 #endif
108 
109 /*
110  * vm_flags in vm_area_struct, see mm_types.h.
111  */
112 #define VM_NONE         0x00000000
113 
114 #define VM_READ         0x00000001      /* currently active flags */
115 #define VM_WRITE        0x00000002
116 #define VM_EXEC         0x00000004
117 #define VM_SHARED       0x00000008
118 
119 /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
120 #define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
121 #define VM_MAYWRITE     0x00000020
122 #define VM_MAYEXEC      0x00000040
123 #define VM_MAYSHARE     0x00000080
124 
125 #define VM_GROWSDOWN    0x00000100      /* general info on the segment */
126 #define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
127 #define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */
128 
129 #define VM_LOCKED       0x00002000
130 #define VM_IO           0x00004000      /* Memory mapped I/O or similar */
131 
132                                         /* Used by sys_madvise() */
133 #define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
134 #define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
135 
136 #define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
137 #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
138 #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
139 #define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
140 #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
141 #define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
142 #define VM_ARCH_2       0x02000000
143 #define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
144 
145 #ifdef CONFIG_MEM_SOFT_DIRTY
146 # define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
147 #else
148 # define VM_SOFTDIRTY   0
149 #endif
150 
151 #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
152 #define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
153 #define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
154 #define VM_MERGEABLE    0x80000000      /* KSM may merge identical pages */
155 
156 #if defined(CONFIG_X86)
157 # define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
158 #elif defined(CONFIG_PPC)
159 # define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
160 #elif defined(CONFIG_PARISC)
161 # define VM_GROWSUP     VM_ARCH_1
162 #elif defined(CONFIG_METAG)
163 # define VM_GROWSUP     VM_ARCH_1
164 #elif defined(CONFIG_IA64)
165 # define VM_GROWSUP     VM_ARCH_1
166 #elif !defined(CONFIG_MMU)
167 # define VM_MAPPED_COPY VM_ARCH_1       /* T if mapped copy of data (nommu mmap) */
168 #endif
169 
170 #if defined(CONFIG_X86)
171 /* MPX specific bounds table or bounds directory */
172 # define VM_MPX         VM_ARCH_2
173 #endif
174 
175 #ifndef VM_GROWSUP
176 # define VM_GROWSUP     VM_NONE
177 #endif
178 
179 /* Bits set in the VMA until the stack is in its final location */
180 #define VM_STACK_INCOMPLETE_SETUP       (VM_RAND_READ | VM_SEQ_READ)
181 
182 #ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
183 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
184 #endif
185 
186 #ifdef CONFIG_STACK_GROWSUP
187 #define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
188 #else
189 #define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
190 #endif
191 
192 /*
193  * Special vmas that are non-mergable, non-mlock()able.
194  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
195  */
196 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
197 
198 /* This mask defines which mm->def_flags a process can inherit from its parent */
199 #define VM_INIT_DEF_MASK        VM_NOHUGEPAGE
200 
201 /*
202  * mapping from the currently active vm_flags protection bits (the
203  * low four bits) to a page protection mask..
204  */
205 extern pgprot_t protection_map[16];
206 
207 #define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
208 #define FAULT_FLAG_MKWRITE      0x02    /* Fault was mkwrite of existing pte */
209 #define FAULT_FLAG_ALLOW_RETRY  0x04    /* Retry fault if blocking */
210 #define FAULT_FLAG_RETRY_NOWAIT 0x08    /* Don't drop mmap_sem and wait when retrying */
211 #define FAULT_FLAG_KILLABLE     0x10    /* The fault task is in SIGKILL killable region */
212 #define FAULT_FLAG_TRIED        0x20    /* Second try */
213 #define FAULT_FLAG_USER         0x40    /* The fault originated in userspace */
214 
215 /*
216  * vm_fault is filled by the pagefault handler and passed to the vma's
217  * ->fault function. The vma's ->fault is responsible for returning a bitmask
218  * of VM_FAULT_xxx flags that give details about how the fault was handled.
219  *
220  * pgoff should be used in favour of virtual_address, if possible.
221  */
222 struct vm_fault {
223         unsigned int flags;             /* FAULT_FLAG_xxx flags */
224         pgoff_t pgoff;                  /* Logical page offset based on vma */
225         void __user *virtual_address;   /* Faulting virtual address */
226 
227         struct page *cow_page;          /* Handler may choose to COW */
228         struct page *page;              /* ->fault handlers should return a
229                                          * page here, unless VM_FAULT_NOPAGE
230                                          * is set (which is also implied by
231                                          * VM_FAULT_ERROR).
232                                          */
233         /* for ->map_pages() only */
234         pgoff_t max_pgoff;              /* map pages for offset from pgoff till
235                                          * max_pgoff inclusive */
236         pte_t *pte;                     /* pte entry associated with ->pgoff */
237 };
238 
239 /*
240  * These are the virtual MM functions - opening of an area, closing and
241  * unmapping it (needed to keep files on disk up-to-date etc), and pointers
242  * to the functions called when a no-page or a wp-page exception occurs.
243  */
244 struct vm_operations_struct {
245         void (*open)(struct vm_area_struct * area);
246         void (*close)(struct vm_area_struct * area);
247         int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
248         void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
249 
250         /* notification that a previously read-only page is about to become
251          * writable, if an error is returned it will cause a SIGBUS */
252         int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
253 
254         /* called by access_process_vm when get_user_pages() fails, typically
255          * for use by special VMAs that can switch between memory and hardware
256          */
257         int (*access)(struct vm_area_struct *vma, unsigned long addr,
258                       void *buf, int len, int write);
259 
260         /* Called by the /proc/PID/maps code to ask the vma whether it
261          * has a special name.  Returning non-NULL will also cause this
262          * vma to be dumped unconditionally. */
263         const char *(*name)(struct vm_area_struct *vma);
264 
265 #ifdef CONFIG_NUMA
266         /*
267          * set_policy() op must add a reference to any non-NULL @new mempolicy
268          * to hold the policy upon return.  Caller should pass NULL @new to
269          * remove a policy and fall back to surrounding context--i.e. do not
270          * install a MPOL_DEFAULT policy, nor the task or system default
271          * mempolicy.
272          */
273         int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
274 
275         /*
276          * get_policy() op must add reference [mpol_get()] to any policy at
277          * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
278          * in mm/mempolicy.c will do this automatically.
279          * get_policy() must NOT add a ref if the policy at (vma,addr) is not
280          * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
281          * If no [shared/vma] mempolicy exists at the addr, get_policy() op
282          * must return NULL--i.e., do not "fallback" to task or system default
283          * policy.
284          */
285         struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
286                                         unsigned long addr);
287 #endif
288         /*
289          * Called by vm_normal_page() for special PTEs to find the
290          * page for @addr.  This is useful if the default behavior
291          * (using pte_page()) would not find the correct page.
292          */
293         struct page *(*find_special_page)(struct vm_area_struct *vma,
294                                           unsigned long addr);
295 };
296 
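Example (sketch, not from this header): a minimal ->fault implementation in the style the structures above describe. The names my_dev, my_dev_lookup_page() and my_dev_vm_ops are hypothetical driver-side identifiers.

    static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct my_dev *dev = vma->vm_private_data;                /* hypothetical driver state */
            struct page *page = my_dev_lookup_page(dev, vmf->pgoff);  /* hypothetical lookup */

            if (!page)
                    return VM_FAULT_SIGBUS;         /* offset beyond the backing store */

            get_page(page);                         /* reference is consumed by the fault path */
            vmf->page = page;
            return 0;
    }

    static const struct vm_operations_struct my_dev_vm_ops = {
            .fault = my_dev_fault,
    };
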
297 struct mmu_gather;
298 struct inode;
299 
300 #define page_private(page)              ((page)->private)
301 #define set_page_private(page, v)       ((page)->private = (v))
302 
303 /* Valid only while the page is on the free path or on a free list */
304 static inline void set_freepage_migratetype(struct page *page, int migratetype)
305 {
306         page->index = migratetype;
307 }
308 
309 /* Valid only while the page is on the free path or on a free list */
310 static inline int get_freepage_migratetype(struct page *page)
311 {
312         return page->index;
313 }
314 
315 /*
316  * FIXME: take this include out, include page-flags.h in
317  * files which need it (119 of them)
318  */
319 #include <linux/page-flags.h>
320 #include <linux/huge_mm.h>
321 
322 /*
323  * Methods to modify the page usage count.
324  *
325  * What counts for a page usage:
326  * - cache mapping   (page->mapping)
327  * - private data    (page->private)
328  * - page mapped in a task's page tables, each mapping
329  *   is counted separately
330  *
331  * Also, many kernel routines increase the page count before a critical
332  * routine so they can be sure the page doesn't go away from under them.
333  */
334 
335 /*
336  * Drop a ref, return true if the refcount fell to zero (the page has no users)
337  */
338 static inline int put_page_testzero(struct page *page)
339 {
340         VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
341         return atomic_dec_and_test(&page->_count);
342 }
343 
344 /*
345  * Try to grab a ref unless the page has a refcount of zero, return false if
346  * that is the case.
347  * This can be called when MMU is off so it must not access
348  * any of the virtual mappings.
349  */
350 static inline int get_page_unless_zero(struct page *page)
351 {
352         return atomic_inc_not_zero(&page->_count);
353 }
354 
355 /*
356  * Try to drop a ref unless the page has a refcount of one, return false if
357  * that is the case.
358  * This is to make sure that the refcount won't become zero after this drop.
359  * This can be called when MMU is off so it must not access
360  * any of the virtual mappings.
361  */
362 static inline int put_page_unless_one(struct page *page)
363 {
364         return atomic_add_unless(&page->_count, -1, 1);
365 }
366 
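Example (sketch, not part of mm.h): the speculative-reference pattern these helpers support; inspect_page_safely() is an illustrative name.

    /* Illustrative only: take a speculative reference on a page that might be
     * freed concurrently, do something with it, then drop the reference. */
    static bool inspect_page_safely(struct page *page)
    {
            if (!get_page_unless_zero(page))
                    return false;           /* page already had no users, don't touch it */

            /* ... the page cannot be freed while we hold this reference ... */

            put_page(page);                 /* may free the page if we were the last user */
            return true;
    }
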
367 extern int page_is_ram(unsigned long pfn);
368 extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
369 
370 /* Support for virtually mapped pages */
371 struct page *vmalloc_to_page(const void *addr);
372 unsigned long vmalloc_to_pfn(const void *addr);
373 
374 /*
375  * Determine if an address is within the vmalloc range
376  *
377  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
378  * is no special casing required.
379  */
380 static inline int is_vmalloc_addr(const void *x)
381 {
382 #ifdef CONFIG_MMU
383         unsigned long addr = (unsigned long)x;
384 
385         return addr >= VMALLOC_START && addr < VMALLOC_END;
386 #else
387         return 0;
388 #endif
389 }
390 #ifdef CONFIG_MMU
391 extern int is_vmalloc_or_module_addr(const void *x);
392 #else
393 static inline int is_vmalloc_or_module_addr(const void *x)
394 {
395         return 0;
396 }
397 #endif
398 
399 extern void kvfree(const void *addr);
400 
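Example (sketch, not part of mm.h): kvfree() declared above is essentially this pattern built on is_vmalloc_addr(); the open-coded variant is shown for buffers that may have come from either kmalloc() or vmalloc(). free_either() is an illustrative name.

    static void free_either(const void *buf)
    {
            if (is_vmalloc_addr(buf))
                    vfree(buf);             /* virtually mapped allocation */
            else
                    kfree(buf);             /* slab allocation; kfree(NULL) is a no-op */
    }
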
401 static inline void compound_lock(struct page *page)
402 {
403 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
404         VM_BUG_ON_PAGE(PageSlab(page), page);
405         bit_spin_lock(PG_compound_lock, &page->flags);
406 #endif
407 }
408 
409 static inline void compound_unlock(struct page *page)
410 {
411 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
412         VM_BUG_ON_PAGE(PageSlab(page), page);
413         bit_spin_unlock(PG_compound_lock, &page->flags);
414 #endif
415 }
416 
417 static inline unsigned long compound_lock_irqsave(struct page *page)
418 {
419         unsigned long uninitialized_var(flags);
420 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
421         local_irq_save(flags);
422         compound_lock(page);
423 #endif
424         return flags;
425 }
426 
427 static inline void compound_unlock_irqrestore(struct page *page,
428                                               unsigned long flags)
429 {
430 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
431         compound_unlock(page);
432         local_irq_restore(flags);
433 #endif
434 }
435 
436 static inline struct page *compound_head_by_tail(struct page *tail)
437 {
438         struct page *head = tail->first_page;
439 
440         /*
441          * page->first_page may be a dangling pointer to an old
442          * compound page, so recheck that it is still a tail
443          * page before returning.
444          */
445         smp_rmb();
446         if (likely(PageTail(tail)))
447                 return head;
448         return tail;
449 }
450 
451 /*
452  * Since a compound page could be dismantled asynchronously by THP, or we
453  * may access an arbitrarily positioned struct page asynchronously, there
454  * is a tail flag race. To handle this race, we should call
455  * smp_rmb() before checking the tail flag; compound_head_by_tail() does so.
456  */
457 static inline struct page *compound_head(struct page *page)
458 {
459         if (unlikely(PageTail(page)))
460                 return compound_head_by_tail(page);
461         return page;
462 }
463 
464 /*
465  * If we access a compound page synchronously, such as an access to an
466  * allocated page, there is no need to handle the tail flag race, so we can
467  * check tail flag directly without any synchronization primitive.
468  */
469 static inline struct page *compound_head_fast(struct page *page)
470 {
471         if (unlikely(PageTail(page)))
472                 return page->first_page;
473         return page;
474 }
475 
476 /*
477  * The atomic page->_mapcount, starts from -1: so that transitions
478  * both from it and to it can be tracked, using atomic_inc_and_test
479  * and atomic_add_negative(-1).
480  */
481 static inline void page_mapcount_reset(struct page *page)
482 {
483         atomic_set(&(page)->_mapcount, -1);
484 }
485 
486 static inline int page_mapcount(struct page *page)
487 {
488         VM_BUG_ON_PAGE(PageSlab(page), page);
489         return atomic_read(&page->_mapcount) + 1;
490 }
491 
492 static inline int page_count(struct page *page)
493 {
494         return atomic_read(&compound_head(page)->_count);
495 }
496 
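Example (sketch, not part of mm.h): the usage count of a compound page lives in its head page, which is why page_count() above goes through compound_head(); refcount_of_addr() is an illustrative name.

    static int refcount_of_addr(const void *addr)
    {
            struct page *page = virt_to_page(addr);   /* may point into a tail page */

            return page_count(page);                   /* page_count() resolves the head itself */
    }
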
497 #ifdef CONFIG_HUGETLB_PAGE
498 extern int PageHeadHuge(struct page *page_head);
499 #else /* CONFIG_HUGETLB_PAGE */
500 static inline int PageHeadHuge(struct page *page_head)
501 {
502         return 0;
503 }
504 #endif /* CONFIG_HUGETLB_PAGE */
505 
506 static inline bool __compound_tail_refcounted(struct page *page)
507 {
508         return !PageSlab(page) && !PageHeadHuge(page);
509 }
510 
511 /*
512  * This takes a head page as a parameter and tells whether the
513  * tail page reference counting can be skipped.
514  *
515  * For this to be safe, PageSlab and PageHeadHuge must remain true on
516  * any given page where they return true here, until all tail pins
517  * have been released.
518  */
519 static inline bool compound_tail_refcounted(struct page *page)
520 {
521         VM_BUG_ON_PAGE(!PageHead(page), page);
522         return __compound_tail_refcounted(page);
523 }
524 
525 static inline void get_huge_page_tail(struct page *page)
526 {
527         /*
528          * __split_huge_page_refcount() cannot run from under us.
529          */
530         VM_BUG_ON_PAGE(!PageTail(page), page);
531         VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
532         VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
533         if (compound_tail_refcounted(page->first_page))
534                 atomic_inc(&page->_mapcount);
535 }
536 
537 extern bool __get_page_tail(struct page *page);
538 
539 static inline void get_page(struct page *page)
540 {
541         if (unlikely(PageTail(page)))
542                 if (likely(__get_page_tail(page)))
543                         return;
544         /*
545          * Getting a normal page or the head of a compound page
546          * requires to already have an elevated page->_count.
547          */
548         VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
549         atomic_inc(&page->_count);
550 }
551 
552 static inline struct page *virt_to_head_page(const void *x)
553 {
554         struct page *page = virt_to_page(x);
555 
556         /*
557          * We don't need to worry about synchronization of tail flag
558          * when we call virt_to_head_page() since it is only called for
559          * an already allocated page and this page won't be freed until
560          * this virt_to_head_page() is finished. So use _fast variant.
561          */
562         return compound_head_fast(page);
563 }
564 
565 /*
566  * Setup the page count before being freed into the page allocator for
567  * the first time (boot or memory hotplug)
568  */
569 static inline void init_page_count(struct page *page)
570 {
571         atomic_set(&page->_count, 1);
572 }
573 
574 /*
575  * PageBuddy() indicates that the page is free and in the buddy system
576  * (see mm/page_alloc.c).
577  *
578  * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
579  * -2 so that an underflow of the page_mapcount() won't be mistaken
580  * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
581  * efficiently by most CPU architectures.
582  */
583 #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
584 
585 static inline int PageBuddy(struct page *page)
586 {
587         return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
588 }
589 
590 static inline void __SetPageBuddy(struct page *page)
591 {
592         VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
593         atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
594 }
595 
596 static inline void __ClearPageBuddy(struct page *page)
597 {
598         VM_BUG_ON_PAGE(!PageBuddy(page), page);
599         atomic_set(&page->_mapcount, -1);
600 }
601 
602 #define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
603 
604 static inline int PageBalloon(struct page *page)
605 {
606         return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
607 }
608 
609 static inline void __SetPageBalloon(struct page *page)
610 {
611         VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
612         atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
613 }
614 
615 static inline void __ClearPageBalloon(struct page *page)
616 {
617         VM_BUG_ON_PAGE(!PageBalloon(page), page);
618         atomic_set(&page->_mapcount, -1);
619 }
620 
621 void put_page(struct page *page);
622 void put_pages_list(struct list_head *pages);
623 
624 void split_page(struct page *page, unsigned int order);
625 int split_free_page(struct page *page);
626 
627 /*
628  * Compound pages have a destructor function.  Provide a
629  * prototype for that function and accessor functions.
630  * These are _only_ valid on the head of a PG_compound page.
631  */
632 
633 static inline void set_compound_page_dtor(struct page *page,
634                                                 compound_page_dtor *dtor)
635 {
636         page[1].compound_dtor = dtor;
637 }
638 
639 static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
640 {
641         return page[1].compound_dtor;
642 }
643 
644 static inline int compound_order(struct page *page)
645 {
646         if (!PageHead(page))
647                 return 0;
648         return page[1].compound_order;
649 }
650 
651 static inline void set_compound_order(struct page *page, unsigned long order)
652 {
653         page[1].compound_order = order;
654 }
655 
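Example (sketch, not part of mm.h): compound_order() gives the allocation order, so the number of bytes a (possibly compound) page covers follows directly; page_bytes() is an illustrative name.

    static unsigned long page_bytes(struct page *page)
    {
            return PAGE_SIZE << compound_order(page);  /* order-0 pages cover PAGE_SIZE bytes */
    }
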
656 #ifdef CONFIG_MMU
657 /*
658  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
659  * servicing faults for write access.  In the normal case, we always want
660  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
661  * that do not have writing enabled, when used by access_process_vm.
662  */
663 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
664 {
665         if (likely(vma->vm_flags & VM_WRITE))
666                 pte = pte_mkwrite(pte);
667         return pte;
668 }
669 
670 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
671                 struct page *page, pte_t *pte, bool write, bool anon);
672 #endif
673 
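Example (sketch modelled on the generic fault path, CONFIG_MMU assumed): building a new PTE for a freshly faulted page, using maybe_mkwrite() so the entry only becomes writable when the VMA allows it. make_fault_pte() is an illustrative name.

    static pte_t make_fault_pte(struct vm_area_struct *vma, struct page *page,
                                bool write)
    {
            pte_t entry = mk_pte(page, vma->vm_page_prot);  /* base protections from the VMA */

            if (write)
                    entry = maybe_mkwrite(pte_mkdirty(entry), vma);
            return entry;
    }
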
674 /*
675  * Multiple processes may "see" the same page. E.g. for untouched
676  * mappings of /dev/null, all processes see the same page full of
677  * zeroes, and text pages of executables and shared libraries have
678  * only one copy in memory, at most, normally.
679  *
680  * For the non-reserved pages, page_count(page) denotes a reference count.
681  *   page_count() == 0 means the page is free. page->lru is then used for
682  *   freelist management in the buddy allocator.
683  *   page_count() > 0  means the page has been allocated.
684  *
685  * Pages are allocated by the slab allocator in order to provide memory
686  * to kmalloc and kmem_cache_alloc. In this case, the management of the
687  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
688  * unless a particular usage is carefully commented. (the responsibility of
689  * freeing the kmalloc memory is the caller's, of course).
690  *
691  * A page may be used by anyone else who does a __get_free_page().
692  * In this case, page_count still tracks the references, and should only
693  * be used through the normal accessor functions. The top bits of page->flags
694  * and page->virtual store page management information, but all other fields
695  * are unused and could be used privately, carefully. The management of this
696  * page is the responsibility of the one who allocated it, and those who have
697  * subsequently been given references to it.
698  *
699  * The other pages (we may call them "pagecache pages") are completely
700  * managed by the Linux memory manager: I/O, buffers, swapping etc.
701  * The following discussion applies only to them.
702  *
703  * A pagecache page contains an opaque `private' member, which belongs to the
704  * page's address_space. Usually, this is the address of a circular list of
705  * the page's disk buffers. PG_private must be set to tell the VM to call
706  * into the filesystem to release these pages.
707  *
708  * A page may belong to an inode's memory mapping. In this case, page->mapping
709  * is the pointer to the inode, and page->index is the file offset of the page,
710  * in units of PAGE_CACHE_SIZE.
711  *
712  * If pagecache pages are not associated with an inode, they are said to be
713  * anonymous pages. These may become associated with the swapcache, and in that
714  * case PG_swapcache is set, and page->private is an offset into the swapcache.
715  *
716  * In either case (swapcache or inode backed), the pagecache itself holds one
717  * reference to the page. Setting PG_private should also increment the
718  * refcount. Each user mapping also has a reference to the page.
719  *
720  * The pagecache pages are stored in a per-mapping radix tree, which is
721  * rooted at mapping->page_tree, and indexed by offset.
722  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
723  * lists, we instead now tag pages as dirty/writeback in the radix tree.
724  *
725  * All pagecache pages may be subject to I/O:
726  * - inode pages may need to be read from disk,
727  * - inode pages which have been modified and are MAP_SHARED may need
728  *   to be written back to the inode on disk,
729  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
730  *   modified may need to be swapped out to swap space and (later) to be read
731  *   back into memory.
732  */
733 
734 /*
735  * The zone field is never updated after free_area_init_core()
736  * sets it, so none of the operations on it need to be atomic.
737  */
738 
739 /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
740 #define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
741 #define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
742 #define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
743 #define LAST_CPUPID_PGOFF       (ZONES_PGOFF - LAST_CPUPID_WIDTH)
744 
745 /*
746  * Define the bit shifts to access each section.  For non-existent
747  * sections we define the shift as 0; that plus a 0 mask ensures
748  * the compiler will optimise away reference to them.
749  */
750 #define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
751 #define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
752 #define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
753 #define LAST_CPUPID_PGSHIFT     (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
754 
755 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
756 #ifdef NODE_NOT_IN_PAGE_FLAGS
757 #define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
758 #define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
759                                                 SECTIONS_PGOFF : ZONES_PGOFF)
760 #else
761 #define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
762 #define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
763                                                 NODES_PGOFF : ZONES_PGOFF)
764 #endif
765 
766 #define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
767 
768 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
769 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
770 #endif
771 
772 #define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
773 #define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
774 #define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
775 #define LAST_CPUPID_MASK        ((1UL << LAST_CPUPID_SHIFT) - 1)
776 #define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
777 
778 static inline enum zone_type page_zonenum(const struct page *page)
779 {
780         return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
781 }
782 
783 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
784 #define SECTION_IN_PAGE_FLAGS
785 #endif
786 
787 /*
788  * The identification function is mainly used by the buddy allocator for
789  * determining if two pages could be buddies. We are not really identifying
790  * the zone since we could be using the section number id if we do not have
791  * node id available in page flags.
792  * We only guarantee that it will return the same value for two combinable
793  * pages in a zone.
794  */
795 static inline int page_zone_id(struct page *page)
796 {
797         return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
798 }
799 
800 static inline int zone_to_nid(struct zone *zone)
801 {
802 #ifdef CONFIG_NUMA
803         return zone->node;
804 #else
805         return 0;
806 #endif
807 }
808 
809 #ifdef NODE_NOT_IN_PAGE_FLAGS
810 extern int page_to_nid(const struct page *page);
811 #else
812 static inline int page_to_nid(const struct page *page)
813 {
814         return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
815 }
816 #endif
817 
818 #ifdef CONFIG_NUMA_BALANCING
819 static inline int cpu_pid_to_cpupid(int cpu, int pid)
820 {
821         return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
822 }
823 
824 static inline int cpupid_to_pid(int cpupid)
825 {
826         return cpupid & LAST__PID_MASK;
827 }
828 
829 static inline int cpupid_to_cpu(int cpupid)
830 {
831         return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
832 }
833 
834 static inline int cpupid_to_nid(int cpupid)
835 {
836         return cpu_to_node(cpupid_to_cpu(cpupid));
837 }
838 
839 static inline bool cpupid_pid_unset(int cpupid)
840 {
841         return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
842 }
843 
844 static inline bool cpupid_cpu_unset(int cpupid)
845 {
846         return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
847 }
848 
849 static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
850 {
851         return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
852 }
853 
854 #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
855 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
856 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
857 {
858         return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
859 }
860 
861 static inline int page_cpupid_last(struct page *page)
862 {
863         return page->_last_cpupid;
864 }
865 static inline void page_cpupid_reset_last(struct page *page)
866 {
867         page->_last_cpupid = -1 & LAST_CPUPID_MASK;
868 }
869 #else
870 static inline int page_cpupid_last(struct page *page)
871 {
872         return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
873 }
874 
875 extern int page_cpupid_xchg_last(struct page *page, int cpupid);
876 
877 static inline void page_cpupid_reset_last(struct page *page)
878 {
879         int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
880 
881         page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
882         page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
883 }
884 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
885 #else /* !CONFIG_NUMA_BALANCING */
886 static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
887 {
888         return page_to_nid(page); /* XXX */
889 }
890 
891 static inline int page_cpupid_last(struct page *page)
892 {
893         return page_to_nid(page); /* XXX */
894 }
895 
896 static inline int cpupid_to_nid(int cpupid)
897 {
898         return -1;
899 }
900 
901 static inline int cpupid_to_pid(int cpupid)
902 {
903         return -1;
904 }
905 
906 static inline int cpupid_to_cpu(int cpupid)
907 {
908         return -1;
909 }
910 
911 static inline int cpu_pid_to_cpupid(int nid, int pid)
912 {
913         return -1;
914 }
915 
916 static inline bool cpupid_pid_unset(int cpupid)
917 {
918         return 1;
919 }
920 
921 static inline void page_cpupid_reset_last(struct page *page)
922 {
923 }
924 
925 static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
926 {
927         return false;
928 }
929 #endif /* CONFIG_NUMA_BALANCING */
930 
931 static inline struct zone *page_zone(const struct page *page)
932 {
933         return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
934 }
935 
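Example (sketch, not part of mm.h): reading a page's node and zone back out of page->flags through the accessors above; report_page_location() is an illustrative name.

    static void report_page_location(struct page *page)
    {
            struct zone *zone = page_zone(page);

            pr_info("page %p: node %d, zone %s\n",
                    page, page_to_nid(page), zone->name);
    }
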
936 #ifdef SECTION_IN_PAGE_FLAGS
937 static inline void set_page_section(struct page *page, unsigned long section)
938 {
939         page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
940         page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
941 }
942 
943 static inline unsigned long page_to_section(const struct page *page)
944 {
945         return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
946 }
947 #endif
948 
949 static inline void set_page_zone(struct page *page, enum zone_type zone)
950 {
951         page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
952         page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
953 }
954 
955 static inline void set_page_node(struct page *page, unsigned long node)
956 {
957         page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
958         page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
959 }
960 
961 static inline void set_page_links(struct page *page, enum zone_type zone,
962         unsigned long node, unsigned long pfn)
963 {
964         set_page_zone(page, zone);
965         set_page_node(page, node);
966 #ifdef SECTION_IN_PAGE_FLAGS
967         set_page_section(page, pfn_to_section_nr(pfn));
968 #endif
969 }
970 
971 /*
972  * Some inline functions in vmstat.h depend on page_zone()
973  */
974 #include <linux/vmstat.h>
975 
976 static __always_inline void *lowmem_page_address(const struct page *page)
977 {
978         return __va(PFN_PHYS(page_to_pfn(page)));
979 }
980 
981 #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
982 #define HASHED_PAGE_VIRTUAL
983 #endif
984 
985 #if defined(WANT_PAGE_VIRTUAL)
986 static inline void *page_address(const struct page *page)
987 {
988         return page->virtual;
989 }
990 static inline void set_page_address(struct page *page, void *address)
991 {
992         page->virtual = address;
993 }
994 #define page_address_init()  do { } while(0)
995 #endif
996 
997 #if defined(HASHED_PAGE_VIRTUAL)
998 void *page_address(const struct page *page);
999 void set_page_address(struct page *page, void *virtual);
1000 void page_address_init(void);
1001 #endif
1002 
1003 #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1004 #define page_address(page) lowmem_page_address(page)
1005 #define set_page_address(page, address)  do { } while(0)
1006 #define page_address_init()  do { } while(0)
1007 #endif
1008 
1009 /*
1010  * On an anonymous page mapped into a user virtual memory area,
1011  * page->mapping points to its anon_vma, not to a struct address_space;
1012  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
1013  *
1014  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
1015  * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
1016  * and then page->mapping points, not to an anon_vma, but to a private
1017  * structure which KSM associates with that merged page.  See ksm.h.
1018  *
1019  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
1020  *
1021  * Please note that, confusingly, "page_mapping" refers to the inode
1022  * address_space which maps the page from disk; whereas "page_mapped"
1023  * refers to user virtual address space into which the page is mapped.
1024  */
1025 #define PAGE_MAPPING_ANON       1
1026 #define PAGE_MAPPING_KSM        2
1027 #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
1028 
1029 extern struct address_space *page_mapping(struct page *page);
1030 
1031 /* Neutral page->mapping pointer to address_space or anon_vma or other */
1032 static inline void *page_rmapping(struct page *page)
1033 {
1034         return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
1035 }
1036 
1037 extern struct address_space *__page_file_mapping(struct page *);
1038 
1039 static inline
1040 struct address_space *page_file_mapping(struct page *page)
1041 {
1042         if (unlikely(PageSwapCache(page)))
1043                 return __page_file_mapping(page);
1044 
1045         return page->mapping;
1046 }
1047 
1048 static inline int PageAnon(struct page *page)
1049 {
1050         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
1051 }
1052 
1053 /*
1054  * Return the pagecache index of the passed page.  Regular pagecache pages
1055  * use ->index whereas swapcache pages use ->private
1056  */
1057 static inline pgoff_t page_index(struct page *page)
1058 {
1059         if (unlikely(PageSwapCache(page)))
1060                 return page_private(page);
1061         return page->index;
1062 }
1063 
1064 extern pgoff_t __page_file_index(struct page *page);
1065 
1066 /*
1067  * Return the file index of the page. Regular pagecache pages use ->index
1068  * whereas swapcache pages use swp_offset(->private)
1069  */
1070 static inline pgoff_t page_file_index(struct page *page)
1071 {
1072         if (unlikely(PageSwapCache(page)))
1073                 return __page_file_index(page);
1074 
1075         return page->index;
1076 }
1077 
1078 /*
1079  * Return true if this page is mapped into pagetables.
1080  */
1081 static inline int page_mapped(struct page *page)
1082 {
1083         return atomic_read(&(page)->_mapcount) >= 0;
1084 }
1085 
1086 /*
1087  * Different kinds of faults, as returned by handle_mm_fault().
1088  * Used to decide whether a process gets delivered SIGBUS or
1089  * just gets major/minor fault counters bumped up.
1090  */
1091 
1092 #define VM_FAULT_MINOR  0 /* For backwards compat. Remove me quickly. */
1093 
1094 #define VM_FAULT_OOM    0x0001
1095 #define VM_FAULT_SIGBUS 0x0002
1096 #define VM_FAULT_MAJOR  0x0004
1097 #define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
1098 #define VM_FAULT_HWPOISON 0x0010        /* Hit poisoned small page */
1099 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1100 #define VM_FAULT_SIGSEGV 0x0040
1101 
1102 #define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, not return page */
1103 #define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
1104 #define VM_FAULT_RETRY  0x0400  /* ->fault blocked, must retry */
1105 #define VM_FAULT_FALLBACK 0x0800        /* huge page fault failed, fall back to small */
1106 
1107 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1108 
1109 #define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1110                          VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1111                          VM_FAULT_FALLBACK)
1112 
1113 /* Encode hstate index for a hwpoisoned large page */
1114 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1115 #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1116 
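Example (sketch, not part of mm.h): how an architecture page-fault handler typically interprets the VM_FAULT_* bits returned by handle_mm_fault(). classify_fault() is an illustrative name and the mapping to errno values is simplified.

    static int classify_fault(int fault)
    {
            if (fault & VM_FAULT_OOM)
                    return -ENOMEM;         /* hand over to the OOM machinery */
            if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | VM_FAULT_SIGBUS))
                    return -EFAULT;         /* caller will deliver SIGBUS */
            if (fault & VM_FAULT_SIGSEGV)
                    return -EFAULT;         /* caller will deliver SIGSEGV */
            return 0;                       /* fault was handled (minor or major) */
    }
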
1117 /*
1118  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1119  */
1120 extern void pagefault_out_of_memory(void);
1121 
1122 #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
1123 
1124 /*
1125  * Flags passed to show_mem() and show_free_areas() to suppress output in
1126  * various contexts.
1127  */
1128 #define SHOW_MEM_FILTER_NODES           (0x0001u)       /* disallowed nodes */
1129 
1130 extern void show_free_areas(unsigned int flags);
1131 extern bool skip_free_areas_node(unsigned int flags, int nid);
1132 
1133 int shmem_zero_setup(struct vm_area_struct *);
1134 #ifdef CONFIG_SHMEM
1135 bool shmem_mapping(struct address_space *mapping);
1136 #else
1137 static inline bool shmem_mapping(struct address_space *mapping)
1138 {
1139         return false;
1140 }
1141 #endif
1142 
1143 extern int can_do_mlock(void);
1144 extern int user_shm_lock(size_t, struct user_struct *);
1145 extern void user_shm_unlock(size_t, struct user_struct *);
1146 
1147 /*
1148  * Parameter block passed down to zap_pte_range in exceptional cases.
1149  */
1150 struct zap_details {
1151         struct address_space *check_mapping;    /* Check page->mapping if set */
1152         pgoff_t first_index;                    /* Lowest page->index to unmap */
1153         pgoff_t last_index;                     /* Highest page->index to unmap */
1154 };
1155 
1156 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1157                 pte_t pte);
1158 
1159 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1160                 unsigned long size);
1161 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1162                 unsigned long size, struct zap_details *);
1163 void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1164                 unsigned long start, unsigned long end);
1165 
1166 /**
1167  * mm_walk - callbacks for walk_page_range
1168  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1169  *             this handler is required to be able to handle
1170  *             pmd_trans_huge() pmds.  They may simply choose to
1171  *             split_huge_page() instead of handling it explicitly.
1172  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1173  * @pte_hole: if set, called for each hole at all levels
1174  * @hugetlb_entry: if set, called for each hugetlb entry
1175  * @test_walk: caller specific callback function to determine whether
1176  *             we walk over the current vma or not. A positive returned
1177  *             value means "do page table walk over the current vma,"
1178  *             and a negative one means "abort current page table walk
1179  *             right now." 0 means "skip the current vma."
1180  * @mm:        mm_struct representing the target process of page table walk
1181  * @vma:       vma currently walked (NULL if walking outside vmas)
1182  * @private:   private data for callbacks' usage
1183  *
1184  * (see the comment on walk_page_range() for more details)
1185  */
1186 struct mm_walk {
1187         int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1188                          unsigned long next, struct mm_walk *walk);
1189         int (*pte_entry)(pte_t *pte, unsigned long addr,
1190                          unsigned long next, struct mm_walk *walk);
1191         int (*pte_hole)(unsigned long addr, unsigned long next,
1192                         struct mm_walk *walk);
1193         int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1194                              unsigned long addr, unsigned long next,
1195                              struct mm_walk *walk);
1196         int (*test_walk)(unsigned long addr, unsigned long next,
1197                         struct mm_walk *walk);
1198         struct mm_struct *mm;
1199         struct vm_area_struct *vma;
1200         void *private;
1201 };
1202 
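Example (sketch, not part of mm.h): counting present PTEs in a range with the callbacks documented above and walk_page_range(), declared just below. count_pte() and count_present_ptes() are illustrative names.

    static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
                         struct mm_walk *walk)
    {
            unsigned long *count = walk->private;

            if (pte_present(*pte))
                    (*count)++;
            return 0;                       /* keep walking */
    }

    static unsigned long count_present_ptes(struct mm_struct *mm,
                                            unsigned long start, unsigned long end)
    {
            unsigned long count = 0;
            struct mm_walk walk = {
                    .pte_entry      = count_pte,
                    .mm             = mm,
                    .private        = &count,
            };

            down_read(&mm->mmap_sem);       /* walk_page_range() expects mmap_sem held */
            walk_page_range(start, end, &walk);
            up_read(&mm->mmap_sem);
            return count;
    }
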
1203 int walk_page_range(unsigned long addr, unsigned long end,
1204                 struct mm_walk *walk);
1205 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1206 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1207                 unsigned long end, unsigned long floor, unsigned long ceiling);
1208 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1209                         struct vm_area_struct *vma);
1210 void unmap_mapping_range(struct address_space *mapping,
1211                 loff_t const holebegin, loff_t const holelen, int even_cows);
1212 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1213         unsigned long *pfn);
1214 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1215                 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1216 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1217                         void *buf, int len, int write);
1218 
1219 static inline void unmap_shared_mapping_range(struct address_space *mapping,
1220                 loff_t const holebegin, loff_t const holelen)
1221 {
1222         unmap_mapping_range(mapping, holebegin, holelen, 0);
1223 }
1224 
1225 extern void truncate_pagecache(struct inode *inode, loff_t new);
1226 extern void truncate_setsize(struct inode *inode, loff_t newsize);
1227 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1228 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1229 int truncate_inode_page(struct address_space *mapping, struct page *page);
1230 int generic_error_remove_page(struct address_space *mapping, struct page *page);
1231 int invalidate_inode_page(struct page *page);
1232 
1233 #ifdef CONFIG_MMU
1234 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1235                         unsigned long address, unsigned int flags);
1236 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1237                             unsigned long address, unsigned int fault_flags);
1238 #else
1239 static inline int handle_mm_fault(struct mm_struct *mm,
1240                         struct vm_area_struct *vma, unsigned long address,
1241                         unsigned int flags)
1242 {
1243         /* should never happen if there's no MMU */
1244         BUG();
1245         return VM_FAULT_SIGBUS;
1246 }
1247 static inline int fixup_user_fault(struct task_struct *tsk,
1248                 struct mm_struct *mm, unsigned long address,
1249                 unsigned int fault_flags)
1250 {
1251         /* should never happen if there's no MMU */
1252         BUG();
1253         return -EFAULT;
1254 }
1255 #endif
1256 
1257 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1258 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1259                 void *buf, int len, int write);
1260 
1261 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1262                       unsigned long start, unsigned long nr_pages,
1263                       unsigned int foll_flags, struct page **pages,
1264                       struct vm_area_struct **vmas, int *nonblocking);
1265 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1266                     unsigned long start, unsigned long nr_pages,
1267                     int write, int force, struct page **pages,
1268                     struct vm_area_struct **vmas);
1269 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
1270                     unsigned long start, unsigned long nr_pages,
1271                     int write, int force, struct page **pages,
1272                     int *locked);
1273 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1274                                unsigned long start, unsigned long nr_pages,
1275                                int write, int force, struct page **pages,
1276                                unsigned int gup_flags);
1277 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1278                     unsigned long start, unsigned long nr_pages,
1279                     int write, int force, struct page **pages);
1280 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1281                         struct page **pages);
1282 struct kvec;
1283 int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1284                         struct page **pages);
1285 int get_kernel_page(unsigned long start, int write, struct page **pages);
1286 struct page *get_dump_page(unsigned long addr);
1287 
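Example (sketch, not part of mm.h): pinning a single page of the current process with the get_user_pages() calling convention declared above (task plus mm, mmap_sem held for read). pin_one_user_page() is an illustrative name and error handling is kept minimal.

    static struct page *pin_one_user_page(unsigned long addr)
    {
            struct page *page;
            long ret;

            down_read(&current->mm->mmap_sem);      /* required by get_user_pages() */
            ret = get_user_pages(current, current->mm, addr, 1,
                                 0 /* write */, 0 /* force */, &page, NULL);
            up_read(&current->mm->mmap_sem);

            if (ret != 1)
                    return NULL;
            return page;                            /* caller must put_page() when done */
    }
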
1288 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1289 extern void do_invalidatepage(struct page *page, unsigned int offset,
1290                               unsigned int length);
1291 
1292 int __set_page_dirty_nobuffers(struct page *page);
1293 int __set_page_dirty_no_writeback(struct page *page);
1294 int redirty_page_for_writepage(struct writeback_control *wbc,
1295                                 struct page *page);
1296 void account_page_dirtied(struct page *page, struct address_space *mapping);
1297 int set_page_dirty(struct page *page);
1298 int set_page_dirty_lock(struct page *page);
1299 int clear_page_dirty_for_io(struct page *page);
1300 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1301 
1302 /* Is the vma a continuation of the stack vma above it? */
1303 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1304 {
1305         return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1306 }
1307 
1308 static inline int stack_guard_page_start(struct vm_area_struct *vma,
1309                                              unsigned long addr)
1310 {
1311         return (vma->vm_flags & VM_GROWSDOWN) &&
1312                 (vma->vm_start == addr) &&
1313                 !vma_growsdown(vma->vm_prev, addr);
1314 }
1315 
1316 /* Is the vma a continuation of the stack vma below it? */
1317 static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1318 {
1319         return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1320 }
1321 
1322 static inline int stack_guard_page_end(struct vm_area_struct *vma,
1323                                            unsigned long addr)
1324 {
1325         return (vma->vm_flags & VM_GROWSUP) &&
1326                 (vma->vm_end == addr) &&
1327                 !vma_growsup(vma->vm_next, addr);
1328 }
1329 
1330 extern struct task_struct *task_of_stack(struct task_struct *task,
1331                                 struct vm_area_struct *vma, bool in_group);
1332 
1333 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1334                 unsigned long old_addr, struct vm_area_struct *new_vma,
1335                 unsigned long new_addr, unsigned long len,
1336                 bool need_rmap_locks);
1337 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1338                               unsigned long end, pgprot_t newprot,
1339                               int dirty_accountable, int prot_numa);
1340 extern int mprotect_fixup(struct vm_area_struct *vma,
1341                           struct vm_area_struct **pprev, unsigned long start,
1342                           unsigned long end, unsigned long newflags);
1343 
1344 /*
1345  * Doesn't attempt to fault pages in and may return a short count.
1346  */
1347 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1348                           struct page **pages);
1349 /*
1350  * per-process(per-mm_struct) statistics.
1351  */
1352 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1353 {
1354         long val = atomic_long_read(&mm->rss_stat.count[member]);
1355 
1356 #ifdef SPLIT_RSS_COUNTING
1357         /*
1358          * The counter is updated asynchronously and may temporarily go negative.
1359          * But a negative value is never what users expect to see.
1360          */
1361         if (val < 0)
1362                 val = 0;
1363 #endif
1364         return (unsigned long)val;
1365 }
1366 
1367 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1368 {
1369         atomic_long_add(value, &mm->rss_stat.count[member]);
1370 }
1371 
1372 static inline void inc_mm_counter(struct mm_struct *mm, int member)
1373 {
1374         atomic_long_inc(&mm->rss_stat.count[member]);
1375 }
1376 
1377 static inline void dec_mm_counter(struct mm_struct *mm, int member)
1378 {
1379         atomic_long_dec(&mm->rss_stat.count[member]);
1380 }
1381 
1382 static inline unsigned long get_mm_rss(struct mm_struct *mm)
1383 {
1384         return get_mm_counter(mm, MM_FILEPAGES) +
1385                 get_mm_counter(mm, MM_ANONPAGES);
1386 }
1387 
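Example (sketch, not part of mm.h): these counters are what fs/proc reports; converting the RSS from pages to kilobytes is a single shift. mm_rss_kb() is an illustrative name.

    static unsigned long mm_rss_kb(struct mm_struct *mm)
    {
            return get_mm_rss(mm) << (PAGE_SHIFT - 10);     /* pages -> KiB */
    }
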
1388 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1389 {
1390         return max(mm->hiwater_rss, get_mm_rss(mm));
1391 }
1392 
1393 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1394 {
1395         return max(mm->hiwater_vm, mm->total_vm);
1396 }
1397 
1398 static inline void update_hiwater_rss(struct mm_struct *mm)
1399 {
1400         unsigned long _rss = get_mm_rss(mm);
1401 
1402         if ((mm)->hiwater_rss < _rss)
1403                 (mm)->hiwater_rss = _rss;
1404 }
1405 
1406 static inline void update_hiwater_vm(struct mm_struct *mm)
1407 {
1408         if (mm->hiwater_vm < mm->total_vm)
1409                 mm->hiwater_vm = mm->total_vm;
1410 }
1411 
1412 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1413 {
1414         mm->hiwater_rss = get_mm_rss(mm);
1415 }
1416 
1417 static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1418                                          struct mm_struct *mm)
1419 {
1420         unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1421 
1422         if (*maxrss < hiwater_rss)
1423                 *maxrss = hiwater_rss;
1424 }
1425 
1426 #if defined(SPLIT_RSS_COUNTING)
1427 void sync_mm_rss(struct mm_struct *mm);
1428 #else
1429 static inline void sync_mm_rss(struct mm_struct *mm)
1430 {
1431 }
1432 #endif
1433 
1434 int vma_wants_writenotify(struct vm_area_struct *vma);
1435 
1436 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1437                                spinlock_t **ptl);
1438 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1439                                     spinlock_t **ptl)
1440 {
1441         pte_t *ptep;
1442         __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1443         return ptep;
1444 }
1445 
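Example (sketch, not part of mm.h): using get_locked_pte() and then dropping the page-table lock again with pte_unmap_unlock(); addr_has_present_pte() is an illustrative name.

    static bool addr_has_present_pte(struct mm_struct *mm, unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte = get_locked_pte(mm, addr, &ptl);
            bool present;

            if (!pte)
                    return false;           /* page tables could not be allocated */
            present = pte_present(*pte);
            pte_unmap_unlock(pte, ptl);     /* releases the PTE lock (and any kmap) */
            return present;
    }
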
1446 #ifdef __PAGETABLE_PUD_FOLDED
1447 static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1448                                                 unsigned long address)
1449 {
1450         return 0;
1451 }
1452 #else
1453 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1454 #endif
1455 
1456 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1457 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1458                                                 unsigned long address)
1459 {
1460         return 0;
1461 }
1462 
1463 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
1464 
1465 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1466 {
1467         return 0;
1468 }
1469 
1470 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1471 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1472 
1473 #else
1474 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1475 
1476 static inline void mm_nr_pmds_init(struct mm_struct *mm)
1477 {
1478         atomic_long_set(&mm->nr_pmds, 0);
1479 }
1480 
1481 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1482 {
1483         return atomic_long_read(&mm->nr_pmds);
1484 }
1485 
1486 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1487 {
1488         atomic_long_inc(&mm->nr_pmds);
1489 }
1490 
1491 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1492 {
1493         atomic_long_dec(&mm->nr_pmds);
1494 }
1495 #endif
1496 
1497 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1498                 pmd_t *pmd, unsigned long address);
1499 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1500 
1501 /*
1502  * The following ifdef is needed to get the 4level-fixup.h header to work.
1503  * Remove it when 4level-fixup.h has been removed.
1504  */
1505 #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1506 static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1507 {
1508         return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1509                 NULL: pud_offset(pgd, address);
1510 }
1511 
1512 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1513 {
1514         return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1515                 NULL: pmd_offset(pud, address);
1516 }
1517 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1518 
1519 #if USE_SPLIT_PTE_PTLOCKS
1520 #if ALLOC_SPLIT_PTLOCKS
1521 void __init ptlock_cache_init(void);
1522 extern bool ptlock_alloc(struct page *page);
1523 extern void ptlock_free(struct page *page);
1524 
1525 static inline spinlock_t *ptlock_ptr(struct page *page)
1526 {
1527         return page->ptl;
1528 }
1529 #else /* ALLOC_SPLIT_PTLOCKS */
1530 static inline void ptlock_cache_init(void)
1531 {
1532 }
1533 
1534 static inline bool ptlock_alloc(struct page *page)
1535 {
1536         return true;
1537 }
1538 
1539 static inline void ptlock_free(struct page *page)
1540 {
1541 }
1542 
1543 static inline spinlock_t *ptlock_ptr(struct page *page)
1544 {
1545         return &page->ptl;
1546 }
1547 #endif /* ALLOC_SPLIT_PTLOCKS */
1548 
1549 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1550 {
1551         return ptlock_ptr(pmd_page(*pmd));
1552 }
1553 
1554 static inline bool ptlock_init(struct page *page)
1555 {
1556         /*
1557          * prep_new_page() initializes page->private (and therefore page->ptl)
1558          * to 0. Make sure nobody took it into use in the meantime.
1559          *
1560          * That can happen if an architecture tries to use slab for page table
1561          * allocation: the slab code uses page->slab_cache and page->first_page
1562          * (for tail pages), which share storage with page->ptl.
1563          */
1564         VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1565         if (!ptlock_alloc(page))
1566                 return false;
1567         spin_lock_init(ptlock_ptr(page));
1568         return true;
1569 }
1570 
1571 /* Reset page->mapping so free_pages_check won't complain. */
1572 static inline void pte_lock_deinit(struct page *page)
1573 {
1574         page->mapping = NULL;
1575         ptlock_free(page);
1576 }
1577 
1578 #else   /* !USE_SPLIT_PTE_PTLOCKS */
1579 /*
1580  * We use mm->page_table_lock to guard all pagetable pages of the mm.
1581  */
1582 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1583 {
1584         return &mm->page_table_lock;
1585 }
1586 static inline void ptlock_cache_init(void) {}
1587 static inline bool ptlock_init(struct page *page) { return true; }
1588 static inline void pte_lock_deinit(struct page *page) {}
1589 #endif /* USE_SPLIT_PTE_PTLOCKS */
1590 
1591 static inline void pgtable_init(void)
1592 {
1593         ptlock_cache_init();
1594         pgtable_cache_init();
1595 }
1596 
1597 static inline bool pgtable_page_ctor(struct page *page)
1598 {
1599         inc_zone_page_state(page, NR_PAGETABLE);
1600         return ptlock_init(page);
1601 }
1602 
1603 static inline void pgtable_page_dtor(struct page *page)
1604 {
1605         pte_lock_deinit(page);
1606         dec_zone_page_state(page, NR_PAGETABLE);
1607 }
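
/*
 * Example (not part of mm.h): a sketch of how an architecture's
 * pte_alloc_one() typically pairs with pgtable_page_ctor(); the exact
 * gfp flags and return type vary per architecture, and the example_*
 * name is hypothetical.
 */
static inline struct page *example_pte_alloc_one(struct mm_struct *mm)
{
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {         /* stats + split-ptlock init */
                __free_page(page);
                return NULL;
        }
        return page;
}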
1608 
1609 #define pte_offset_map_lock(mm, pmd, address, ptlp)     \
1610 ({                                                      \
1611         spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
1612         pte_t *__pte = pte_offset_map(pmd, address);    \
1613         *(ptlp) = __ptl;                                \
1614         spin_lock(__ptl);                               \
1615         __pte;                                          \
1616 })
1617 
1618 #define pte_unmap_unlock(pte, ptl)      do {            \
1619         spin_unlock(ptl);                               \
1620         pte_unmap(pte);                                 \
1621 } while (0)
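
/*
 * Example (not part of mm.h): a minimal sketch of inspecting one user
 * PTE under the (possibly split) PTE lock. The caller is assumed to hold
 * mmap_sem and to have checked that the pmd is present; the example_*
 * name is hypothetical.
 */
static inline bool example_pte_is_present(struct mm_struct *mm, pmd_t *pmd,
                                          unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        bool present = pte_present(*pte);

        pte_unmap_unlock(pte, ptl);
        return present;
}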
1622 
1623 #define pte_alloc_map(mm, vma, pmd, address)                            \
1624         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,    \
1625                                                         pmd, address))? \
1626          NULL: pte_offset_map(pmd, address))
1627 
1628 #define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
1629         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,   \
1630                                                         pmd, address))? \
1631                 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1632 
1633 #define pte_alloc_kernel(pmd, address)                  \
1634         ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1635                 NULL: pte_offset_kernel(pmd, address))
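
/*
 * Example (not part of mm.h): a sketch of the standard top-down walk
 * that allocates any missing page-table levels for a user address and
 * returns the mapped, locked PTE (or NULL on allocation failure). This
 * mirrors __get_locked_pte() in mm/memory.c; the example_* name is
 * hypothetical.
 */
static inline pte_t *example_walk_alloc(struct mm_struct *mm,
                                        unsigned long addr, spinlock_t **ptl)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud = pud_alloc(mm, pgd, addr);
        pmd_t *pmd;

        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;
        return pte_alloc_map_lock(mm, pmd, addr, ptl);
}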
1636 
1637 #if USE_SPLIT_PMD_PTLOCKS
1638 
1639 static struct page *pmd_to_page(pmd_t *pmd)
1640 {
1641         unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1642         return virt_to_page((void *)((unsigned long) pmd & mask));
1643 }
1644 
1645 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1646 {
1647         return ptlock_ptr(pmd_to_page(pmd));
1648 }
1649 
1650 static inline bool pgtable_pmd_page_ctor(struct page *page)
1651 {
1652 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1653         page->pmd_huge_pte = NULL;
1654 #endif
1655         return ptlock_init(page);
1656 }
1657 
1658 static inline void pgtable_pmd_page_dtor(struct page *page)
1659 {
1660 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1661         VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1662 #endif
1663         ptlock_free(page);
1664 }
1665 
1666 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1667 
1668 #else
1669 
1670 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1671 {
1672         return &mm->page_table_lock;
1673 }
1674 
1675 static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1676 static inline void pgtable_pmd_page_dtor(struct page *page) {}
1677 
1678 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1679 
1680 #endif
1681 
1682 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1683 {
1684         spinlock_t *ptl = pmd_lockptr(mm, pmd);
1685         spin_lock(ptl);
1686         return ptl;
1687 }
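
/*
 * Example (not part of mm.h): a sketch of serializing against concurrent
 * pmd changes before inspecting an entry, assuming pmd_trans_huge() from
 * <asm/pgtable.h> (the generic fallback is a constant 0 without
 * transparent hugepages). The example_* name is hypothetical.
 */
static inline bool example_pmd_is_huge(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);
        bool huge = pmd_trans_huge(*pmd);

        spin_unlock(ptl);
        return huge;
}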
1688 
1689 extern void free_area_init(unsigned long * zones_size);
1690 extern void free_area_init_node(int nid, unsigned long * zones_size,
1691                 unsigned long zone_start_pfn, unsigned long *zholes_size);
1692 extern void free_initmem(void);
1693 
1694 /*
1695  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1696  * into the buddy system. The freed pages will be poisoned with the pattern
1697  * "poison" if it is within the range [0, UCHAR_MAX].
1698  * Returns the number of pages freed into the buddy system.
1699  */
1700 extern unsigned long free_reserved_area(void *start, void *end,
1701                                         int poison, char *s);
1702 
1703 #ifdef  CONFIG_HIGHMEM
1704 /*
1705  * Free a highmem page into the buddy system, adjusting totalhigh_pages
1706  * and totalram_pages.
1707  */
1708 extern void free_highmem_page(struct page *page);
1709 #endif
1710 
1711 extern void adjust_managed_page_count(struct page *page, long count);
1712 extern void mem_init_print_info(const char *str);
1713 
1714 /* Free the reserved page into the buddy system, so it gets managed. */
1715 static inline void __free_reserved_page(struct page *page)
1716 {
1717         ClearPageReserved(page);
1718         init_page_count(page);
1719         __free_page(page);
1720 }
1721 
1722 static inline void free_reserved_page(struct page *page)
1723 {
1724         __free_reserved_page(page);
1725         adjust_managed_page_count(page, 1);
1726 }
1727 
1728 static inline void mark_page_reserved(struct page *page)
1729 {
1730         SetPageReserved(page);
1731         adjust_managed_page_count(page, -1);
1732 }
1733 
1734 /*
1735  * Default method to free all the __init memory into the buddy system.
1736  * The freed pages will be poisoned with the pattern "poison" if it is
1737  * within the range [0, UCHAR_MAX].
1738  * Returns the number of pages freed into the buddy system.
1739  */
1740 static inline unsigned long free_initmem_default(int poison)
1741 {
1742         extern char __init_begin[], __init_end[];
1743 
1744         return free_reserved_area(&__init_begin, &__init_end,
1745                                   poison, "unused kernel");
1746 }
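
/*
 * Example (not part of mm.h): many architectures implement free_initmem()
 * as a thin wrapper like the sketch below. POISON_FREE_INITMEM comes from
 * <linux/poison.h>; passing -1 instead skips the poisoning. The example_*
 * name is hypothetical.
 */
static inline void example_free_initmem(void)
{
        free_initmem_default(POISON_FREE_INITMEM);
}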
1747 
1748 static inline unsigned long get_num_physpages(void)
1749 {
1750         int nid;
1751         unsigned long phys_pages = 0;
1752 
1753         for_each_online_node(nid)
1754                 phys_pages += node_present_pages(nid);
1755 
1756         return phys_pages;
1757 }
1758 
1759 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1760 /*
1761  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1762  * zones, allocate the backing mem_map and account for memory holes in a more
1763  * architecture-independent manner. This is a substitute for creating the
1764  * zone_sizes[] and zholes_size[] arrays and passing them to
1765  * free_area_init_node().
1766  *
1767  * An architecture is expected to register the ranges of page frames backed
1768  * by physical memory with memblock_add[_node]() before calling
1769  * free_area_init_nodes(), passing in the PFN at which each zone ends. For
1770  * basic usage, an architecture is expected to do something like
1771  *
1772  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1773  *                                                       max_highmem_pfn};
1774  * for_each_valid_physical_page_range()
1775  *      memblock_add_node(base, size, nid)
1776  * free_area_init_nodes(max_zone_pfns);
1777  *
1778  * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1779  * registered physical page range.  Similarly
1780  * sparse_memory_present_with_active_regions() calls memory_present() for
1781  * each range when SPARSEMEM is enabled.
1782  *
1783  * See mm/page_alloc.c for more information on each function exposed by
1784  * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1785  */
1786 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1787 unsigned long node_map_pfn_alignment(void);
1788 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1789                                                 unsigned long end_pfn);
1790 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1791                                                 unsigned long end_pfn);
1792 extern void get_pfn_range_for_nid(unsigned int nid,
1793                         unsigned long *start_pfn, unsigned long *end_pfn);
1794 extern unsigned long find_min_pfn_with_active_regions(void);
1795 extern void free_bootmem_with_active_regions(int nid,
1796                                                 unsigned long max_low_pfn);
1797 extern void sparse_memory_present_with_active_regions(int nid);
1798 
1799 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
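
/*
 * Example (not part of mm.h): a sketch, following the comment above, of
 * how an architecture's paging_init() might register memory with memblock
 * and hand the zone limits to free_area_init_nodes(). It assumes
 * <linux/memblock.h> and <linux/sizes.h>; the base/size values and the
 * example_* name are placeholders.
 */
static void __init example_paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

        /* ranges normally come from firmware or devicetree */
        memblock_add_node(0, SZ_256M, 0);

        max_zone_pfns[ZONE_NORMAL] = PFN_DOWN(SZ_256M);
        free_area_init_nodes(max_zone_pfns);
}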
1800 
1801 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1802     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1803 static inline int __early_pfn_to_nid(unsigned long pfn)
1804 {
1805         return 0;
1806 }
1807 #else
1808 /* please see mm/page_alloc.c */
1809 extern int __meminit early_pfn_to_nid(unsigned long pfn);
1810 /* there is a per-arch backend function. */
1811 extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1812 #endif
1813 
1814 extern void set_dma_reserve(unsigned long new_dma_reserve);
1815 extern void memmap_init_zone(unsigned long, int, unsigned long,
1816                                 unsigned long, enum memmap_context);
1817 extern void setup_per_zone_wmarks(void);
1818 extern int __meminit init_per_zone_wmark_min(void);
1819 extern void mem_init(void);
1820 extern void __init mmap_init(void);
1821 extern void show_mem(unsigned int flags);
1822 extern void si_meminfo(struct sysinfo * val);
1823 extern void si_meminfo_node(struct sysinfo *val, int nid);
1824 
1825 extern __printf(3, 4)
1826 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1827 
1828 extern void setup_per_cpu_pageset(void);
1829 
1830 extern void zone_pcp_update(struct zone *zone);
1831 extern void zone_pcp_reset(struct zone *zone);
1832 
1833 /* page_alloc.c */
1834 extern int min_free_kbytes;
1835 
1836 /* nommu.c */
1837 extern atomic_long_t mmap_pages_allocated;
1838 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1839 
1840 /* interval_tree.c */
1841 void vma_interval_tree_insert(struct vm_area_struct *node,
1842                               struct rb_root *root);
1843 void vma_interval_tree_insert_after(struct vm_area_struct *node,
1844                                     struct vm_area_struct *prev,
1845                                     struct rb_root *root);
1846 void vma_interval_tree_remove(struct vm_area_struct *node,
1847                               struct rb_root *root);
1848 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1849                                 unsigned long start, unsigned long last);
1850 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1851                                 unsigned long start, unsigned long last);
1852 
1853 #define vma_interval_tree_foreach(vma, root, start, last)               \
1854         for (vma = vma_interval_tree_iter_first(root, start, last);     \
1855              vma; vma = vma_interval_tree_iter_next(vma, start, last))
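
/*
 * Example (not part of mm.h): a sketch of walking every VMA that maps a
 * given file page range, in the style of rmap. It assumes <linux/fs.h>
 * for struct address_space, and the caller is assumed to hold the
 * address_space's i_mmap lock; the example_* name is hypothetical.
 */
static inline void example_walk_file_mappers(struct address_space *mapping,
                                             pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                /* e.g. unmap or flush the part of the range in this vma */
        }
}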
1856 
1857 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1858                                    struct rb_root *root);
1859 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1860                                    struct rb_root *root);
1861 struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1862         struct rb_root *root, unsigned long start, unsigned long last);
1863 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1864         struct anon_vma_chain *node, unsigned long start, unsigned long last);
1865 #ifdef CONFIG_DEBUG_VM_RB
1866 void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1867 #endif
1868 
1869 #define anon_vma_interval_tree_foreach(avc, root, start, last)           \
1870         for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1871              avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1872 
1873 /* mmap.c */
1874 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1875 extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1876         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1877 extern struct vm_area_struct *vma_merge(struct mm_struct *,
1878         struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1879         unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1880         struct mempolicy *);
1881 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1882 extern int split_vma(struct mm_struct *,
1883         struct vm_area_struct *, unsigned long addr, int new_below);
1884 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1885 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1886         struct rb_node **, struct rb_node *);
1887 extern void unlink_file_vma(struct vm_area_struct *);
1888 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1889         unsigned long addr, unsigned long len, pgoff_t pgoff,
1890         bool *need_rmap_locks);
1891 extern void exit_mmap(struct mm_struct *);
1892 
1893 static inline int check_data_rlimit(unsigned long rlim,
1894                                     unsigned long new,
1895                                     unsigned long start,
1896                                     unsigned long end_data,
1897                                     unsigned long start_data)
1898 {
1899         if (rlim < RLIM_INFINITY) {
1900                 if (((new - start) + (end_data - start_data)) > rlim)
1901                         return -ENOSPC;
1902         }
1903 
1904         return 0;
1905 }
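
/*
 * Example (not part of mm.h): a sketch of how a brk()-style path might
 * validate a new program break against RLIMIT_DATA before committing it,
 * using rlimit() from <linux/sched.h>. The example_* name is hypothetical.
 */
static inline int example_check_brk(struct mm_struct *mm, unsigned long newbrk)
{
        return check_data_rlimit(rlimit(RLIMIT_DATA), newbrk,
                                 mm->start_brk, mm->end_data,
                                 mm->start_data);
}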
1906 
1907 extern int mm_take_all_locks(struct mm_struct *mm);
1908 extern void mm_drop_all_locks(struct mm_struct *mm);
1909 
1910 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1911 extern struct file *get_mm_exe_file(struct mm_struct *mm);
1912 
1913 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1914 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
1915                                    unsigned long addr, unsigned long len,
1916                                    unsigned long flags,
1917                                    const struct vm_special_mapping *spec);
1918 /* This is an obsolete alternative to _install_special_mapping. */
1919 extern int install_special_mapping(struct mm_struct *mm,
1920                                    unsigned long addr, unsigned long len,
1921                                    unsigned long flags, struct page **pages);
1922 
1923 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1924 
1925 extern unsigned long mmap_region(struct file *file, unsigned long addr,
1926         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1927 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1928         unsigned long len, unsigned long prot, unsigned long flags,
1929         unsigned long pgoff, unsigned long *populate);
1930 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1931 
1932 #ifdef CONFIG_MMU
1933 extern int __mm_populate(unsigned long addr, unsigned long len,
1934                          int ignore_errors);
1935 static inline void mm_populate(unsigned long addr, unsigned long len)
1936 {
1937         /* Ignore errors */
1938         (void) __mm_populate(addr, len, 1);
1939 }
1940 #else
1941 static inline void mm_populate(unsigned long addr, unsigned long len) {}
1942 #endif
1943 
1944 /* These take the mm semaphore themselves */
1945 extern unsigned long vm_brk(unsigned long, unsigned long);
1946 extern int vm_munmap(unsigned long, size_t);
1947 extern unsigned long vm_mmap(struct file *, unsigned long,
1948         unsigned long, unsigned long,
1949         unsigned long, unsigned long);
1950 
1951 struct vm_unmapped_area_info {
1952 #define VM_UNMAPPED_AREA_TOPDOWN 1
1953         unsigned long flags;
1954         unsigned long length;
1955         unsigned long low_limit;
1956         unsigned long high_limit;
1957         unsigned long align_mask;
1958         unsigned long align_offset;
1959 };
1960 
1961 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
1962 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
1963 
1964 /*
1965  * Search for an unmapped address range.
1966  *
1967  * We are looking for a range that:
1968  * - does not intersect with any VMA;
1969  * - is contained within the [low_limit, high_limit) interval;
1970  * - is at least the desired size.
1971  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1972  */
1973 static inline unsigned long
1974 vm_unmapped_area(struct vm_unmapped_area_info *info)
1975 {
1976         if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
1977                 return unmapped_area(info);
1978         else
1979                 return unmapped_area_topdown(info);
1980 }
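
/*
 * Example (not part of mm.h): a sketch of the way an
 * arch_get_unmapped_area() implementation typically fills
 * vm_unmapped_area_info for a bottom-up search with no extra alignment.
 * TASK_UNMAPPED_BASE and TASK_SIZE are per-arch constants from
 * <asm/processor.h>; the example_* name is hypothetical.
 */
static inline unsigned long example_get_unmapped_area(unsigned long len)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;                         /* bottom-up search */
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = 0;
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}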
1981 
1982 /* truncate.c */
1983 extern void truncate_inode_pages(struct address_space *, loff_t);
1984 extern void truncate_inode_pages_range(struct address_space *,
1985                                        loff_t lstart, loff_t lend);
1986 extern void truncate_inode_pages_final(struct address_space *);
1987 
1988 /* generic vm_area_ops exported for stackable file systems */
1989 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1990 extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1991 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1992 
1993 /* mm/page-writeback.c */
1994 int write_one_page(struct page *page, int wait);
1995 void task_dirty_inc(struct task_struct *tsk);
1996 
1997 /* readahead.c */
1998 #define VM_MAX_READAHEAD        128     /* kbytes */
1999 #define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
2000 
2001 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2002                         pgoff_t offset, unsigned long nr_to_read);
2003 
2004 void page_cache_sync_readahead(struct address_space *mapping,
2005                                struct file_ra_state *ra,
2006                                struct file *filp,
2007                                pgoff_t offset,
2008                                unsigned long size);
2009 
2010 void page_cache_async_readahead(struct address_space *mapping,
2011                                 struct file_ra_state *ra,
2012                                 struct file *filp,
2013                                 struct page *pg,
2014                                 pgoff_t offset,
2015                                 unsigned long size);
2016 
2017 unsigned long max_sane_readahead(unsigned long nr);
2018 
2019 /* Generic expand_stack(), which grows the stack according to GROWS{UP,DOWN} */
2020 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2021 
2022 /* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
2023 extern int expand_downwards(struct vm_area_struct *vma,
2024                 unsigned long address);
2025 #if VM_GROWSUP
2026 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2027 #else
2028   #define expand_upwards(vma, address) (0)
2029 #endif
2030 
2031 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2032 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2033 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2034                                              struct vm_area_struct **pprev);
2035 
2036 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
2037    NULL if none.  Assume start_addr < end_addr. */
2038 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2039 {
2040         struct vm_area_struct * vma = find_vma(mm,start_addr);
2041 
2042         if (vma && end_addr <= vma->vm_start)
2043                 vma = NULL;
2044         return vma;
2045 }
2046 
2047 static inline unsigned long vma_pages(struct vm_area_struct *vma)
2048 {
2049         return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2050 }
2051 
2052 /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
2053 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2054                                 unsigned long vm_start, unsigned long vm_end)
2055 {
2056         struct vm_area_struct *vma = find_vma(mm, vm_start);
2057 
2058         if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2059                 vma = NULL;
2060 
2061         return vma;
2062 }
2063 
2064 #ifdef CONFIG_MMU
2065 pgprot_t vm_get_page_prot(unsigned long vm_flags);
2066 void vma_set_page_prot(struct vm_area_struct *vma);
2067 #else
2068 static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2069 {
2070         return __pgprot(0);
2071 }
2072 static inline void vma_set_page_prot(struct vm_area_struct *vma)
2073 {
2074         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2075 }
2076 #endif
2077 
2078 #ifdef CONFIG_NUMA_BALANCING
2079 unsigned long change_prot_numa(struct vm_area_struct *vma,
2080                         unsigned long start, unsigned long end);
2081 #endif
2082 
2083 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2084 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2085                         unsigned long pfn, unsigned long size, pgprot_t);
2086 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2087 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2088                         unsigned long pfn);
2089 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2090                         unsigned long pfn);
2091 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
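
/*
 * Example (not part of mm.h): a sketch of the core of a driver mmap()
 * handler that maps a physically contiguous buffer into userspace with
 * remap_pfn_range(); phys stands in for the buffer's physical address
 * and the example_* name is hypothetical.
 */
static inline int example_map_buffer(struct vm_area_struct *vma,
                                     phys_addr_t phys)
{
        return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}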
2092 
2093 
2094 struct page *follow_page_mask(struct vm_area_struct *vma,
2095                               unsigned long address, unsigned int foll_flags,
2096                               unsigned int *page_mask);
2097 
2098 static inline struct page *follow_page(struct vm_area_struct *vma,
2099                 unsigned long address, unsigned int foll_flags)
2100 {
2101         unsigned int unused_page_mask;
2102         return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2103 }
2104 
2105 #define FOLL_WRITE      0x01    /* check pte is writable */
2106 #define FOLL_TOUCH      0x02    /* mark page accessed */
2107 #define FOLL_GET        0x04    /* do get_page on page */
2108 #define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
2109 #define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
2110 #define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
2111                                  * and return without waiting upon it */
2112 #define FOLL_MLOCK      0x40    /* mark page as mlocked */
2113 #define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
2114 #define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
2115 #define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
2116 #define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
2117 #define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
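
/*
 * Example (not part of mm.h): a sketch of checking whether a user address
 * is currently backed by a page, using follow_page() with FOLL_GET and
 * dropping the reference again. The caller is assumed to hold mmap_sem
 * for read; the example_* name is hypothetical.
 */
static inline bool example_addr_is_populated(struct vm_area_struct *vma,
                                             unsigned long addr)
{
        struct page *page = follow_page(vma, addr, FOLL_GET);

        if (IS_ERR_OR_NULL(page))
                return false;
        put_page(page);                         /* drop the FOLL_GET ref */
        return true;
}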
2118 
2119 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2120                         void *data);
2121 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2122                                unsigned long size, pte_fn_t fn, void *data);
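
/*
 * Example (not part of mm.h): a sketch of using apply_to_page_range() to
 * rewrite the protection bits of every PTE in a kernel virtual range; the
 * callback follows the pte_fn_t signature above. The example_* names are
 * hypothetical.
 */
static int example_set_prot_pte(pte_t *pte, pgtable_t token,
                                unsigned long addr, void *data)
{
        pgprot_t prot = *(pgprot_t *)data;

        set_pte_at(&init_mm, addr, pte, pfn_pte(pte_pfn(*pte), prot));
        return 0;                               /* non-zero aborts the walk */
}

static int example_change_range_prot(unsigned long addr, unsigned long size,
                                     pgprot_t prot)
{
        return apply_to_page_range(&init_mm, addr, size,
                                   example_set_prot_pte, &prot);
}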
2123 
2124 #ifdef CONFIG_PROC_FS
2125 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
2126 #else
2127 static inline void vm_stat_account(struct mm_struct *mm,
2128                         unsigned long flags, struct file *file, long pages)
2129 {
2130         mm->total_vm += pages;
2131 }
2132 #endif /* CONFIG_PROC_FS */
2133 
2134 #ifdef CONFIG_DEBUG_PAGEALLOC
2135 extern bool _debug_pagealloc_enabled;
2136 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2137 
2138 static inline bool debug_pagealloc_enabled(void)
2139 {
2140         return _debug_pagealloc_enabled;
2141 }
2142 
2143 static inline void
2144 kernel_map_pages(struct page *page, int numpages, int enable)
2145 {
2146         if (!debug_pagealloc_enabled())
2147                 return;
2148 
2149         __kernel_map_pages(page, numpages, enable);
2150 }
2151 #ifdef CONFIG_HIBERNATION
2152 extern bool kernel_page_present(struct page *page);
2153 #endif /* CONFIG_HIBERNATION */
2154 #else
2155 static inline void
2156 kernel_map_pages(struct page *page, int numpages, int enable) {}
2157 #ifdef CONFIG_HIBERNATION
2158 static inline bool kernel_page_present(struct page *page) { return true; }
2159 #endif /* CONFIG_HIBERNATION */
2160 #endif
2161 
2162 #ifdef __HAVE_ARCH_GATE_AREA
2163 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2164 extern int in_gate_area_no_mm(unsigned long addr);
2165 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2166 #else
2167 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2168 {
2169         return NULL;
2170 }
2171 static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2172 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2173 {
2174         return 0;
2175 }
2176 #endif  /* __HAVE_ARCH_GATE_AREA */
2177 
2178 #ifdef CONFIG_SYSCTL
2179 extern int sysctl_drop_caches;
2180 int drop_caches_sysctl_handler(struct ctl_table *, int,
2181                                         void __user *, size_t *, loff_t *);
2182 #endif
2183 
2184 void drop_slab(void);
2185 void drop_slab_node(int nid);
2186 
2187 #ifndef CONFIG_MMU
2188 #define randomize_va_space 0
2189 #else
2190 extern int randomize_va_space;
2191 #endif
2192 
2193 const char * arch_vma_name(struct vm_area_struct *vma);
2194 void print_vma_addr(char *prefix, unsigned long rip);
2195 
2196 void sparse_mem_maps_populate_node(struct page **map_map,
2197                                    unsigned long pnum_begin,
2198                                    unsigned long pnum_end,
2199                                    unsigned long map_count,
2200                                    int nodeid);
2201 
2202 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2203 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2204 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
2205 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2206 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2207 void *vmemmap_alloc_block(unsigned long size, int node);
2208 void *vmemmap_alloc_block_buf(unsigned long size, int node);
2209 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2210 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2211                                int node);
2212 int vmemmap_populate(unsigned long start, unsigned long end, int node);
2213 void vmemmap_populate_print_last(void);
2214 #ifdef CONFIG_MEMORY_HOTPLUG
2215 void vmemmap_free(unsigned long start, unsigned long end);
2216 #endif
2217 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2218                                   unsigned long size);
2219 
2220 enum mf_flags {
2221         MF_COUNT_INCREASED = 1 << 0,
2222         MF_ACTION_REQUIRED = 1 << 1,
2223         MF_MUST_KILL = 1 << 2,
2224         MF_SOFT_OFFLINE = 1 << 3,
2225 };
2226 extern int memory_failure(unsigned long pfn, int trapno, int flags);
2227 extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2228 extern int unpoison_memory(unsigned long pfn);
2229 extern int sysctl_memory_failure_early_kill;
2230 extern int sysctl_memory_failure_recovery;
2231 extern void shake_page(struct page *p, int access);
2232 extern atomic_long_t num_poisoned_pages;
2233 extern int soft_offline_page(struct page *page, int flags);
2234 
2235 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2236 extern void clear_huge_page(struct page *page,
2237                             unsigned long addr,
2238                             unsigned int pages_per_huge_page);
2239 extern void copy_user_huge_page(struct page *dst, struct page *src,
2240                                 unsigned long addr, struct vm_area_struct *vma,
2241                                 unsigned int pages_per_huge_page);
2242 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2243 
2244 extern struct page_ext_operations debug_guardpage_ops;
2245 extern struct page_ext_operations page_poisoning_ops;
2246 
2247 #ifdef CONFIG_DEBUG_PAGEALLOC
2248 extern unsigned int _debug_guardpage_minorder;
2249 extern bool _debug_guardpage_enabled;
2250 
2251 static inline unsigned int debug_guardpage_minorder(void)
2252 {
2253         return _debug_guardpage_minorder;
2254 }
2255 
2256 static inline bool debug_guardpage_enabled(void)
2257 {
2258         return _debug_guardpage_enabled;
2259 }
2260 
2261 static inline bool page_is_guard(struct page *page)
2262 {
2263         struct page_ext *page_ext;
2264 
2265         if (!debug_guardpage_enabled())
2266                 return false;
2267 
2268         page_ext = lookup_page_ext(page);
2269         return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2270 }
2271 #else
2272 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2273 static inline bool debug_guardpage_enabled(void) { return false; }
2274 static inline bool page_is_guard(struct page *page) { return false; }
2275 #endif /* CONFIG_DEBUG_PAGEALLOC */
2276 
2277 #if MAX_NUMNODES > 1
2278 void __init setup_nr_node_ids(void);
2279 #else
2280 static inline void setup_nr_node_ids(void) {}
2281 #endif
2282 
2283 #endif /* __KERNEL__ */
2284 #endif /* _LINUX_MM_H */
2285 
