
Linux/mm/page_alloc.c

  1 /*
  2  *  linux/mm/page_alloc.c
  3  *
  4  *  Manages the free list; the system allocates free pages here.
  5  *  Note that kmalloc() lives in slab.c
  6  *
  7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  8  *  Swap reorganised 29.12.95, Stephen Tweedie
  9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 15  */
 16 
 17 #include <linux/stddef.h>
 18 #include <linux/mm.h>
 19 #include <linux/swap.h>
 20 #include <linux/interrupt.h>
 21 #include <linux/pagemap.h>
 22 #include <linux/jiffies.h>
 23 #include <linux/bootmem.h>
 24 #include <linux/memblock.h>
 25 #include <linux/compiler.h>
 26 #include <linux/kernel.h>
 27 #include <linux/kmemcheck.h>
 28 #include <linux/module.h>
 29 #include <linux/suspend.h>
 30 #include <linux/pagevec.h>
 31 #include <linux/blkdev.h>
 32 #include <linux/slab.h>
 33 #include <linux/ratelimit.h>
 34 #include <linux/oom.h>
 35 #include <linux/notifier.h>
 36 #include <linux/topology.h>
 37 #include <linux/sysctl.h>
 38 #include <linux/cpu.h>
 39 #include <linux/cpuset.h>
 40 #include <linux/memory_hotplug.h>
 41 #include <linux/nodemask.h>
 42 #include <linux/vmalloc.h>
 43 #include <linux/vmstat.h>
 44 #include <linux/mempolicy.h>
 45 #include <linux/stop_machine.h>
 46 #include <linux/sort.h>
 47 #include <linux/pfn.h>
 48 #include <linux/backing-dev.h>
 49 #include <linux/fault-inject.h>
 50 #include <linux/page-isolation.h>
 51 #include <linux/page_ext.h>
 52 #include <linux/debugobjects.h>
 53 #include <linux/kmemleak.h>
 54 #include <linux/compaction.h>
 55 #include <trace/events/kmem.h>
 56 #include <linux/prefetch.h>
 57 #include <linux/mm_inline.h>
 58 #include <linux/migrate.h>
 60 #include <linux/hugetlb.h>
 61 #include <linux/sched/rt.h>
 62 #include <linux/page_owner.h>
 63 
 64 #include <asm/sections.h>
 65 #include <asm/tlbflush.h>
 66 #include <asm/div64.h>
 67 #include "internal.h"
 68 
 69 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 70 static DEFINE_MUTEX(pcp_batch_high_lock);
 71 #define MIN_PERCPU_PAGELIST_FRACTION    (8)
 72 
 73 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 74 DEFINE_PER_CPU(int, numa_node);
 75 EXPORT_PER_CPU_SYMBOL(numa_node);
 76 #endif
 77 
 78 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 79 /*
 80  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 81  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 82  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 83  * defined in <linux/topology.h>.
 84  */
 85 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
 86 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 87 int _node_numa_mem_[MAX_NUMNODES];
 88 #endif
 89 
 90 /*
 91  * Array of node states.
 92  */
 93 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 94         [N_POSSIBLE] = NODE_MASK_ALL,
 95         [N_ONLINE] = { { [0] = 1UL } },
 96 #ifndef CONFIG_NUMA
 97         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
 98 #ifdef CONFIG_HIGHMEM
 99         [N_HIGH_MEMORY] = { { [0] = 1UL } },
100 #endif
101 #ifdef CONFIG_MOVABLE_NODE
102         [N_MEMORY] = { { [0] = 1UL } },
103 #endif
104         [N_CPU] = { { [0] = 1UL } },
105 #endif  /* NUMA */
106 };
107 EXPORT_SYMBOL(node_states);
108 
109 /* Protect totalram_pages and zone->managed_pages */
110 static DEFINE_SPINLOCK(managed_page_count_lock);
111 
112 unsigned long totalram_pages __read_mostly;
113 unsigned long totalreserve_pages __read_mostly;
114 unsigned long totalcma_pages __read_mostly;
115 /*
116  * When calculating the number of globally allowed dirty pages, there
117  * is a certain number of per-zone reserves that should not be
118  * considered dirtyable memory.  This is the sum of those reserves
119  * over all existing zones that contribute dirtyable memory.
120  */
121 unsigned long dirty_balance_reserve __read_mostly;
122 
123 int percpu_pagelist_fraction;
124 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
125 
126 #ifdef CONFIG_PM_SLEEP
127 /*
128  * The following functions are used by the suspend/hibernate code to temporarily
129  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
130  * while devices are suspended.  To avoid races with the suspend/hibernate code,
131  * they should always be called with pm_mutex held (gfp_allowed_mask also should
132  * only be modified with pm_mutex held, unless the suspend/hibernate code is
133  * guaranteed not to run in parallel with that modification).
134  */
135 
136 static gfp_t saved_gfp_mask;
137 
138 void pm_restore_gfp_mask(void)
139 {
140         WARN_ON(!mutex_is_locked(&pm_mutex));
141         if (saved_gfp_mask) {
142                 gfp_allowed_mask = saved_gfp_mask;
143                 saved_gfp_mask = 0;
144         }
145 }
146 
147 void pm_restrict_gfp_mask(void)
148 {
149         WARN_ON(!mutex_is_locked(&pm_mutex));
150         WARN_ON(saved_gfp_mask);
151         saved_gfp_mask = gfp_allowed_mask;
152         gfp_allowed_mask &= ~GFP_IOFS;
153 }
154 
155 bool pm_suspended_storage(void)
156 {
157         if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
158                 return false;
159         return true;
160 }
161 #endif /* CONFIG_PM_SLEEP */
162 
163 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
164 int pageblock_order __read_mostly;
165 #endif
166 
167 static void __free_pages_ok(struct page *page, unsigned int order);
168 
169 /*
170  * results with 256, 32 in the lowmem_reserve sysctl:
171  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
172  *      1G machine -> (16M dma, 784M normal, 224M high)
173  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
174  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
175  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
176  *
177  * TBD: should special case ZONE_DMA32 machines here - in those we normally
178  * don't need any ZONE_NORMAL reservation
179  */
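    /*
     * Roughly speaking, each ratio below is the divisor applied when working
     * out how much of a lower zone must stay free for allocations that could
     * also have been served from the zones above it: for a request targeting
     * zone j, zone i < j keeps about (managed pages of zones i+1..j) / ratio[i]
     * pages in reserve.  With a ratio of 256 and 784M of ZONE_NORMAL sitting
     * above ZONE_DMA, that works out to roughly 784M/256 ~= 3M of DMA memory.
     */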
180 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
181 #ifdef CONFIG_ZONE_DMA
182          256,
183 #endif
184 #ifdef CONFIG_ZONE_DMA32
185          256,
186 #endif
187 #ifdef CONFIG_HIGHMEM
188          32,
189 #endif
190          32,
191 };
192 
193 EXPORT_SYMBOL(totalram_pages);
194 
195 static char * const zone_names[MAX_NR_ZONES] = {
196 #ifdef CONFIG_ZONE_DMA
197          "DMA",
198 #endif
199 #ifdef CONFIG_ZONE_DMA32
200          "DMA32",
201 #endif
202          "Normal",
203 #ifdef CONFIG_HIGHMEM
204          "HighMem",
205 #endif
206          "Movable",
207 };
208 
209 int min_free_kbytes = 1024;
210 int user_min_free_kbytes = -1;
211 
212 static unsigned long __meminitdata nr_kernel_pages;
213 static unsigned long __meminitdata nr_all_pages;
214 static unsigned long __meminitdata dma_reserve;
215 
216 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
217 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
218 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
219 static unsigned long __initdata required_kernelcore;
220 static unsigned long __initdata required_movablecore;
221 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
222 
223 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
224 int movable_zone;
225 EXPORT_SYMBOL(movable_zone);
226 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
227 
228 #if MAX_NUMNODES > 1
229 int nr_node_ids __read_mostly = MAX_NUMNODES;
230 int nr_online_nodes __read_mostly = 1;
231 EXPORT_SYMBOL(nr_node_ids);
232 EXPORT_SYMBOL(nr_online_nodes);
233 #endif
234 
235 int page_group_by_mobility_disabled __read_mostly;
236 
237 void set_pageblock_migratetype(struct page *page, int migratetype)
238 {
239         if (unlikely(page_group_by_mobility_disabled &&
240                      migratetype < MIGRATE_PCPTYPES))
241                 migratetype = MIGRATE_UNMOVABLE;
242 
243         set_pageblock_flags_group(page, (unsigned long)migratetype,
244                                         PB_migrate, PB_migrate_end);
245 }
246 
247 bool oom_killer_disabled __read_mostly;
248 
249 #ifdef CONFIG_DEBUG_VM
250 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
251 {
252         int ret = 0;
253         unsigned seq;
254         unsigned long pfn = page_to_pfn(page);
255         unsigned long sp, start_pfn;
256 
257         do {
258                 seq = zone_span_seqbegin(zone);
259                 start_pfn = zone->zone_start_pfn;
260                 sp = zone->spanned_pages;
261                 if (!zone_spans_pfn(zone, pfn))
262                         ret = 1;
263         } while (zone_span_seqretry(zone, seq));
264 
265         if (ret)
266                 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
267                         pfn, zone_to_nid(zone), zone->name,
268                         start_pfn, start_pfn + sp);
269 
270         return ret;
271 }
272 
273 static int page_is_consistent(struct zone *zone, struct page *page)
274 {
275         if (!pfn_valid_within(page_to_pfn(page)))
276                 return 0;
277         if (zone != page_zone(page))
278                 return 0;
279 
280         return 1;
281 }
282 /*
283  * Temporary debugging check for pages not lying within a given zone.
284  */
285 static int bad_range(struct zone *zone, struct page *page)
286 {
287         if (page_outside_zone_boundaries(zone, page))
288                 return 1;
289         if (!page_is_consistent(zone, page))
290                 return 1;
291 
292         return 0;
293 }
294 #else
295 static inline int bad_range(struct zone *zone, struct page *page)
296 {
297         return 0;
298 }
299 #endif
300 
301 static void bad_page(struct page *page, const char *reason,
302                 unsigned long bad_flags)
303 {
304         static unsigned long resume;
305         static unsigned long nr_shown;
306         static unsigned long nr_unshown;
307 
308         /* Don't complain about poisoned pages */
309         if (PageHWPoison(page)) {
310                 page_mapcount_reset(page); /* remove PageBuddy */
311                 return;
312         }
313 
314         /*
315          * Allow a burst of 60 reports, then keep quiet for that minute;
316          * or allow a steady drip of one report per second.
317          */
318         if (nr_shown == 60) {
319                 if (time_before(jiffies, resume)) {
320                         nr_unshown++;
321                         goto out;
322                 }
323                 if (nr_unshown) {
324                         printk(KERN_ALERT
325                               "BUG: Bad page state: %lu messages suppressed\n",
326                                 nr_unshown);
327                         nr_unshown = 0;
328                 }
329                 nr_shown = 0;
330         }
331         if (nr_shown++ == 0)
332                 resume = jiffies + 60 * HZ;
333 
334         printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
335                 current->comm, page_to_pfn(page));
336         dump_page_badflags(page, reason, bad_flags);
337 
338         print_modules();
339         dump_stack();
340 out:
341         /* Leave bad fields for debug, except PageBuddy could make trouble */
342         page_mapcount_reset(page); /* remove PageBuddy */
343         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
344 }
345 
346 /*
347  * Higher-order pages are called "compound pages".  They are structured thusly:
348  *
349  * The first PAGE_SIZE page is called the "head page".
350  *
351  * The remaining PAGE_SIZE pages are called "tail pages".
352  *
353  * All pages have PG_compound set.  All tail pages have their ->first_page
354  * pointing at the head page.
355  *
356  * The first tail page's ->lru.next holds the address of the compound page's
357  * put_page() function.  Its ->lru.prev holds the order of allocation.
358  * This usage means that zero-order pages may not be compound.
359  */
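    /*
     * Such pages are what a caller gets back from a high-order allocation
     * made with __GFP_COMP, e.g. alloc_pages(GFP_KERNEL | __GFP_COMP, order);
     * prep_new_page() below calls prep_compound_page() in that case to set up
     * the head/tail structure described above.
     */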
360 
361 static void free_compound_page(struct page *page)
362 {
363         __free_pages_ok(page, compound_order(page));
364 }
365 
366 void prep_compound_page(struct page *page, unsigned long order)
367 {
368         int i;
369         int nr_pages = 1 << order;
370 
371         set_compound_page_dtor(page, free_compound_page);
372         set_compound_order(page, order);
373         __SetPageHead(page);
374         for (i = 1; i < nr_pages; i++) {
375                 struct page *p = page + i;
376                 set_page_count(p, 0);
377                 p->first_page = page;
378                 /* Make sure p->first_page is always valid for PageTail() */
379                 smp_wmb();
380                 __SetPageTail(p);
381         }
382 }
383 
384 /* update __split_huge_page_refcount if you change this function */
385 static int destroy_compound_page(struct page *page, unsigned long order)
386 {
387         int i;
388         int nr_pages = 1 << order;
389         int bad = 0;
390 
391         if (unlikely(compound_order(page) != order)) {
392                 bad_page(page, "wrong compound order", 0);
393                 bad++;
394         }
395 
396         __ClearPageHead(page);
397 
398         for (i = 1; i < nr_pages; i++) {
399                 struct page *p = page + i;
400 
401                 if (unlikely(!PageTail(p))) {
402                         bad_page(page, "PageTail not set", 0);
403                         bad++;
404                 } else if (unlikely(p->first_page != page)) {
405                         bad_page(page, "first_page not consistent", 0);
406                         bad++;
407                 }
408                 __ClearPageTail(p);
409         }
410 
411         return bad;
412 }
413 
414 static inline void prep_zero_page(struct page *page, unsigned int order,
415                                                         gfp_t gfp_flags)
416 {
417         int i;
418 
419         /*
420          * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
421          * and __GFP_HIGHMEM from hard or soft interrupt context.
422          */
423         VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
424         for (i = 0; i < (1 << order); i++)
425                 clear_highpage(page + i);
426 }
427 
428 #ifdef CONFIG_DEBUG_PAGEALLOC
429 unsigned int _debug_guardpage_minorder;
430 bool _debug_pagealloc_enabled __read_mostly;
431 bool _debug_guardpage_enabled __read_mostly;
432 
433 static int __init early_debug_pagealloc(char *buf)
434 {
435         if (!buf)
436                 return -EINVAL;
437 
438         if (strcmp(buf, "on") == 0)
439                 _debug_pagealloc_enabled = true;
440 
441         return 0;
442 }
443 early_param("debug_pagealloc", early_debug_pagealloc);
444 
445 static bool need_debug_guardpage(void)
446 {
447         /* If we don't use debug_pagealloc, we don't need guard page */
448         if (!debug_pagealloc_enabled())
449                 return false;
450 
451         return true;
452 }
453 
454 static void init_debug_guardpage(void)
455 {
456         if (!debug_pagealloc_enabled())
457                 return;
458 
459         _debug_guardpage_enabled = true;
460 }
461 
462 struct page_ext_operations debug_guardpage_ops = {
463         .need = need_debug_guardpage,
464         .init = init_debug_guardpage,
465 };
466 
467 static int __init debug_guardpage_minorder_setup(char *buf)
468 {
469         unsigned long res;
470 
471         if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
472                 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
473                 return 0;
474         }
475         _debug_guardpage_minorder = res;
476         printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
477         return 0;
478 }
479 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
480 
481 static inline void set_page_guard(struct zone *zone, struct page *page,
482                                 unsigned int order, int migratetype)
483 {
484         struct page_ext *page_ext;
485 
486         if (!debug_guardpage_enabled())
487                 return;
488 
489         page_ext = lookup_page_ext(page);
490         __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
491 
492         INIT_LIST_HEAD(&page->lru);
493         set_page_private(page, order);
494         /* Guard pages are not available for any usage */
495         __mod_zone_freepage_state(zone, -(1 << order), migratetype);
496 }
497 
498 static inline void clear_page_guard(struct zone *zone, struct page *page,
499                                 unsigned int order, int migratetype)
500 {
501         struct page_ext *page_ext;
502 
503         if (!debug_guardpage_enabled())
504                 return;
505 
506         page_ext = lookup_page_ext(page);
507         __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
508 
509         set_page_private(page, 0);
510         if (!is_migrate_isolate(migratetype))
511                 __mod_zone_freepage_state(zone, (1 << order), migratetype);
512 }
513 #else
514 struct page_ext_operations debug_guardpage_ops = { NULL, };
515 static inline void set_page_guard(struct zone *zone, struct page *page,
516                                 unsigned int order, int migratetype) {}
517 static inline void clear_page_guard(struct zone *zone, struct page *page,
518                                 unsigned int order, int migratetype) {}
519 #endif
520 
521 static inline void set_page_order(struct page *page, unsigned int order)
522 {
523         set_page_private(page, order);
524         __SetPageBuddy(page);
525 }
526 
527 static inline void rmv_page_order(struct page *page)
528 {
529         __ClearPageBuddy(page);
530         set_page_private(page, 0);
531 }
532 
533 /*
534  * This function checks whether a page is free && is the buddy
535  * we can coalesce a page and its buddy if
536  * (a) the buddy is not in a hole &&
537  * (b) the buddy is in the buddy system &&
538  * (c) a page and its buddy have the same order &&
539  * (d) a page and its buddy are in the same zone.
540  *
541  * For recording whether a page is in the buddy system, we set ->_mapcount
542  * to PAGE_BUDDY_MAPCOUNT_VALUE.
543  * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
544  * serialized by zone->lock.
545  *
546  * For recording page's order, we use page_private(page).
547  */
548 static inline int page_is_buddy(struct page *page, struct page *buddy,
549                                                         unsigned int order)
550 {
551         if (!pfn_valid_within(page_to_pfn(buddy)))
552                 return 0;
553 
554         if (page_is_guard(buddy) && page_order(buddy) == order) {
555                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
556 
557                 if (page_zone_id(page) != page_zone_id(buddy))
558                         return 0;
559 
560                 return 1;
561         }
562 
563         if (PageBuddy(buddy) && page_order(buddy) == order) {
564                 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
565 
566                 /*
567                  * zone check is done late to avoid uselessly
568                  * calculating zone/node ids for pages that could
569                  * never merge.
570                  */
571                 if (page_zone_id(page) != page_zone_id(buddy))
572                         return 0;
573 
574                 return 1;
575         }
576         return 0;
577 }
578 
579 /*
580  * Freeing function for a buddy system allocator.
581  *
582  * The concept of a buddy system is to maintain a direct-mapped table
583  * (containing bit values) for memory blocks of various "orders".
584  * The bottom level table contains the map for the smallest allocatable
585  * units of memory (here, pages), and each level above it describes
586  * pairs of units from the levels below, hence, "buddies".
587  * At a high level, all that happens here is marking the table entry
588  * at the bottom level available, and propagating the changes upward
589  * as necessary, plus some accounting needed to play nicely with other
590  * parts of the VM system.
591  * At each level, we keep a list of pages, which are heads of contiguous
592  * free pages of length (1 << order) and marked with _mapcount
593  * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
594  * field.
595  * So when we are allocating or freeing one, we can derive the state of the
596  * other.  That is, if we allocate a small block, and both were
597  * free, the remainder of the region must be split into blocks.
598  * If a block is freed, and its buddy is also free, then this
599  * triggers coalescing into a block of larger size.
600  *
601  * -- nyc
602  */
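    /*
     * In this kernel version the buddy of a block is found by flipping one bit
     * of its page index: __find_buddy_index(page_idx, order) evaluates to
     * page_idx ^ (1 << order).  For example, the order-1 block at index 8 has
     * its buddy at 8 ^ 2 = 10, and combined_idx = buddy_idx & page_idx = 8 is
     * where the merged order-2 block starts.
     */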
603 
604 static inline void __free_one_page(struct page *page,
605                 unsigned long pfn,
606                 struct zone *zone, unsigned int order,
607                 int migratetype)
608 {
609         unsigned long page_idx;
610         unsigned long combined_idx;
611         unsigned long uninitialized_var(buddy_idx);
612         struct page *buddy;
613         int max_order = MAX_ORDER;
614 
615         VM_BUG_ON(!zone_is_initialized(zone));
616 
617         if (unlikely(PageCompound(page)))
618                 if (unlikely(destroy_compound_page(page, order)))
619                         return;
620 
621         VM_BUG_ON(migratetype == -1);
622         if (is_migrate_isolate(migratetype)) {
623                 /*
624                  * We restrict max order of merging to prevent merge
625                  * between freepages on isolate pageblock and normal
626                  * pageblock. Without this, pageblock isolation
627                  * could cause incorrect freepage accounting.
628                  */
629                 max_order = min(MAX_ORDER, pageblock_order + 1);
630         } else {
631                 __mod_zone_freepage_state(zone, 1 << order, migratetype);
632         }
633 
634         page_idx = pfn & ((1 << max_order) - 1);
635 
636         VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
637         VM_BUG_ON_PAGE(bad_range(zone, page), page);
638 
639         while (order < max_order - 1) {
640                 buddy_idx = __find_buddy_index(page_idx, order);
641                 buddy = page + (buddy_idx - page_idx);
642                 if (!page_is_buddy(page, buddy, order))
643                         break;
644                 /*
645                  * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
646                  * merge with it and move up one order.
647                  */
648                 if (page_is_guard(buddy)) {
649                         clear_page_guard(zone, buddy, order, migratetype);
650                 } else {
651                         list_del(&buddy->lru);
652                         zone->free_area[order].nr_free--;
653                         rmv_page_order(buddy);
654                 }
655                 combined_idx = buddy_idx & page_idx;
656                 page = page + (combined_idx - page_idx);
657                 page_idx = combined_idx;
658                 order++;
659         }
660         set_page_order(page, order);
661 
662         /*
663          * If this is not the largest possible page, check if the buddy
664          * of the next-highest order is free. If it is, it's possible
665  * that pages are being freed that will coalesce soon. In case
666          * that is happening, add the free page to the tail of the list
667          * so it's less likely to be used soon and more likely to be merged
668          * as a higher order page
669          */
670         if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
671                 struct page *higher_page, *higher_buddy;
672                 combined_idx = buddy_idx & page_idx;
673                 higher_page = page + (combined_idx - page_idx);
674                 buddy_idx = __find_buddy_index(combined_idx, order + 1);
675                 higher_buddy = higher_page + (buddy_idx - combined_idx);
676                 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
677                         list_add_tail(&page->lru,
678                                 &zone->free_area[order].free_list[migratetype]);
679                         goto out;
680                 }
681         }
682 
683         list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
684 out:
685         zone->free_area[order].nr_free++;
686 }
687 
688 static inline int free_pages_check(struct page *page)
689 {
690         const char *bad_reason = NULL;
691         unsigned long bad_flags = 0;
692 
693         if (unlikely(page_mapcount(page)))
694                 bad_reason = "nonzero mapcount";
695         if (unlikely(page->mapping != NULL))
696                 bad_reason = "non-NULL mapping";
697         if (unlikely(atomic_read(&page->_count) != 0))
698                 bad_reason = "nonzero _count";
699         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
700                 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
701                 bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
702         }
703 #ifdef CONFIG_MEMCG
704         if (unlikely(page->mem_cgroup))
705                 bad_reason = "page still charged to cgroup";
706 #endif
707         if (unlikely(bad_reason)) {
708                 bad_page(page, bad_reason, bad_flags);
709                 return 1;
710         }
711         page_cpupid_reset_last(page);
712         if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
713                 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
714         return 0;
715 }
716 
717 /*
718  * Frees a number of pages from the PCP lists
719  * Assumes all pages on list are in same zone, and of same order.
720  * count is the number of pages to free.
721  *
722  * If the zone was previously in an "all pages pinned" state then look to
723  * see if this freeing clears that state.
724  *
725  * And clear the zone's pages_scanned counter, to hold off the "all pages are
726  * pinned" detection logic.
727  */
728 static void free_pcppages_bulk(struct zone *zone, int count,
729                                         struct per_cpu_pages *pcp)
730 {
731         int migratetype = 0;
732         int batch_free = 0;
733         int to_free = count;
734         unsigned long nr_scanned;
735 
736         spin_lock(&zone->lock);
737         nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
738         if (nr_scanned)
739                 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
740 
741         while (to_free) {
742                 struct page *page;
743                 struct list_head *list;
744 
745                 /*
746                  * Remove pages from lists in a round-robin fashion. A
747                  * batch_free count is maintained that is incremented when an
748                  * empty list is encountered.  This is so more pages are freed
749                  * off fuller lists instead of spinning excessively around empty
750                  * lists
751                  */
752                 do {
753                         batch_free++;
754                         if (++migratetype == MIGRATE_PCPTYPES)
755                                 migratetype = 0;
756                         list = &pcp->lists[migratetype];
757                 } while (list_empty(list));
758 
759                 /* This is the only non-empty list. Free them all. */
760                 if (batch_free == MIGRATE_PCPTYPES)
761                         batch_free = to_free;
762 
763                 do {
764                         int mt; /* migratetype of the to-be-freed page */
765 
766                         page = list_entry(list->prev, struct page, lru);
767                         /* must delete as __free_one_page list manipulates */
768                         list_del(&page->lru);
769                         mt = get_freepage_migratetype(page);
770                         if (unlikely(has_isolate_pageblock(zone)))
771                                 mt = get_pageblock_migratetype(page);
772 
773                         /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
774                         __free_one_page(page, page_to_pfn(page), zone, 0, mt);
775                         trace_mm_page_pcpu_drain(page, 0, mt);
776                 } while (--to_free && --batch_free && !list_empty(list));
777         }
778         spin_unlock(&zone->lock);
779 }
780 
781 static void free_one_page(struct zone *zone,
782                                 struct page *page, unsigned long pfn,
783                                 unsigned int order,
784                                 int migratetype)
785 {
786         unsigned long nr_scanned;
787         spin_lock(&zone->lock);
788         nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
789         if (nr_scanned)
790                 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
791 
792         if (unlikely(has_isolate_pageblock(zone) ||
793                 is_migrate_isolate(migratetype))) {
794                 migratetype = get_pfnblock_migratetype(page, pfn);
795         }
796         __free_one_page(page, pfn, zone, order, migratetype);
797         spin_unlock(&zone->lock);
798 }
799 
800 static bool free_pages_prepare(struct page *page, unsigned int order)
801 {
802         int i;
803         int bad = 0;
804 
805         VM_BUG_ON_PAGE(PageTail(page), page);
806         VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
807 
808         trace_mm_page_free(page, order);
809         kmemcheck_free_shadow(page, order);
810 
811         if (PageAnon(page))
812                 page->mapping = NULL;
813         for (i = 0; i < (1 << order); i++)
814                 bad += free_pages_check(page + i);
815         if (bad)
816                 return false;
817 
818         reset_page_owner(page, order);
819 
820         if (!PageHighMem(page)) {
821                 debug_check_no_locks_freed(page_address(page),
822                                            PAGE_SIZE << order);
823                 debug_check_no_obj_freed(page_address(page),
824                                            PAGE_SIZE << order);
825         }
826         arch_free_page(page, order);
827         kernel_map_pages(page, 1 << order, 0);
828 
829         return true;
830 }
831 
832 static void __free_pages_ok(struct page *page, unsigned int order)
833 {
834         unsigned long flags;
835         int migratetype;
836         unsigned long pfn = page_to_pfn(page);
837 
838         if (!free_pages_prepare(page, order))
839                 return;
840 
841         migratetype = get_pfnblock_migratetype(page, pfn);
842         local_irq_save(flags);
843         __count_vm_events(PGFREE, 1 << order);
844         set_freepage_migratetype(page, migratetype);
845         free_one_page(page_zone(page), page, pfn, order, migratetype);
846         local_irq_restore(flags);
847 }
848 
849 void __init __free_pages_bootmem(struct page *page, unsigned int order)
850 {
851         unsigned int nr_pages = 1 << order;
852         struct page *p = page;
853         unsigned int loop;
854 
855         prefetchw(p);
856         for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
857                 prefetchw(p + 1);
858                 __ClearPageReserved(p);
859                 set_page_count(p, 0);
860         }
861         __ClearPageReserved(p);
862         set_page_count(p, 0);
863 
864         page_zone(page)->managed_pages += nr_pages;
865         set_page_refcounted(page);
866         __free_pages(page, order);
867 }
868 
869 #ifdef CONFIG_CMA
870 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
871 void __init init_cma_reserved_pageblock(struct page *page)
872 {
873         unsigned i = pageblock_nr_pages;
874         struct page *p = page;
875 
876         do {
877                 __ClearPageReserved(p);
878                 set_page_count(p, 0);
879         } while (++p, --i);
880 
881         set_pageblock_migratetype(page, MIGRATE_CMA);
882 
883         if (pageblock_order >= MAX_ORDER) {
884                 i = pageblock_nr_pages;
885                 p = page;
886                 do {
887                         set_page_refcounted(p);
888                         __free_pages(p, MAX_ORDER - 1);
889                         p += MAX_ORDER_NR_PAGES;
890                 } while (i -= MAX_ORDER_NR_PAGES);
891         } else {
892                 set_page_refcounted(page);
893                 __free_pages(page, pageblock_order);
894         }
895 
896         adjust_managed_page_count(page, pageblock_nr_pages);
897 }
898 #endif
899 
900 /*
901  * The order of subdivision here is critical for the IO subsystem.
902  * Please do not alter this order without good reasons and regression
903  * testing. Specifically, as large blocks of memory are subdivided,
904  * the order in which smaller blocks are delivered depends on the order
905  * they're subdivided in this function. This is the primary factor
906  * influencing the order in which pages are delivered to the IO
907  * subsystem according to empirical testing, and this is also justified
908  * by considering the behavior of a buddy system containing a single
909  * large block of memory acted on by a series of small allocations.
910  * This behavior is a critical factor in sglist merging's success.
911  *
912  * -- nyc
913  */
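    /*
     * For example, satisfying an order-0 request from an order-3 free block
     * splits off the upper half one order at a time: an order-2, an order-1
     * and an order-0 buddy go back on the free lists, and the remaining
     * order-0 page at the start of the block is handed to the caller.
     */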
914 static inline void expand(struct zone *zone, struct page *page,
915         int low, int high, struct free_area *area,
916         int migratetype)
917 {
918         unsigned long size = 1 << high;
919 
920         while (high > low) {
921                 area--;
922                 high--;
923                 size >>= 1;
924                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
925 
926                 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
927                         debug_guardpage_enabled() &&
928                         high < debug_guardpage_minorder()) {
929                         /*
930                          * Mark as guard pages (or page), so that they can be
931                          * merged back into the allocator when the buddy is freed.
932                          * The corresponding page table entries are not touched;
933                          * the pages stay not-present in the virtual address space.
934                          */
935                         set_page_guard(zone, &page[size], high, migratetype);
936                         continue;
937                 }
938                 list_add(&page[size].lru, &area->free_list[migratetype]);
939                 area->nr_free++;
940                 set_page_order(&page[size], high);
941         }
942 }
943 
944 /*
945  * This page is about to be returned from the page allocator
946  */
947 static inline int check_new_page(struct page *page)
948 {
949         const char *bad_reason = NULL;
950         unsigned long bad_flags = 0;
951 
952         if (unlikely(page_mapcount(page)))
953                 bad_reason = "nonzero mapcount";
954         if (unlikely(page->mapping != NULL))
955                 bad_reason = "non-NULL mapping";
956         if (unlikely(atomic_read(&page->_count) != 0))
957                 bad_reason = "nonzero _count";
958         if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
959                 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
960                 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
961         }
962 #ifdef CONFIG_MEMCG
963         if (unlikely(page->mem_cgroup))
964                 bad_reason = "page still charged to cgroup";
965 #endif
966         if (unlikely(bad_reason)) {
967                 bad_page(page, bad_reason, bad_flags);
968                 return 1;
969         }
970         return 0;
971 }
972 
973 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
974 {
975         int i;
976 
977         for (i = 0; i < (1 << order); i++) {
978                 struct page *p = page + i;
979                 if (unlikely(check_new_page(p)))
980                         return 1;
981         }
982 
983         set_page_private(page, 0);
984         set_page_refcounted(page);
985 
986         arch_alloc_page(page, order);
987         kernel_map_pages(page, 1 << order, 1);
988 
989         if (gfp_flags & __GFP_ZERO)
990                 prep_zero_page(page, order, gfp_flags);
991 
992         if (order && (gfp_flags & __GFP_COMP))
993                 prep_compound_page(page, order);
994 
995         set_page_owner(page, order, gfp_flags);
996 
997         return 0;
998 }
999 
1000 /*
1001  * Go through the free lists for the given migratetype and remove
1002  * the smallest available page from the freelists
1003  */
1004 static inline
1005 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1006                                                 int migratetype)
1007 {
1008         unsigned int current_order;
1009         struct free_area *area;
1010         struct page *page;
1011 
1012         /* Find a page of the appropriate size in the preferred list */
1013         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
1014                 area = &(zone->free_area[current_order]);
1015                 if (list_empty(&area->free_list[migratetype]))
1016                         continue;
1017 
1018                 page = list_entry(area->free_list[migratetype].next,
1019                                                         struct page, lru);
1020                 list_del(&page->lru);
1021                 rmv_page_order(page);
1022                 area->nr_free--;
1023                 expand(zone, page, order, current_order, area, migratetype);
1024                 set_freepage_migratetype(page, migratetype);
1025                 return page;
1026         }
1027 
1028         return NULL;
1029 }
1030 
1031 
1032 /*
1033  * This array describes the order in which free lists are fallen back to
1034  * when the free lists for the desired migrate type are depleted
1035  */
1036 static int fallbacks[MIGRATE_TYPES][4] = {
1037         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1038         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1039 #ifdef CONFIG_CMA
1040         [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
1041         [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
1042 #else
1043         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
1044 #endif
1045         [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
1046 #ifdef CONFIG_MEMORY_ISOLATION
1047         [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
1048 #endif
1049 };
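     /*
      * The rows above are read left to right: e.g. when the MIGRATE_UNMOVABLE
      * free lists are empty, the allocator falls back to MIGRATE_RECLAIMABLE,
      * then MIGRATE_MOVABLE, and finally MIGRATE_RESERVE.
      */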
1050 
1051 /*
1052  * Move the free pages in a range to the free lists of the requested type.
1053  * Note that start_page and end_page are not aligned on a pageblock
1054  * boundary. If alignment is required, use move_freepages_block()
1055  */
1056 int move_freepages(struct zone *zone,
1057                           struct page *start_page, struct page *end_page,
1058                           int migratetype)
1059 {
1060         struct page *page;
1061         unsigned long order;
1062         int pages_moved = 0;
1063 
1064 #ifndef CONFIG_HOLES_IN_ZONE
1065         /*
1066          * page_zone is not safe to call in this context when
1067          * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
1068          * anyway as we check zone boundaries in move_freepages_block().
1069          * Remove at a later date when no bug reports exist related to
1070          * grouping pages by mobility
1071          */
1072         VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1073 #endif
1074 
1075         for (page = start_page; page <= end_page;) {
1076                 /* Make sure we are not inadvertently changing nodes */
1077                 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1078 
1079                 if (!pfn_valid_within(page_to_pfn(page))) {
1080                         page++;
1081                         continue;
1082                 }
1083 
1084                 if (!PageBuddy(page)) {
1085                         page++;
1086                         continue;
1087                 }
1088 
1089                 order = page_order(page);
1090                 list_move(&page->lru,
1091                           &zone->free_area[order].free_list[migratetype]);
1092                 set_freepage_migratetype(page, migratetype);
1093                 page += 1 << order;
1094                 pages_moved += 1 << order;
1095         }
1096 
1097         return pages_moved;
1098 }
1099 
1100 int move_freepages_block(struct zone *zone, struct page *page,
1101                                 int migratetype)
1102 {
1103         unsigned long start_pfn, end_pfn;
1104         struct page *start_page, *end_page;
1105 
1106         start_pfn = page_to_pfn(page);
1107         start_pfn = start_pfn & ~(pageblock_nr_pages-1);
1108         start_page = pfn_to_page(start_pfn);
1109         end_page = start_page + pageblock_nr_pages - 1;
1110         end_pfn = start_pfn + pageblock_nr_pages - 1;
1111 
1112         /* Do not cross zone boundaries */
1113         if (!zone_spans_pfn(zone, start_pfn))
1114                 start_page = page;
1115         if (!zone_spans_pfn(zone, end_pfn))
1116                 return 0;
1117 
1118         return move_freepages(zone, start_page, end_page, migratetype);
1119 }
1120 
1121 static void change_pageblock_range(struct page *pageblock_page,
1122                                         int start_order, int migratetype)
1123 {
1124         int nr_pageblocks = 1 << (start_order - pageblock_order);
1125 
1126         while (nr_pageblocks--) {
1127                 set_pageblock_migratetype(pageblock_page, migratetype);
1128                 pageblock_page += pageblock_nr_pages;
1129         }
1130 }
1131 
1132 /*
1133  * If breaking a large block of pages, move all free pages to the preferred
1134  * allocation list. If falling back for a reclaimable kernel allocation, be
1135  * more aggressive about taking ownership of free pages.
1136  *
1137  * On the other hand, never change migration type of MIGRATE_CMA pageblocks
1138  * nor move CMA pages to different free lists. We don't want unmovable pages
1139  * to be allocated from MIGRATE_CMA areas.
1140  *
1141  * Returns the new migratetype of the pageblock (or the same old migratetype
1142  * if it was unchanged).
1143  */
1144 static int try_to_steal_freepages(struct zone *zone, struct page *page,
1145                                   int start_type, int fallback_type)
1146 {
1147         int current_order = page_order(page);
1148 
1149         /*
1150          * When borrowing from MIGRATE_CMA, we need to release the excess
1151          * buddy pages to CMA itself. We also ensure the freepage_migratetype
1152          * is set to CMA so it is returned to the correct freelist in case
1153          * the page ends up being not actually allocated from the pcp lists.
1154          */
1155         if (is_migrate_cma(fallback_type))
1156                 return fallback_type;
1157 
1158         /* Take ownership for orders >= pageblock_order */
1159         if (current_order >= pageblock_order) {
1160                 change_pageblock_range(page, current_order, start_type);
1161                 return start_type;
1162         }
1163 
1164         if (current_order >= pageblock_order / 2 ||
1165             start_type == MIGRATE_RECLAIMABLE ||
1166             page_group_by_mobility_disabled) {
1167                 int pages;
1168 
1169                 pages = move_freepages_block(zone, page, start_type);
1170 
1171                 /* Claim the whole block if over half of it is free */
1172                 if (pages >= (1 << (pageblock_order-1)) ||
1173                                 page_group_by_mobility_disabled) {
1174 
1175                         set_pageblock_migratetype(page, start_type);
1176                         return start_type;
1177                 }
1178 
1179         }
1180 
1181         return fallback_type;
1182 }
1183 
1184 /* Remove an element from the buddy allocator from the fallback list */
1185 static inline struct page *
1186 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
1187 {
1188         struct free_area *area;
1189         unsigned int current_order;
1190         struct page *page;
1191         int migratetype, new_type, i;
1192 
1193         /* Find the largest possible block of pages in the other list */
1194         for (current_order = MAX_ORDER-1;
1195                                 current_order >= order && current_order <= MAX_ORDER-1;
1196                                 --current_order) {
1197                 for (i = 0;; i++) {
1198                         migratetype = fallbacks[start_migratetype][i];
1199 
1200                         /* MIGRATE_RESERVE handled later if necessary */
1201                         if (migratetype == MIGRATE_RESERVE)
1202                                 break;
1203 
1204                         area = &(zone->free_area[current_order]);
1205                         if (list_empty(&area->free_list[migratetype]))
1206                                 continue;
1207 
1208                         page = list_entry(area->free_list[migratetype].next,
1209                                         struct page, lru);
1210                         area->nr_free--;
1211 
1212                         new_type = try_to_steal_freepages(zone, page,
1213                                                           start_migratetype,
1214                                                           migratetype);
1215 
1216                         /* Remove the page from the freelists */
1217                         list_del(&page->lru);
1218                         rmv_page_order(page);
1219 
1220                         expand(zone, page, order, current_order, area,
1221                                new_type);
1222                         /* The freepage_migratetype may differ from pageblock's
1223                          * migratetype depending on the decisions in
1224                          * try_to_steal_freepages. This is OK as long as it does
1225                          * not differ for MIGRATE_CMA type.
1226                          */
1227                         set_freepage_migratetype(page, new_type);
1228 
1229                         trace_mm_page_alloc_extfrag(page, order, current_order,
1230                                 start_migratetype, migratetype, new_type);
1231 
1232                         return page;
1233                 }
1234         }
1235 
1236         return NULL;
1237 }
1238 
1239 /*
1240  * Do the hard work of removing an element from the buddy allocator.
1241  * Call me with the zone->lock already held.
1242  */
1243 static struct page *__rmqueue(struct zone *zone, unsigned int order,
1244                                                 int migratetype)
1245 {
1246         struct page *page;
1247 
1248 retry_reserve:
1249         page = __rmqueue_smallest(zone, order, migratetype);
1250 
1251         if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1252                 page = __rmqueue_fallback(zone, order, migratetype);
1253 
1254                 /*
1255                  * Use MIGRATE_RESERVE rather than fail an allocation. goto
1256                  * is used because __rmqueue_smallest is an inline function
1257                  * and we want just one call site
1258                  */
1259                 if (!page) {
1260                         migratetype = MIGRATE_RESERVE;
1261                         goto retry_reserve;
1262                 }
1263         }
1264 
1265         trace_mm_page_alloc_zone_locked(page, order, migratetype);
1266         return page;
1267 }
1268 
1269 /*
1270  * Obtain a specified number of elements from the buddy allocator, all under
1271  * a single hold of the lock, for efficiency.  Add them to the supplied list.
1272  * Returns the number of new pages which were placed at *list.
1273  */
1274 static int rmqueue_bulk(struct zone *zone, unsigned int order,
1275                         unsigned long count, struct list_head *list,
1276                         int migratetype, bool cold)
1277 {
1278         int i;
1279 
1280         spin_lock(&zone->lock);
1281         for (i = 0; i < count; ++i) {
1282                 struct page *page = __rmqueue(zone, order, migratetype);
1283                 if (unlikely(page == NULL))
1284                         break;
1285 
1286                 /*
1287                  * Split buddy pages returned by expand() are received here
1288                  * in physical page order. The page is added to the caller's
1289                  * list and the list head then moves forward. From the caller's
1290                  * perspective, the linked list is ordered by page number under
1291                  * some conditions. This is useful for IO devices that can
1292                  * merge IO requests if the physical pages are ordered
1293                  * properly.
1294                  */
1295                 if (likely(!cold))
1296                         list_add(&page->lru, list);
1297                 else
1298                         list_add_tail(&page->lru, list);
1299                 list = &page->lru;
1300                 if (is_migrate_cma(get_freepage_migratetype(page)))
1301                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1302                                               -(1 << order));
1303         }
1304         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1305         spin_unlock(&zone->lock);
1306         return i;
1307 }
1308 
1309 #ifdef CONFIG_NUMA
1310 /*
1311  * Called from the vmstat counter updater to drain pagesets of this
1312  * currently executing processor on remote nodes after they have
1313  * expired.
1314  *
1315  * Note that this function must be called with the thread pinned to
1316  * a single processor.
1317  */
1318 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1319 {
1320         unsigned long flags;
1321         int to_drain, batch;
1322 
1323         local_irq_save(flags);
1324         batch = ACCESS_ONCE(pcp->batch);
1325         to_drain = min(pcp->count, batch);
1326         if (to_drain > 0) {
1327                 free_pcppages_bulk(zone, to_drain, pcp);
1328                 pcp->count -= to_drain;
1329         }
1330         local_irq_restore(flags);
1331 }
1332 #endif
1333 
1334 /*
1335  * Drain pcplists of the indicated processor and zone.
1336  *
1337  * The processor must either be the current processor and the
1338  * thread pinned to the current processor or a processor that
1339  * is not online.
1340  */
1341 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
1342 {
1343         unsigned long flags;
1344         struct per_cpu_pageset *pset;
1345         struct per_cpu_pages *pcp;
1346 
1347         local_irq_save(flags);
1348         pset = per_cpu_ptr(zone->pageset, cpu);
1349 
1350         pcp = &pset->pcp;
1351         if (pcp->count) {
1352                 free_pcppages_bulk(zone, pcp->count, pcp);
1353                 pcp->count = 0;
1354         }
1355         local_irq_restore(flags);
1356 }
1357 
1358 /*
1359  * Drain pcplists of all zones on the indicated processor.
1360  *
1361  * The processor must either be the current processor and the
1362  * thread pinned to the current processor or a processor that
1363  * is not online.
1364  */
1365 static void drain_pages(unsigned int cpu)
1366 {
1367         struct zone *zone;
1368 
1369         for_each_populated_zone(zone) {
1370                 drain_pages_zone(cpu, zone);
1371         }
1372 }
1373 
1374 /*
1375  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1376  *
1377  * The CPU has to be pinned. When zone parameter is non-NULL, spill just
1378  * the single zone's pages.
1379  */
1380 void drain_local_pages(struct zone *zone)
1381 {
1382         int cpu = smp_processor_id();
1383 
1384         if (zone)
1385                 drain_pages_zone(cpu, zone);
1386         else
1387                 drain_pages(cpu);
1388 }
1389 
1390 /*
1391  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1392  *
1393  * When zone parameter is non-NULL, spill just the single zone's pages.
1394  *
1395  * Note that this code is protected against sending an IPI to an offline
1396  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1397  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1398  * nothing keeps CPUs from showing up after we populated the cpumask and
1399  * before the call to on_each_cpu_mask().
1400  */
1401 void drain_all_pages(struct zone *zone)
1402 {
1403         int cpu;
1404 
1405         /*
1406          * Allocate in the BSS so we won't require allocation in
1407          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1408          */
1409         static cpumask_t cpus_with_pcps;
1410 
1411         /*
1412          * We don't care about racing with CPU hotplug events,
1413          * as the offline notification will cause the notified
1414          * CPU to drain its pcps, and on_each_cpu_mask
1415          * disables preemption as part of its processing
1416          */
1417         for_each_online_cpu(cpu) {
1418                 struct per_cpu_pageset *pcp;
1419                 struct zone *z;
1420                 bool has_pcps = false;
1421 
1422                 if (zone) {
1423                         pcp = per_cpu_ptr(zone->pageset, cpu);
1424                         if (pcp->pcp.count)
1425                                 has_pcps = true;
1426                 } else {
1427                         for_each_populated_zone(z) {
1428                                 pcp = per_cpu_ptr(z->pageset, cpu);
1429                                 if (pcp->pcp.count) {
1430                                         has_pcps = true;
1431                                         break;
1432                                 }
1433                         }
1434                 }
1435 
1436                 if (has_pcps)
1437                         cpumask_set_cpu(cpu, &cpus_with_pcps);
1438                 else
1439                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
1440         }
1441         on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
1442                                                                 zone, 1);
1443 }
1444 
1445 #ifdef CONFIG_HIBERNATION
1446 
1447 void mark_free_pages(struct zone *zone)
1448 {
1449         unsigned long pfn, max_zone_pfn;
1450         unsigned long flags;
1451         unsigned int order, t;
1452         struct list_head *curr;
1453 
1454         if (zone_is_empty(zone))
1455                 return;
1456 
1457         spin_lock_irqsave(&zone->lock, flags);
1458 
1459         max_zone_pfn = zone_end_pfn(zone);
1460         for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1461                 if (pfn_valid(pfn)) {
1462                         struct page *page = pfn_to_page(pfn);
1463 
1464                         if (!swsusp_page_is_forbidden(page))
1465                                 swsusp_unset_page_free(page);
1466                 }
1467 
1468         for_each_migratetype_order(order, t) {
1469                 list_for_each(curr, &zone->free_area[order].free_list[t]) {
1470                         unsigned long i;
1471 
1472                         pfn = page_to_pfn(list_entry(curr, struct page, lru));
1473                         for (i = 0; i < (1UL << order); i++)
1474                                 swsusp_set_page_free(pfn_to_page(pfn + i));
1475                 }
1476         }
1477         spin_unlock_irqrestore(&zone->lock, flags);
1478 }
1479 #endif /* CONFIG_HIBERNATION */
1480 
1481 /*
1482  * Free a 0-order page
1483  * cold == true ? free a cold page : free a hot page
1484  */
1485 void free_hot_cold_page(struct page *page, bool cold)
1486 {
1487         struct zone *zone = page_zone(page);
1488         struct per_cpu_pages *pcp;
1489         unsigned long flags;
1490         unsigned long pfn = page_to_pfn(page);
1491         int migratetype;
1492 
1493         if (!free_pages_prepare(page, 0))
1494                 return;
1495 
1496         migratetype = get_pfnblock_migratetype(page, pfn);
1497         set_freepage_migratetype(page, migratetype);
1498         local_irq_save(flags);
1499         __count_vm_event(PGFREE);
1500 
1501         /*
1502          * We only track unmovable, reclaimable and movable on pcp lists.
1503          * Free ISOLATE pages back to the allocator because they are being
1504          * offlined but treat RESERVE as movable pages so we can get those
1505          * areas back if necessary. Otherwise, we may have to free
1506          * excessively into the page allocator
1507          */
1508         if (migratetype >= MIGRATE_PCPTYPES) {
1509                 if (unlikely(is_migrate_isolate(migratetype))) {
1510                         free_one_page(zone, page, pfn, 0, migratetype);
1511                         goto out;
1512                 }
1513                 migratetype = MIGRATE_MOVABLE;
1514         }
1515 
1516         pcp = &this_cpu_ptr(zone->pageset)->pcp;
1517         if (!cold)
1518                 list_add(&page->lru, &pcp->lists[migratetype]);
1519         else
1520                 list_add_tail(&page->lru, &pcp->lists[migratetype]);
1521         pcp->count++;
1522         if (pcp->count >= pcp->high) {
1523                 unsigned long batch = ACCESS_ONCE(pcp->batch);
1524                 free_pcppages_bulk(zone, batch, pcp);
1525                 pcp->count -= batch;
1526         }
1527 
1528 out:
1529         local_irq_restore(flags);
1530 }
1531 
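free_hot_cold_page() above parks an order-0 page on the per-cpu list and, once pcp->count reaches pcp->high, hands a batch of pcp->batch pages back to the buddy allocator in one go. Below is a minimal userspace sketch of just that counter bookkeeping (illustrative only; struct pcp_model, model_free_hot_cold_page() and freed_to_buddy are invented names, and no real pages are involved):

#include <stdio.h>

/* Illustrative only: models pcp->count/high/batch bookkeeping, not real pages. */
struct pcp_model { int count, high, batch; };

static int freed_to_buddy;                 /* stands in for free_pcppages_bulk() */

static void model_free_hot_cold_page(struct pcp_model *pcp)
{
        pcp->count++;                      /* page added to the per-cpu list */
        if (pcp->count >= pcp->high) {     /* list too long: trim one batch */
                freed_to_buddy += pcp->batch;
                pcp->count -= pcp->batch;
        }
}

int main(void)
{
        struct pcp_model pcp = { .count = 0, .high = 6, .batch = 2 };

        for (int i = 0; i < 10; i++)
                model_free_hot_cold_page(&pcp);
        /* With high=6, batch=2 the list length never exceeds 6. */
        printf("on pcp list: %d, returned to buddy: %d\n",
               pcp.count, freed_to_buddy);
        return 0;
}
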
1532 /*
1533  * Free a list of 0-order pages
1534  */
1535 void free_hot_cold_page_list(struct list_head *list, bool cold)
1536 {
1537         struct page *page, *next;
1538 
1539         list_for_each_entry_safe(page, next, list, lru) {
1540                 trace_mm_page_free_batched(page, cold);
1541                 free_hot_cold_page(page, cold);
1542         }
1543 }
1544 
1545 /*
1546  * split_page takes a non-compound higher-order page, and splits it into
1547  * n (1<<order) sub-pages: page[0..n-1]
1548  * Each sub-page must be freed individually.
1549  *
1550  * Note: this is probably too low level an operation for use in drivers.
1551  * Please consult with lkml before using this in your driver.
1552  */
1553 void split_page(struct page *page, unsigned int order)
1554 {
1555         int i;
1556 
1557         VM_BUG_ON_PAGE(PageCompound(page), page);
1558         VM_BUG_ON_PAGE(!page_count(page), page);
1559 
1560 #ifdef CONFIG_KMEMCHECK
1561         /*
1562          * Split shadow pages too, because free(page[0]) would
1563          * otherwise free the whole shadow.
1564          */
1565         if (kmemcheck_page_is_tracked(page))
1566                 split_page(virt_to_page(page[0].shadow), order);
1567 #endif
1568 
1569         set_page_owner(page, 0, 0);
1570         for (i = 1; i < (1 << order); i++) {
1571                 set_page_refcounted(page + i);
1572                 set_page_owner(page + i, 0, 0);
1573         }
1574 }
1575 EXPORT_SYMBOL_GPL(split_page);
1576 
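split_page() turns one non-compound order-N allocation into 1<<N independently freeable order-0 pages; apart from the kmemcheck/page_owner bookkeeping, the real work is giving every tail page its own reference count (the head page already has one). A toy userspace model of that idea (struct fake_page and model_split_page() are invented for illustration):

#include <stdio.h>

struct fake_page { int refcount; };        /* illustrative stand-in for struct page */

static void model_split_page(struct fake_page *page, unsigned int order)
{
        /* page[0] keeps the reference it was allocated with */
        for (int i = 1; i < (1 << order); i++)
                page[i].refcount = 1;      /* models set_page_refcounted() */
}

int main(void)
{
        struct fake_page block[8] = { { .refcount = 1 } };   /* one order-3 block */

        model_split_page(block, 3);
        for (int i = 0; i < 8; i++)        /* each sub-page may now be freed alone */
                printf("page[%d] refcount=%d\n", i, block[i].refcount);
        return 0;
}
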
1577 int __isolate_free_page(struct page *page, unsigned int order)
1578 {
1579         unsigned long watermark;
1580         struct zone *zone;
1581         int mt;
1582 
1583         BUG_ON(!PageBuddy(page));
1584 
1585         zone = page_zone(page);
1586         mt = get_pageblock_migratetype(page);
1587 
1588         if (!is_migrate_isolate(mt)) {
1589                 /* Obey watermarks as if the page was being allocated */
1590                 watermark = low_wmark_pages(zone) + (1 << order);
1591                 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1592                         return 0;
1593 
1594                 __mod_zone_freepage_state(zone, -(1UL << order), mt);
1595         }
1596 
1597         /* Remove page from free list */
1598         list_del(&page->lru);
1599         zone->free_area[order].nr_free--;
1600         rmv_page_order(page);
1601 
1602         /* Set the pageblock if the isolated page is at least half of a pageblock */
1603         if (order >= pageblock_order - 1) {
1604                 struct page *endpage = page + (1 << order) - 1;
1605                 for (; page < endpage; page += pageblock_nr_pages) {
1606                         int mt = get_pageblock_migratetype(page);
1607                         if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
1608                                 set_pageblock_migratetype(page,
1609                                                           MIGRATE_MOVABLE);
1610                 }
1611         }
1612 
1613         set_page_owner(page, order, 0);
1614         return 1UL << order;
1615 }
1616 
1617 /*
1618  * Similar to split_page except the page is already free. As this is only
1619  * being used for migration, the migratetype of the block also changes.
1620  * As this is called with interrupts disabled, the caller is responsible
1621  * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1622  * are enabled.
1623  *
1624  * Note: this is probably too low level an operation for use in drivers.
1625  * Please consult with lkml before using this in your driver.
1626  */
1627 int split_free_page(struct page *page)
1628 {
1629         unsigned int order;
1630         int nr_pages;
1631 
1632         order = page_order(page);
1633 
1634         nr_pages = __isolate_free_page(page, order);
1635         if (!nr_pages)
1636                 return 0;
1637 
1638         /* Split into individual pages */
1639         set_page_refcounted(page);
1640         split_page(page, order);
1641         return nr_pages;
1642 }
1643 
1644 /*
1645  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1646  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1647  * or two.
1648  */
1649 static inline
1650 struct page *buffered_rmqueue(struct zone *preferred_zone,
1651                         struct zone *zone, unsigned int order,
1652                         gfp_t gfp_flags, int migratetype)
1653 {
1654         unsigned long flags;
1655         struct page *page;
1656         bool cold = ((gfp_flags & __GFP_COLD) != 0);
1657 
1658 again:
1659         if (likely(order == 0)) {
1660                 struct per_cpu_pages *pcp;
1661                 struct list_head *list;
1662 
1663                 local_irq_save(flags);
1664                 pcp = &this_cpu_ptr(zone->pageset)->pcp;
1665                 list = &pcp->lists[migratetype];
1666                 if (list_empty(list)) {
1667                         pcp->count += rmqueue_bulk(zone, 0,
1668                                         pcp->batch, list,
1669                                         migratetype, cold);
1670                         if (unlikely(list_empty(list)))
1671                                 goto failed;
1672                 }
1673 
1674                 if (cold)
1675                         page = list_entry(list->prev, struct page, lru);
1676                 else
1677                         page = list_entry(list->next, struct page, lru);
1678 
1679                 list_del(&page->lru);
1680                 pcp->count--;
1681         } else {
1682                 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1683                         /*
1684                          * __GFP_NOFAIL is not to be used in new code.
1685                          *
1686                          * All __GFP_NOFAIL callers should be fixed so that they
1687                          * properly detect and handle allocation failures.
1688                          *
1689                          * We most definitely don't want callers attempting to
1690                          * allocate greater than order-1 page units with
1691                          * __GFP_NOFAIL.
1692                          */
1693                         WARN_ON_ONCE(order > 1);
1694                 }
1695                 spin_lock_irqsave(&zone->lock, flags);
1696                 page = __rmqueue(zone, order, migratetype);
1697                 spin_unlock(&zone->lock);
1698                 if (!page)
1699                         goto failed;
1700                 __mod_zone_freepage_state(zone, -(1 << order),
1701                                           get_freepage_migratetype(page));
1702         }
1703 
1704         __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
1705         if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
1706             !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
1707                 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
1708 
1709         __count_zone_vm_events(PGALLOC, zone, 1 << order);
1710         zone_statistics(preferred_zone, zone, gfp_flags);
1711         local_irq_restore(flags);
1712 
1713         VM_BUG_ON_PAGE(bad_range(zone, page), page);
1714         if (prep_new_page(page, order, gfp_flags))
1715                 goto again;
1716         return page;
1717 
1718 failed:
1719         local_irq_restore(flags);
1720         return NULL;
1721 }
1722 
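For order-0 requests, buffered_rmqueue() stays off zone->lock: it pops a page from the per-cpu list, refills the list with pcp->batch pages via rmqueue_bulk() when it runs dry, and serves hot requests from the head of the list (list->next) and __GFP_COLD requests from the tail (list->prev). A rough userspace sketch of that fast path using a flat array as the list (BATCH, refill_batch() and take() are invented names):

#include <stdio.h>

#define BATCH 4

static int list[64], head, tail;           /* toy stand-in for pcp->lists[] */

static void refill_batch(void)             /* models rmqueue_bulk() */
{
        static int next_pfn = 100;
        for (int i = 0; i < BATCH; i++)
                list[tail++] = next_pfn++;
}

static int take(int cold)
{
        if (head == tail)                  /* list empty: refill from buddy */
                refill_batch();
        return cold ? list[--tail]         /* cold: take from the tail (list->prev) */
                    : list[head++];        /* hot: take from the head (list->next) */
}

int main(void)
{
        printf("hot  page: pfn %d\n", take(0));
        printf("cold page: pfn %d\n", take(1));
        return 0;
}
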
1723 #ifdef CONFIG_FAIL_PAGE_ALLOC
1724 
1725 static struct {
1726         struct fault_attr attr;
1727 
1728         u32 ignore_gfp_highmem;
1729         u32 ignore_gfp_wait;
1730         u32 min_order;
1731 } fail_page_alloc = {
1732         .attr = FAULT_ATTR_INITIALIZER,
1733         .ignore_gfp_wait = 1,
1734         .ignore_gfp_highmem = 1,
1735         .min_order = 1,
1736 };
1737 
1738 static int __init setup_fail_page_alloc(char *str)
1739 {
1740         return setup_fault_attr(&fail_page_alloc.attr, str);
1741 }
1742 __setup("fail_page_alloc=", setup_fail_page_alloc);
1743 
1744 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1745 {
1746         if (order < fail_page_alloc.min_order)
1747                 return false;
1748         if (gfp_mask & __GFP_NOFAIL)
1749                 return false;
1750         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1751                 return false;
1752         if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1753                 return false;
1754 
1755         return should_fail(&fail_page_alloc.attr, 1 << order);
1756 }
1757 
1758 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1759 
1760 static int __init fail_page_alloc_debugfs(void)
1761 {
1762         umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1763         struct dentry *dir;
1764 
1765         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1766                                         &fail_page_alloc.attr);
1767         if (IS_ERR(dir))
1768                 return PTR_ERR(dir);
1769 
1770         if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1771                                 &fail_page_alloc.ignore_gfp_wait))
1772                 goto fail;
1773         if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1774                                 &fail_page_alloc.ignore_gfp_highmem))
1775                 goto fail;
1776         if (!debugfs_create_u32("min-order", mode, dir,
1777                                 &fail_page_alloc.min_order))
1778                 goto fail;
1779 
1780         return 0;
1781 fail:
1782         debugfs_remove_recursive(dir);
1783 
1784         return -ENOMEM;
1785 }
1786 
1787 late_initcall(fail_page_alloc_debugfs);
1788 
1789 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1790 
1791 #else /* CONFIG_FAIL_PAGE_ALLOC */
1792 
1793 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1794 {
1795         return false;
1796 }
1797 
1798 #endif /* CONFIG_FAIL_PAGE_ALLOC */
1799 
1800 /*
1801  * Return true if free pages are above 'mark'. This takes into account the order
1802  * of the allocation.
1803  */
1804 static bool __zone_watermark_ok(struct zone *z, unsigned int order,
1805                         unsigned long mark, int classzone_idx, int alloc_flags,
1806                         long free_pages)
1807 {
1808         /* free_pages may go negative - that's OK */
1809         long min = mark;
1810         int o;
1811         long free_cma = 0;
1812 
1813         free_pages -= (1 << order) - 1;
1814         if (alloc_flags & ALLOC_HIGH)
1815                 min -= min / 2;
1816         if (alloc_flags & ALLOC_HARDER)
1817                 min -= min / 4;
1818 #ifdef CONFIG_CMA
1819         /* If allocation can't use CMA areas don't use free CMA pages */
1820         if (!(alloc_flags & ALLOC_CMA))
1821                 free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
1822 #endif
1823 
1824         if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
1825                 return false;
1826         for (o = 0; o < order; o++) {
1827                 /* At the next order, this order's pages become unavailable */
1828                 free_pages -= z->free_area[o].nr_free << o;
1829 
1830                 /* Require fewer higher order pages to be free */
1831                 min >>= 1;
1832 
1833                 if (free_pages <= min)
1834                         return false;
1835         }
1836         return true;
1837 }
1838 
1839 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1840                       int classzone_idx, int alloc_flags)
1841 {
1842         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1843                                         zone_page_state(z, NR_FREE_PAGES));
1844 }
1845 
1846 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
1847                         unsigned long mark, int classzone_idx, int alloc_flags)
1848 {
1849         long free_pages = zone_page_state(z, NR_FREE_PAGES);
1850 
1851         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1852                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1853 
1854         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1855                                                                 free_pages);
1856 }
1857 
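__zone_watermark_ok() does not just compare a free-page count against 'mark': for an order-N request it discounts all free pages sitting in orders below N, halving the required margin at each step, so a fragmented zone can fail a high-order check even with plenty of pages free overall. A standalone model of that arithmetic, with the lowmem reserve and CMA adjustments left out for brevity (MAX_ORDER_MODEL, nr_free[] and model_watermark_ok() are illustrative names):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER_MODEL 11

/*
 * Illustrative model of __zone_watermark_ok(): nr_free[o] is the number of
 * free blocks of size 1<<o, mirroring zone->free_area[o].nr_free.
 */
static bool model_watermark_ok(unsigned int order, long mark, long free_pages,
                               const long nr_free[MAX_ORDER_MODEL])
{
        long min = mark;

        free_pages -= (1 << order) - 1;
        if (free_pages <= min)                   /* lowmem_reserve omitted here */
                return false;
        for (unsigned int o = 0; o < order; o++) {
                free_pages -= nr_free[o] << o;   /* pages below 'order' don't help */
                min >>= 1;                       /* but demand less of what's left */
                if (free_pages <= min)
                        return false;
        }
        return true;
}

int main(void)
{
        /* 1000 free pages, but almost all of them as order-0 fragments. */
        long nr_free[MAX_ORDER_MODEL] = { [0] = 968, [3] = 4 };
        long free_pages = 968 + 4 * 8;           /* 1000 pages in total */

        printf("order-0 ok: %d\n", model_watermark_ok(0, 128, free_pages, nr_free));
        printf("order-3 ok: %d\n", model_watermark_ok(3, 128, free_pages, nr_free));
        return 0;
}
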
1858 #ifdef CONFIG_NUMA
1859 /*
1860  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1861  * skip over zones that are not allowed by the cpuset, or that have
1862  * been recently (in the last second) found to be nearly full.  See further
1863  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1864  * that have to skip over a lot of full or unallowed zones.
1865  *
1866  * If the zonelist cache is present in the passed zonelist, then
1867  * returns a pointer to the allowed node mask (either the current
1868  * task's mems_allowed, or node_states[N_MEMORY].)
1869  *
1870  * If the zonelist cache is not available for this zonelist, does
1871  * nothing and returns NULL.
1872  *
1873  * If the fullzones BITMAP in the zonelist cache is stale (more than
1874  * a second since last zap'd) then we zap it out (clear its bits.)
1875  *
1876  * We hold off even calling zlc_setup, until after we've checked the
1877  * first zone in the zonelist, on the theory that most allocations will
1878  * be satisfied from that first zone, so best to examine that zone as
1879  * quickly as we can.
1880  */
1881 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1882 {
1883         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1884         nodemask_t *allowednodes;       /* zonelist_cache approximation */
1885 
1886         zlc = zonelist->zlcache_ptr;
1887         if (!zlc)
1888                 return NULL;
1889 
1890         if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1891                 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1892                 zlc->last_full_zap = jiffies;
1893         }
1894 
1895         allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1896                                         &cpuset_current_mems_allowed :
1897                                         &node_states[N_MEMORY];
1898         return allowednodes;
1899 }
1900 
1901 /*
1902  * Given 'z' scanning a zonelist, run a couple of quick checks to see
1903  * if it is worth looking at further for free memory:
1904  *  1) Check that the zone isn't thought to be full (doesn't have its
1905  *     bit set in the zonelist_cache fullzones BITMAP).
1906  *  2) Check that the zone's node (obtained from the zonelist_cache
1907  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1908  * Return true (non-zero) if zone is worth looking at further, or
1909  * else return false (zero) if it is not.
1910  *
1911  * This check -ignores- the distinction between various watermarks,
1912  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1913  * found to be full for any variation of these watermarks, it will
1914  * be considered full for up to one second by all requests, unless
1915  * we are so low on memory on all allowed nodes that we are forced
1916  * into the second scan of the zonelist.
1917  *
1918  * In the second scan we ignore this zonelist cache and exactly
1919  * apply the watermarks to all zones, even if it is slower to do so.
1920  * We are low on memory in the second scan, and should leave no stone
1921  * unturned looking for a free page.
1922  */
1923 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1924                                                 nodemask_t *allowednodes)
1925 {
1926         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1927         int i;                          /* index of *z in zonelist zones */
1928         int n;                          /* node that zone *z is on */
1929 
1930         zlc = zonelist->zlcache_ptr;
1931         if (!zlc)
1932                 return 1;
1933 
1934         i = z - zonelist->_zonerefs;
1935         n = zlc->z_to_n[i];
1936 
1937         /* This zone is worth trying if it is allowed but not full */
1938         return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1939 }
1940 
1941 /*
1942  * Given 'z' scanning a zonelist, set the corresponding bit in
1943  * zlc->fullzones, so that subsequent attempts to allocate a page
1944  * from that zone don't waste time re-examining it.
1945  */
1946 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1947 {
1948         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1949         int i;                          /* index of *z in zonelist zones */
1950 
1951         zlc = zonelist->zlcache_ptr;
1952         if (!zlc)
1953                 return;
1954 
1955         i = z - zonelist->_zonerefs;
1956 
1957         set_bit(i, zlc->fullzones);
1958 }
1959 
1960 /*
1961  * clear all zones full, called after direct reclaim makes progress so that
1962  * a zone that was recently full is not skipped over for up to a second
1963  */
1964 static void zlc_clear_zones_full(struct zonelist *zonelist)
1965 {
1966         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1967 
1968         zlc = zonelist->zlcache_ptr;
1969         if (!zlc)
1970                 return;
1971 
1972         bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1973 }
1974 
1975 static bool zone_local(struct zone *local_zone, struct zone *zone)
1976 {
1977         return local_zone->node == zone->node;
1978 }
1979 
1980 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1981 {
1982         return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
1983                                 RECLAIM_DISTANCE;
1984 }
1985 
1986 #else   /* CONFIG_NUMA */
1987 
1988 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1989 {
1990         return NULL;
1991 }
1992 
1993 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1994                                 nodemask_t *allowednodes)
1995 {
1996         return 1;
1997 }
1998 
1999 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
2000 {
2001 }
2002 
2003 static void zlc_clear_zones_full(struct zonelist *zonelist)
2004 {
2005 }
2006 
2007 static bool zone_local(struct zone *local_zone, struct zone *zone)
2008 {
2009         return true;
2010 }
2011 
2012 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
2013 {
2014         return true;
2015 }
2016 
2017 #endif  /* CONFIG_NUMA */
2018 
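The zonelist cache above boils down to a bitmap of zones recently found full plus a timestamp: a set bit lets the scan skip a zone's watermark checks for up to a second, after which the whole bitmap is zapped. A rough userspace sketch of that core idea, leaving out the cpuset/allowed-nodes filtering (time(NULL) stands in for jiffies; fullzones, model_zlc_setup() and the other names are invented):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_ZONES_MODEL 4

static unsigned long fullzones;            /* one bit per zone, like zlc->fullzones */
static time_t last_full_zap;               /* like zlc->last_full_zap */

static void model_zlc_setup(void)          /* zap stale bits, as zlc_setup() does */
{
        if (time(NULL) > last_full_zap + 1) {
                fullzones = 0;
                last_full_zap = time(NULL);
        }
}

static bool model_zone_worth_trying(int zone)
{
        return !(fullzones & (1UL << zone));
}

static void model_mark_zone_full(int zone)
{
        fullzones |= 1UL << zone;
}

int main(void)
{
        model_zlc_setup();
        model_mark_zone_full(1);           /* watermark check failed for zone 1 */
        for (int z = 0; z < MAX_ZONES_MODEL; z++)
                printf("zone %d worth trying: %d\n", z, model_zone_worth_trying(z));
        return 0;
}
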
2019 static void reset_alloc_batches(struct zone *preferred_zone)
2020 {
2021         struct zone *zone = preferred_zone->zone_pgdat->node_zones;
2022 
2023         do {
2024                 mod_zone_page_state(zone, NR_ALLOC_BATCH,
2025                         high_wmark_pages(zone) - low_wmark_pages(zone) -
2026                         atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
2027                 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
2028         } while (zone++ != preferred_zone);
2029 }
2030 
2031 /*
2032  * get_page_from_freelist goes through the zonelist trying to allocate
2033  * a page.
2034  */
2035 static struct page *
2036 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
2037                 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
2038                 struct zone *preferred_zone, int classzone_idx, int migratetype)
2039 {
2040         struct zoneref *z;
2041         struct page *page = NULL;
2042         struct zone *zone;
2043         nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
2044         int zlc_active = 0;             /* set if using zonelist_cache */
2045         int did_zlc_setup = 0;          /* just call zlc_setup() one time */
2046         bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
2047                                 (gfp_mask & __GFP_WRITE);
2048         int nr_fair_skipped = 0;
2049         bool zonelist_rescan;
2050 
2051 zonelist_scan:
2052         zonelist_rescan = false;
2053 
2054         /*
2055          * Scan zonelist, looking for a zone with enough free.
2056          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
2057          */
2058         for_each_zone_zonelist_nodemask(zone, z, zonelist,
2059                                                 high_zoneidx, nodemask) {
2060                 unsigned long mark;
2061 
2062                 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2063                         !zlc_zone_worth_trying(zonelist, z, allowednodes))
2064                                 continue;
2065                 if (cpusets_enabled() &&
2066                         (alloc_flags & ALLOC_CPUSET) &&
2067                         !cpuset_zone_allowed(zone, gfp_mask))
2068                                 continue;
2069                 /*
2070                  * Distribute pages in proportion to the individual
2071                  * zone size to ensure fair page aging.  The zone a
2072                  * page was allocated in should have no effect on the
2073                  * time the page has in memory before being reclaimed.
2074                  */
2075                 if (alloc_flags & ALLOC_FAIR) {
2076                         if (!zone_local(preferred_zone, zone))
2077                                 break;
2078                         if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
2079                                 nr_fair_skipped++;
2080                                 continue;
2081                         }
2082                 }
2083                 /*
2084                  * When allocating a page cache page for writing, we
2085                  * want to get it from a zone that is within its dirty
2086                  * limit, such that no single zone holds more than its
2087                  * proportional share of globally allowed dirty pages.
2088                  * The dirty limits take into account the zone's
2089                  * lowmem reserves and high watermark so that kswapd
2090                  * should be able to balance it without having to
2091                  * write pages from its LRU list.
2092                  *
2093                  * This may look like it could increase pressure on
2094                  * lower zones by failing allocations in higher zones
2095                  * before they are full.  But the pages that do spill
2096                  * over are limited as the lower zones are protected
2097                  * by this very same mechanism.  It should not become
2098                  * a practical burden to them.
2099                  *
2100                  * XXX: For now, allow allocations to potentially
2101                  * exceed the per-zone dirty limit in the slowpath
2102                  * (ALLOC_WMARK_LOW unset) before going into reclaim,
2103                  * which is important when on a NUMA setup the allowed
2104                  * zones are together not big enough to reach the
2105                  * global limit.  The proper fix for these situations
2106                  * will require awareness of zones in the
2107                  * dirty-throttling and the flusher threads.
2108                  */
2109                 if (consider_zone_dirty && !zone_dirty_ok(zone))
2110                         continue;
2111 
2112                 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2113                 if (!zone_watermark_ok(zone, order, mark,
2114                                        classzone_idx, alloc_flags)) {
2115                         int ret;
2116 
2117                         /* Checked here to keep the fast path fast */
2118                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2119                         if (alloc_flags & ALLOC_NO_WATERMARKS)
2120                                 goto try_this_zone;
2121 
2122                         if (IS_ENABLED(CONFIG_NUMA) &&
2123                                         !did_zlc_setup && nr_online_nodes > 1) {
2124                                 /*
2125                                  * We do zlc_setup only if there are multiple
2126                                  * nodes, and only after the first zone allowed
2127                                  * by the cpuset has been considered.
2128                                  */
2129                                 allowednodes = zlc_setup(zonelist, alloc_flags);
2130                                 zlc_active = 1;
2131                                 did_zlc_setup = 1;
2132                         }
2133 
2134                         if (zone_reclaim_mode == 0 ||
2135                             !zone_allows_reclaim(preferred_zone, zone))
2136                                 goto this_zone_full;
2137 
2138                         /*
2139                          * As we may have just activated ZLC, check if the first
2140                          * eligible zone has failed zone_reclaim recently.
2141                          */
2142                         if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2143                                 !zlc_zone_worth_trying(zonelist, z, allowednodes))
2144                                 continue;
2145 
2146                         ret = zone_reclaim(zone, gfp_mask, order);
2147                         switch (ret) {
2148                         case ZONE_RECLAIM_NOSCAN:
2149                                 /* did not scan */
2150                                 continue;
2151                         case ZONE_RECLAIM_FULL:
2152                                 /* scanned but unreclaimable */
2153                                 continue;
2154                         default:
2155                                 /* did we reclaim enough */
2156                                 if (zone_watermark_ok(zone, order, mark,
2157                                                 classzone_idx, alloc_flags))
2158                                         goto try_this_zone;
2159 
2160                                 /*
2161                                  * Failed to reclaim enough to meet watermark.
2162                                  * Only mark the zone full if checking the min
2163                                  * watermark or if we failed to reclaim just
2164                                  * 1<<order pages or else the page allocator
2165                                  * fastpath will prematurely mark zones full
2166                                  * when the watermark is between the low and
2167                                  * min watermarks.
2168                                  */
2169                                 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
2170                                     ret == ZONE_RECLAIM_SOME)
2171                                         goto this_zone_full;
2172 
2173                                 continue;
2174                         }
2175                 }
2176 
2177 try_this_zone:
2178                 page = buffered_rmqueue(preferred_zone, zone, order,
2179                                                 gfp_mask, migratetype);
2180                 if (page)
2181                         break;
2182 this_zone_full:
2183                 if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
2184                         zlc_mark_zone_full(zonelist, z);
2185         }
2186 
2187         if (page) {
2188                 /*
2189                  * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
2190                  * necessary to allocate the page. The expectation is
2191                  * that the caller is taking steps that will free more
2192                  * memory. The caller should avoid the page being used
2193                  * for !PFMEMALLOC purposes.
2194                  */
2195                 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
2196                 return page;
2197         }
2198 
2199         /*
2200          * The first pass makes sure allocations are spread fairly within the
2201          * local node.  However, the local node might have free pages left
2202          * after the fairness batches are exhausted, and remote zones haven't
2203          * even been considered yet.  Try once more without fairness, and
2204          * include remote zones now, before entering the slowpath and waking
2205          * kswapd: prefer spilling to a remote zone over swapping locally.
2206          */
2207         if (alloc_flags & ALLOC_FAIR) {
2208                 alloc_flags &= ~ALLOC_FAIR;
2209                 if (nr_fair_skipped) {
2210                         zonelist_rescan = true;
2211                         reset_alloc_batches(preferred_zone);
2212                 }
2213                 if (nr_online_nodes > 1)
2214                         zonelist_rescan = true;
2215         }
2216 
2217         if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
2218                 /* Disable zlc cache for second zonelist scan */
2219                 zlc_active = 0;
2220                 zonelist_rescan = true;
2221         }
2222 
2223         if (zonelist_rescan)
2224                 goto zonelist_scan;
2225 
2226         return NULL;
2227 }
2228 
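The ALLOC_FAIR pass in get_page_from_freelist() spreads allocations across the zones of the local node: each zone carries an allocation batch of roughly high_wmark - low_wmark pages, is skipped once that batch is depleted (ZONE_FAIR_DEPLETED), and all local batches are reset before the zonelist is rescanned. A simplified userspace sketch of that accounting (struct zone_model, BATCH_PAGES, alloc_fair() and so on are invented names, and real batches are sized per zone rather than by one constant):

#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES 2
#define BATCH_PAGES 4                      /* models high_wmark - low_wmark */

struct zone_model { const char *name; int alloc_batch; bool depleted; };

static struct zone_model zones[NR_ZONES] = {
        { "Normal", BATCH_PAGES }, { "DMA32", BATCH_PAGES },
};

static void reset_batches(void)            /* models reset_alloc_batches() */
{
        for (int i = 0; i < NR_ZONES; i++) {
                zones[i].alloc_batch = BATCH_PAGES;
                zones[i].depleted = false;
        }
}

static const char *alloc_fair(void)        /* one order-0 allocation, fair pass */
{
        for (int i = 0; i < NR_ZONES; i++) {
                if (zones[i].depleted)     /* ZONE_FAIR_DEPLETED: skip this zone */
                        continue;
                if (--zones[i].alloc_batch <= 0)
                        zones[i].depleted = true;
                return zones[i].name;
        }
        reset_batches();                   /* all batches used up: start over */
        return alloc_fair();
}

int main(void)
{
        for (int i = 0; i < 10; i++)       /* pages spread across both zones */
                printf("page %d from %s\n", i, alloc_fair());
        return 0;
}
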
2229 /*
2230  * Large machines with many possible nodes should not always dump per-node
2231  * meminfo in irq context.
2232  */
2233 static inline bool should_suppress_show_mem(void)
2234 {
2235         bool ret = false;
2236 
2237 #if NODES_SHIFT > 8
2238         ret = in_interrupt();
2239 #endif
2240         return ret;
2241 }
2242 
2243 static DEFINE_RATELIMIT_STATE(nopage_rs,
2244                 DEFAULT_RATELIMIT_INTERVAL,
2245                 DEFAULT_RATELIMIT_BURST);
2246 
2247 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2248 {
2249         unsigned int filter = SHOW_MEM_FILTER_NODES;
2250 
2251         if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2252             debug_guardpage_minorder() > 0)
2253                 return;
2254 
2255         /*
2256          * This documents exceptions given to allocations in certain
2257          * contexts that are allowed to allocate outside current's set
2258          * of allowed nodes.
2259          */
2260         if (!(gfp_mask & __GFP_NOMEMALLOC))
2261                 if (test_thread_flag(TIF_MEMDIE) ||
2262                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
2263                         filter &= ~SHOW_MEM_FILTER_NODES;
2264         if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2265                 filter &= ~SHOW_MEM_FILTER_NODES;
2266 
2267         if (fmt) {
2268                 struct va_format vaf;
2269                 va_list args;
2270 
2271                 va_start(args, fmt);
2272 
2273                 vaf.fmt = fmt;
2274                 vaf.va = &args;
2275 
2276                 pr_warn("%pV", &vaf);
2277 
2278                 va_end(args);
2279         }
2280 
2281         pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2282                 current->comm, order, gfp_mask);
2283 
2284         dump_stack();
2285         if (!should_suppress_show_mem())
2286                 show_mem(filter);
2287 }
2288 
2289 static inline int
2290 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2291                                 unsigned long did_some_progress,
2292                                 unsigned long pages_reclaimed)
2293 {
2294         /* Do not loop if specifically requested */
2295         if (gfp_mask & __GFP_NORETRY)
2296                 return 0;
2297 
2298         /* Always retry if specifically requested */
2299         if (gfp_mask & __GFP_NOFAIL)
2300                 return 1;
2301 
2302         /*
2303          * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
2304          * making forward progress without invoking OOM. Suspend also disables
2305          * storage devices so kswapd will not help. Bail if we are suspending.
2306          */
2307         if (!did_some_progress && pm_suspended_storage())
2308                 return 0;
2309 
2310         /*
2311          * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
2312          * means __GFP_NOFAIL, but that may not be true in other
2313          * implementations.
2314          */
2315         if (order <= PAGE_ALLOC_COSTLY_ORDER)
2316                 return 1;
2317 
2318         /*
2319          * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
2320          * specified, then we retry until we no longer reclaim any pages
2321          * (above), or we've reclaimed an order of pages at least as
2322          * large as the allocation's order. In both cases, if the
2323          * allocation still fails, we stop retrying.
2324          */
2325         if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2326                 return 1;
2327 
2328         return 0;
2329 }
2330 
2331 static inline struct page *
2332 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2333         struct zonelist *zonelist, enum zone_type high_zoneidx,
2334         nodemask_t *nodemask, struct zone *preferred_zone,
2335         int classzone_idx, int migratetype, unsigned long *did_some_progress)
2336 {
2337         struct page *page;
2338 
2339         *did_some_progress = 0;
2340 
2341         if (oom_killer_disabled)
2342                 return NULL;
2343 
2344         /*
2345          * Acquire the per-zone oom lock for each zone.  If that
2346          * fails, somebody else is making progress for us.
2347          */
2348         if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
2349                 *did_some_progress = 1;
2350                 schedule_timeout_uninterruptible(1);
2351                 return NULL;
2352         }
2353 
2354         /*
2355          * PM-freezer should be notified that there might be an OOM killer on
2356          * its way to kill and wake somebody up. This is too early and we might
2357          * end up not killing anything but false positives are acceptable.
2358          * See freeze_processes.
2359          */
2360         note_oom_kill();
2361 
2362         /*
2363          * Go through the zonelist yet one more time, keep very high watermark
2364          * here, this is only to catch a parallel oom killing, we must fail if
2365          * we're still under heavy pressure.
2366          */
2367         page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2368                 order, zonelist, high_zoneidx,
2369                 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2370                 preferred_zone, classzone_idx, migratetype);
2371         if (page)
2372                 goto out;
2373 
2374         if (!(gfp_mask & __GFP_NOFAIL)) {
2375                 /* Coredumps can quickly deplete all memory reserves */
2376                 if (current->flags & PF_DUMPCORE)
2377                         goto out;
2378                 /* The OOM killer will not help higher order allocs */
2379                 if (order > PAGE_ALLOC_COSTLY_ORDER)
2380                         goto out;
2381                 /* The OOM killer does not needlessly kill tasks for lowmem */
2382                 if (high_zoneidx < ZONE_NORMAL)
2383                         goto out;
2384                 /* The OOM killer does not compensate for light reclaim */
2385                 if (!(gfp_mask & __GFP_FS))
2386                         goto out;
2387                 /*
2388                  * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2389                  * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2390                  * The caller should handle page allocation failure by itself if
2391                  * it specifies __GFP_THISNODE.
2392                  * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2393                  */
2394                 if (gfp_mask & __GFP_THISNODE)
2395                         goto out;
2396         }
2397         /* Exhausted what can be done so it's blamo time */
2398         out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2399         *did_some_progress = 1;
2400 out:
2401         oom_zonelist_unlock(zonelist, gfp_mask);
2402         return page;
2403 }
2404 
2405 #ifdef CONFIG_COMPACTION
2406 /* Try memory compaction for high-order allocations before reclaim */
2407 static struct page *
2408 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2409         struct zonelist *zonelist, enum zone_type high_zoneidx,
2410         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2411         int classzone_idx, int migratetype, enum migrate_mode mode,
2412         int *contended_compaction, bool *deferred_compaction)
2413 {
2414         unsigned long compact_result;
2415         struct page *page;
2416 
2417         if (!order)
2418                 return NULL;
2419 
2420         current->flags |= PF_MEMALLOC;
2421         compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
2422                                                 nodemask, mode,
2423                                                 contended_compaction,
2424                                                 alloc_flags, classzone_idx);
2425         current->flags &= ~PF_MEMALLOC;
2426 
2427         switch (compact_result) {
2428         case COMPACT_DEFERRED:
2429                 *deferred_compaction = true;
2430                 /* fall-through */
2431         case COMPACT_SKIPPED:
2432                 return NULL;
2433         default:
2434                 break;
2435         }
2436 
2437         /*
2438          * In at least one zone compaction wasn't deferred or skipped, so let's
2439          * count a compaction stall
2440          */
2441         count_vm_event(COMPACTSTALL);
2442 
2443         page = get_page_from_freelist(gfp_mask, nodemask,
2444                         order, zonelist, high_zoneidx,
2445                         alloc_flags & ~ALLOC_NO_WATERMARKS,
2446                         preferred_zone, classzone_idx, migratetype);
2447 
2448         if (page) {
2449                 struct zone *zone = page_zone(page);
2450 
2451                 zone->compact_blockskip_flush = false;
2452                 compaction_defer_reset(zone, order, true);
2453                 count_vm_event(COMPACTSUCCESS);
2454                 return page;
2455         }
2456 
2457         /*
2458          * It's bad if a compaction run occurs and fails. The most likely reason
2459          * is that pages exist, but not enough to satisfy watermarks.
2460          */
2461         count_vm_event(COMPACTFAIL);
2462 
2463         cond_resched();
2464 
2465         return NULL;
2466 }
2467 #else
2468 static inline struct page *
2469 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2470         struct zonelist *zonelist, enum zone_type high_zoneidx,
2471         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2472         int classzone_idx, int migratetype, enum migrate_mode mode,
2473         int *contended_compaction, bool *deferred_compaction)
2474 {
2475         return NULL;
2476 }
2477 #endif /* CONFIG_COMPACTION */
2478 
2479 /* Perform direct synchronous page reclaim */
2480 static int
2481 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2482                   nodemask_t *nodemask)
2483 {
2484         struct reclaim_state reclaim_state;
2485         int progress;
2486 
2487         cond_resched();
2488 
2489         /* We now go into synchronous reclaim */
2490         cpuset_memory_pressure_bump();
2491         current->flags |= PF_MEMALLOC;
2492         lockdep_set_current_reclaim_state(gfp_mask);
2493         reclaim_state.reclaimed_slab = 0;
2494         current->reclaim_state = &reclaim_state;
2495 
2496         progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2497 
2498         current->reclaim_state = NULL;
2499         lockdep_clear_current_reclaim_state();
2500         current->flags &= ~PF_MEMALLOC;
2501 
2502         cond_resched();
2503 
2504         return progress;
2505 }
2506 
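__perform_reclaim() brackets direct reclaim with PF_MEMALLOC. Besides granting access to reserves, the flag is what the "Avoid recursion of direct reclaim" check in the slowpath further down keys off, so an allocation issued from inside the reclaim path cannot recurse into reclaim again. A toy sketch of that guard (PF_MEMALLOC_MODEL, task_flags, model_alloc() and model_direct_reclaim() are invented names):

#include <stdio.h>

#define PF_MEMALLOC_MODEL 0x1

static unsigned int task_flags;            /* stands in for current->flags */

static int model_alloc(int order);

static int model_direct_reclaim(void)      /* models __perform_reclaim() */
{
        task_flags |= PF_MEMALLOC_MODEL;
        /* reclaim itself may allocate (e.g. for swap IO); this must not recurse */
        model_alloc(0);
        task_flags &= ~PF_MEMALLOC_MODEL;
        return 1;                          /* pretend some progress was made */
}

static int model_alloc(int order)
{
        if (task_flags & PF_MEMALLOC_MODEL) {
                printf("order-%d alloc inside reclaim: no direct reclaim\n", order);
                return 0;
        }
        printf("order-%d alloc: entering direct reclaim\n", order);
        return model_direct_reclaim();
}

int main(void)
{
        model_alloc(2);
        return 0;
}
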
2507 /* The really slow allocator path where we enter direct reclaim */
2508 static inline struct page *
2509 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2510         struct zonelist *zonelist, enum zone_type high_zoneidx,
2511         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2512         int classzone_idx, int migratetype, unsigned long *did_some_progress)
2513 {
2514         struct page *page = NULL;
2515         bool drained = false;
2516 
2517         *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2518                                                nodemask);
2519         if (unlikely(!(*did_some_progress)))
2520                 return NULL;
2521 
2522         /* After successful reclaim, reconsider all zones for allocation */
2523         if (IS_ENABLED(CONFIG_NUMA))
2524                 zlc_clear_zones_full(zonelist);
2525 
2526 retry:
2527         page = get_page_from_freelist(gfp_mask, nodemask, order,
2528                                         zonelist, high_zoneidx,
2529                                         alloc_flags & ~ALLOC_NO_WATERMARKS,
2530                                         preferred_zone, classzone_idx,
2531                                         migratetype);
2532 
2533         /*
2534          * If an allocation failed after direct reclaim, it could be because
2535          * pages are pinned on the per-cpu lists. Drain them and try again
2536          */
2537         if (!page && !drained) {
2538                 drain_all_pages(NULL);
2539                 drained = true;
2540                 goto retry;
2541         }
2542 
2543         return page;
2544 }
2545 
2546 /*
2547  * This is called in the allocator slow-path if the allocation request is of
2548  * sufficient urgency to ignore watermarks and take other desperate measures
2549  */
2550 static inline struct page *
2551 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2552         struct zonelist *zonelist, enum zone_type high_zoneidx,
2553         nodemask_t *nodemask, struct zone *preferred_zone,
2554         int classzone_idx, int migratetype)
2555 {
2556         struct page *page;
2557 
2558         do {
2559                 page = get_page_from_freelist(gfp_mask, nodemask, order,
2560                         zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2561                         preferred_zone, classzone_idx, migratetype);
2562 
2563                 if (!page && gfp_mask & __GFP_NOFAIL)
2564                         wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2565         } while (!page && (gfp_mask & __GFP_NOFAIL));
2566 
2567         return page;
2568 }
2569 
2570 static void wake_all_kswapds(unsigned int order,
2571                              struct zonelist *zonelist,
2572                              enum zone_type high_zoneidx,
2573                              struct zone *preferred_zone,
2574                              nodemask_t *nodemask)
2575 {
2576         struct zoneref *z;
2577         struct zone *zone;
2578 
2579         for_each_zone_zonelist_nodemask(zone, z, zonelist,
2580                                                 high_zoneidx, nodemask)
2581                 wakeup_kswapd(zone, order, zone_idx(preferred_zone));
2582 }
2583 
2584 static inline int
2585 gfp_to_alloc_flags(gfp_t gfp_mask)
2586 {
2587         int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2588         const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
2589 
2590         /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2591         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2592 
2593         /*
2594          * The caller may dip into page reserves a bit more if the caller
2595          * cannot run direct reclaim, or if the caller has realtime scheduling
2596          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2597          * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
2598          */
2599         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2600 
2601         if (atomic) {
2602                 /*
2603                  * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
2604                  * if it can't schedule.
2605                  */
2606                 if (!(gfp_mask & __GFP_NOMEMALLOC))
2607                         alloc_flags |= ALLOC_HARDER;
2608                 /*
2609                  * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2610                  * comment for __cpuset_node_allowed().
2611                  */
2612                 alloc_flags &= ~ALLOC_CPUSET;
2613         } else if (unlikely(rt_task(current)) && !in_interrupt())
2614                 alloc_flags |= ALLOC_HARDER;
2615 
2616         if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2617                 if (gfp_mask & __GFP_MEMALLOC)
2618                         alloc_flags |= ALLOC_NO_WATERMARKS;
2619                 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2620                         alloc_flags |= ALLOC_NO_WATERMARKS;
2621                 else if (!in_interrupt() &&
2622                                 ((current->flags & PF_MEMALLOC) ||
2623                                  unlikely(test_thread_flag(TIF_MEMDIE))))
2624                         alloc_flags |= ALLOC_NO_WATERMARKS;
2625         }
2626 #ifdef CONFIG_CMA
2627         if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2628                 alloc_flags |= ALLOC_CMA;
2629 #endif
2630         return alloc_flags;
2631 }
2632 
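gfp_to_alloc_flags() collapses the gfp mask into the allocator-internal ALLOC_* flags: every request starts from ALLOC_WMARK_MIN|ALLOC_CPUSET, __GFP_HIGH maps directly to ALLOC_HIGH, and an atomic request (no __GFP_WAIT) additionally gets ALLOC_HARDER and drops the cpuset restriction. The sketch below shows only those common cases and ignores __GFP_NOMEMALLOC, realtime tasks and the no-watermark paths (the M_* bit values are invented; only the relationships between them matter):

#include <stdio.h>

/* Invented bit values; the real ALLOC_* constants live in mm/internal.h. */
#define M_WMARK_MIN     0x01
#define M_CPUSET        0x02
#define M_HIGH          0x04
#define M_HARDER        0x08

#define M_GFP_WAIT      0x10
#define M_GFP_HIGH      0x20

static unsigned int model_gfp_to_alloc_flags(unsigned int gfp)
{
        unsigned int flags = M_WMARK_MIN | M_CPUSET;

        if (gfp & M_GFP_HIGH)              /* __GFP_HIGH -> ALLOC_HIGH */
                flags |= M_HIGH;
        if (!(gfp & M_GFP_WAIT)) {         /* atomic: can't reclaim, so try harder */
                flags |= M_HARDER;
                flags &= ~M_CPUSET;        /* and ignore cpuset mems */
        }
        return flags;
}

int main(void)
{
        /* GFP_KERNEL-like: the caller may sleep and reclaim */
        printf("GFP_KERNEL-ish: %#x\n", model_gfp_to_alloc_flags(M_GFP_WAIT));
        /* GFP_ATOMIC-like: __GFP_HIGH set, __GFP_WAIT clear */
        printf("GFP_ATOMIC-ish: %#x\n", model_gfp_to_alloc_flags(M_GFP_HIGH));
        return 0;
}
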
2633 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2634 {
2635         return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2636 }
2637 
2638 static inline struct page *
2639 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2640         struct zonelist *zonelist, enum zone_type high_zoneidx,
2641         nodemask_t *nodemask, struct zone *preferred_zone,
2642         int classzone_idx, int migratetype)
2643 {
2644         const gfp_t wait = gfp_mask & __GFP_WAIT;
2645         struct page *page = NULL;
2646         int alloc_flags;
2647         unsigned long pages_reclaimed = 0;
2648         unsigned long did_some_progress;
2649         enum migrate_mode migration_mode = MIGRATE_ASYNC;
2650         bool deferred_compaction = false;
2651         int contended_compaction = COMPACT_CONTENDED_NONE;
2652 
2653         /*
2654          * In the slowpath, we sanity check order to avoid ever trying to
2655          * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2656          * be using allocators in order of preference for an area that is
2657          * too large.
2658          */
2659         if (order >= MAX_ORDER) {
2660                 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2661                 return NULL;
2662         }
2663 
2664         /*
2665          * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2666          * __GFP_NOWARN set) should not cause reclaim since the subsystem
2667          * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
2668          * using a larger set of nodes after it has established that the
2669          * allowed per node queues are empty and that nodes are
2670          * over allocated.
2671          */
2672         if (IS_ENABLED(CONFIG_NUMA) &&
2673             (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2674                 goto nopage;
2675 
2676 retry:
2677         if (!(gfp_mask & __GFP_NO_KSWAPD))
2678                 wake_all_kswapds(order, zonelist, high_zoneidx,
2679                                 preferred_zone, nodemask);
2680 
2681         /*
2682          * OK, we're below the kswapd watermark and have kicked background
2683          * reclaim. Now things get more complex, so set up alloc_flags according
2684          * to how we want to proceed.
2685          */
2686         alloc_flags = gfp_to_alloc_flags(gfp_mask);
2687 
2688         /*
2689          * Find the true preferred zone if the allocation is unconstrained by
2690          * cpusets.
2691          */
2692         if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
2693                 struct zoneref *preferred_zoneref;
2694                 preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
2695                                 NULL, &preferred_zone);
2696                 classzone_idx = zonelist_zone_idx(preferred_zoneref);
2697         }
2698 
2699         /* This is the last chance, in general, before the goto nopage. */
2700         page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2701                         high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2702                         preferred_zone, classzone_idx, migratetype);
2703         if (page)
2704                 goto got_pg;
2705 
2706         /* Allocate without watermarks if the context allows */
2707         if (alloc_flags & ALLOC_NO_WATERMARKS) {
2708                 /*
2709                  * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2710                  * the allocation is high priority and these types of
2711                  * allocations are system rather than user oriented
2712                  */
2713                 zonelist = node_zonelist(numa_node_id(), gfp_mask);
2714 
2715                 page = __alloc_pages_high_priority(gfp_mask, order,
2716                                 zonelist, high_zoneidx, nodemask,
2717                                 preferred_zone, classzone_idx, migratetype);
2718                 if (page) {
2719                         goto got_pg;
2720                 }
2721         }
2722 
2723         /* Atomic allocations - we can't balance anything */
2724         if (!wait) {
2725                 /*
2726                  * All existing users of the deprecated __GFP_NOFAIL are
2727                  * blockable, so warn of any new users that actually allow this
2728                  * type of allocation to fail.
2729                  */
2730                 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
2731                 goto nopage;
2732         }
2733 
2734         /* Avoid recursion of direct reclaim */
2735         if (current->flags & PF_MEMALLOC)
2736                 goto nopage;
2737 
2738         /* Avoid allocations with no watermarks from looping endlessly */
2739         if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2740                 goto nopage;
2741 
2742         /*
2743          * Try direct compaction. The first pass is asynchronous. Subsequent
2744          * attempts after direct reclaim are synchronous
2745          */
2746         page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2747                                         high_zoneidx, nodemask, alloc_flags,
2748                                         preferred_zone,
2749                                         classzone_idx, migratetype,
2750                                         migration_mode, &contended_compaction,
2751                                         &deferred_compaction);
2752         if (page)
2753                 goto got_pg;
2754 
2755         /* Checks for THP-specific high-order allocations */
2756         if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
2757                 /*
2758                  * If compaction is deferred for high-order allocations, it is
2759                  * because sync compaction recently failed. If this is the case
2760                  * and the caller requested a THP allocation, we do not want
2761                  * to heavily disrupt the system, so we fail the allocation
2762                  * instead of entering direct reclaim.
2763                  */
2764                 if (deferred_compaction)
2765                         goto nopage;
2766 
2767                 /*
2768                  * In all zones where compaction was attempted (and not
2769                  * deferred or skipped), lock contention has been detected.
2770                  * For THP allocation we do not want to disrupt the others
2771                  * so we fallback to base pages instead.
2772                  */
2773                 if (contended_compaction == COMPACT_CONTENDED_LOCK)
2774                         goto nopage;
2775 
2776                 /*
2777                  * If compaction was aborted due to need_resched(), we do not
2778                  * want to further increase allocation latency, unless it is
2779                  * khugepaged trying to collapse.
2780                  */
2781                 if (contended_compaction == COMPACT_CONTENDED_SCHED
2782                         && !(current->flags & PF_KTHREAD))
2783                         goto nopage;
2784         }
2785 
2786         /*
2787          * It can become very expensive to allocate transparent hugepages at
2788          * fault, so use asynchronous memory compaction for THP unless it is
2789          * khugepaged trying to collapse.
2790          */
2791         if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
2792                                                 (current->flags & PF_KTHREAD))
2793                 migration_mode = MIGRATE_SYNC_LIGHT;
2794 
2795         /* Try direct reclaim and then allocating */
2796         page = __alloc_pages_direct_reclaim(gfp_mask, order,
2797                                         zonelist, high_zoneidx,
2798                                         nodemask,
2799                                         alloc_flags, preferred_zone,
2800                                         classzone_idx, migratetype,
2801                                         &did_some_progress);
2802         if (page)
2803                 goto got_pg;
2804 
2805         /* Check if we should retry the allocation */
2806         pages_reclaimed += did_some_progress;
2807         if (should_alloc_retry(gfp_mask, order, did_some_progress,
2808                                                 pages_reclaimed)) {
2809                 /*
2810                  * If we fail to make progress by freeing individual
2811                  * pages, but the allocation wants us to keep going,
2812                  * start OOM killing tasks.
2813                  */
2814                 if (!did_some_progress) {
2815                         page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
2816                                                 high_zoneidx, nodemask,
2817                                                 preferred_zone, classzone_idx,
2818                                                 migratetype, &did_some_progress);
2819                         if (page)
2820                                 goto got_pg;
2821                         if (!did_some_progress)
2822                                 goto nopage;
2823                 }
2824                 /* Wait for some write requests to complete then retry */
2825                 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2826                 goto retry;
2827         } else {
2828                 /*
2829                  * High-order allocations do not necessarily loop after
2830                  * direct reclaim, and reclaim/compaction depends on compaction
2831                  * being called after reclaim, so call it directly if necessary.
2832                  */
2833                 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2834                                         high_zoneidx, nodemask, alloc_flags,
2835                                         preferred_zone,
2836                                         classzone_idx, migratetype,
2837                                         migration_mode, &contended_compaction,
2838                                         &deferred_compaction);
2839                 if (page)
2840                         goto got_pg;
2841         }
2842 
2843 nopage:
2844         warn_alloc_failed(gfp_mask, order, NULL);
2845         return page;
2846 got_pg:
2847         if (kmemcheck_enabled)
2848                 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2849 
2850         return page;
2851 }
2852 
2853 /*
2854  * This is the 'heart' of the zoned buddy allocator.
2855  */
2856 struct page *
2857 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2858                         struct zonelist *zonelist, nodemask_t *nodemask)
2859 {
2860         enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2861         struct zone *preferred_zone;
2862         struct zoneref *preferred_zoneref;
2863         struct page *page = NULL;
2864         int migratetype = gfpflags_to_migratetype(gfp_mask);
2865         unsigned int cpuset_mems_cookie;
2866         int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
2867         int classzone_idx;
2868 
2869         gfp_mask &= gfp_allowed_mask;
2870 
2871         lockdep_trace_alloc(gfp_mask);
2872 
2873         might_sleep_if(gfp_mask & __GFP_WAIT);
2874 
2875         if (should_fail_alloc_page(gfp_mask, order))
2876                 return NULL;
2877 
2878         /*
2879          * Check the zones suitable for the gfp_mask contain at least one
2880          * valid zone. It's possible to have an empty zonelist as a result
2881          * of GFP_THISNODE and a memoryless node
2882          */
2883         if (unlikely(!zonelist->_zonerefs->zone))
2884                 return NULL;
2885 
2886         if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
2887                 alloc_flags |= ALLOC_CMA;
2888 
2889 retry_cpuset:
2890         cpuset_mems_cookie = read_mems_allowed_begin();
2891 
2892         /* The preferred zone is used for statistics later */
2893         preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
2894                                 nodemask ? : &cpuset_current_mems_allowed,
2895                                 &preferred_zone);
2896         if (!preferred_zone)
2897                 goto out;
2898         classzone_idx = zonelist_zone_idx(preferred_zoneref);
2899 
2900         /* First allocation attempt */
2901         page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2902                         zonelist, high_zoneidx, alloc_flags,
2903                         preferred_zone, classzone_idx, migratetype);
2904         if (unlikely(!page)) {
2905                 /*
2906                  * Runtime PM, block IO and its error handling path
2907                  * can deadlock because I/O on the device might not
2908                  * complete.
2909                  */
2910                 gfp_mask = memalloc_noio_flags(gfp_mask);
2911                 page = __alloc_pages_slowpath(gfp_mask, order,
2912                                 zonelist, high_zoneidx, nodemask,
2913                                 preferred_zone, classzone_idx, migratetype);
2914         }
2915 
2916         trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2917 
2918 out:
2919         /*
2920          * When updating a task's mems_allowed, it is possible to race with
2921          * parallel threads in such a way that an allocation can fail while
2922          * the mask is being updated. If a page allocation is about to fail,
2923          * check if the cpuset changed during allocation and if so, retry.
2924          */
2925         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2926                 goto retry_cpuset;
2927 
2928         return page;
2929 }
2930 EXPORT_SYMBOL(__alloc_pages_nodemask);
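/*
 * Illustrative sketch, not part of page_alloc.c: callers normally reach
 * __alloc_pages_nodemask() through wrappers such as alloc_pages(), which
 * supply the current node's zonelist and a NULL nodemask.  The hypothetical
 * helper below requests an order-2 block (four contiguous pages) with
 * GFP_KERNEL and releases it again; the order passed to __free_pages()
 * must match the order used at allocation time.
 */
static int example_order2_allocation(void)
{
        struct page *page;

        page = alloc_pages(GFP_KERNEL, 2);      /* may sleep and reclaim */
        if (!page)
                return -ENOMEM;

        /* ... use the four contiguous pages ... */

        __free_pages(page, 2);
        return 0;
}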
2931 
2932 /*
2933  * Common helper functions.
2934  */
2935 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2936 {
2937         struct page *page;
2938 
2939         /*
2940          * __get_free_pages() returns a kernel virtual address in the direct
2941          * mapping, which cannot represent a highmem page
2942          */
2943         VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2944 
2945         page = alloc_pages(gfp_mask, order);
2946         if (!page)
2947                 return 0;
2948         return (unsigned long) page_address(page);
2949 }
2950 EXPORT_SYMBOL(__get_free_pages);
2951 
2952 unsigned long get_zeroed_page(gfp_t gfp_mask)
2953 {
2954         return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2955 }
2956 EXPORT_SYMBOL(get_zeroed_page);
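/*
 * Illustrative sketch, not part of page_alloc.c: get_zeroed_page() is the
 * usual way to obtain a single zero-filled lowmem page by virtual address,
 * e.g. for a temporary scratch buffer.  free_page() (i.e. free_pages()
 * with order 0) releases it.  The helper name is hypothetical.
 */
static int example_scratch_page(void)
{
        unsigned long addr = get_zeroed_page(GFP_KERNEL);

        if (!addr)
                return -ENOMEM;

        /* ... addr points at PAGE_SIZE bytes of zero-filled memory ... */

        free_page(addr);
        return 0;
}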
2957 
2958 void __free_pages(struct page *page, unsigned int order)
2959 {
2960         if (put_page_testzero(page)) {
2961                 if (order == 0)
2962                         free_hot_cold_page(page, false);
2963                 else
2964                         __free_pages_ok(page, order);
2965         }
2966 }
2967 
2968 EXPORT_SYMBOL(__free_pages);
2969 
2970 void free_pages(unsigned long addr, unsigned int order)
2971 {
2972         if (addr != 0) {
2973                 VM_BUG_ON(!virt_addr_valid((void *)addr));
2974                 __free_pages(virt_to_page((void *)addr), order);
2975         }
2976 }
2977 
2978 EXPORT_SYMBOL(free_pages);
2979 
2980 /*
2981  * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
2982  * of the current memory cgroup.
2983  *
2984  * It should be used when the caller would like to use kmalloc, but since the
2985  * allocation is large, it has to fall back to the page allocator.
2986  */
2987 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
2988 {
2989         struct page *page;
2990         struct mem_cgroup *memcg = NULL;
2991 
2992         if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
2993                 return NULL;
2994         page = alloc_pages(gfp_mask, order);
2995         memcg_kmem_commit_charge(page, memcg, order);
2996         return page;
2997 }
2998 
2999 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3000 {
3001         struct page *page;
3002         struct mem_cgroup *memcg = NULL;
3003 
3004         if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3005                 return NULL;
3006         page = alloc_pages_node(nid, gfp_mask, order);
3007         memcg_kmem_commit_charge(page, memcg, order);
3008         return page;
3009 }
3010 
3011 /*
3012  * __free_kmem_pages and free_kmem_pages will free pages allocated with
3013  * alloc_kmem_pages.
3014  */
3015 void __free_kmem_pages(struct page *page, unsigned int order)
3016 {
3017         memcg_kmem_uncharge_pages(page, order);
3018         __free_pages(page, order);
3019 }
3020 
3021 void free_kmem_pages(unsigned long addr, unsigned int order)
3022 {
3023         if (addr != 0) {
3024                 VM_BUG_ON(!virt_addr_valid((void *)addr));
3025                 __free_kmem_pages(virt_to_page((void *)addr), order);
3026         }
3027 }
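/*
 * Illustrative sketch, not part of page_alloc.c: alloc_kmem_pages() suits
 * kmalloc-style allocations that are too large for the slab allocator but
 * should still be charged to the current memcg's kmem counter.  The helper
 * names below are hypothetical; the order must match between allocation
 * and free.
 */
static void *example_alloc_charged_buffer(unsigned int order)
{
        struct page *page = alloc_kmem_pages(GFP_KERNEL, order);

        return page ? page_address(page) : NULL;
}

static void example_free_charged_buffer(void *buf, unsigned int order)
{
        if (buf)
                free_kmem_pages((unsigned long)buf, order);
}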
3028 
3029 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
3030 {
3031         if (addr) {
3032                 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3033                 unsigned long used = addr + PAGE_ALIGN(size);
3034 
3035                 split_page(virt_to_page((void *)addr), order);
3036                 while (used < alloc_end) {
3037                         free_page(used);
3038                         used += PAGE_SIZE;
3039                 }
3040         }
3041         return (void *)addr;
3042 }
3043 
3044 /**
3045  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3046  * @size: the number of bytes to allocate
3047  * @gfp_mask: GFP flags for the allocation
3048  *
3049  * This function is similar to alloc_pages(), except that it allocates the
3050  * minimum number of pages to satisfy the request.  alloc_pages() can only
3051  * allocate memory in power-of-two pages.
3052  *
3053  * This function is also limited by MAX_ORDER.
3054  *
3055  * Memory allocated by this function must be released by free_pages_exact().
3056  */
3057 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3058 {
3059         unsigned int order = get_order(size);
3060         unsigned long addr;
3061 
3062         addr = __get_free_pages(gfp_mask, order);
3063         return make_alloc_exact(addr, order, size);
3064 }
3065 EXPORT_SYMBOL(alloc_pages_exact);
3066 
3067 /**
3068  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3069  *                         pages on a node.
3070  * @nid: the preferred node ID where memory should be allocated
3071  * @size: the number of bytes to allocate
3072  * @gfp_mask: GFP flags for the allocation
3073  *
3074  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3075  * back.
3076  * Note this is not alloc_pages_exact_node() which allocates on a specific node,
3077  * but is not exact.
3078  */
3079 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
3080 {
3081         unsigned order = get_order(size);
3082         struct page *p = alloc_pages_node(nid, gfp_mask, order);
3083         if (!p)
3084                 return NULL;
3085         return make_alloc_exact((unsigned long)page_address(p), order, size);
3086 }
3087 
3088 /**
3089  * free_pages_exact - release memory allocated via alloc_pages_exact()
3090  * @virt: the value returned by alloc_pages_exact.
3091  * @size: size of allocation, same value as passed to alloc_pages_exact().
3092  *
3093  * Release the memory allocated by a previous call to alloc_pages_exact.
3094  */
3095 void free_pages_exact(void *virt, size_t size)
3096 {
3097         unsigned long addr = (unsigned long)virt;
3098         unsigned long end = addr + PAGE_ALIGN(size);
3099 
3100         while (addr < end) {
3101                 free_page(addr);
3102                 addr += PAGE_SIZE;
3103         }
3104 }
3105 EXPORT_SYMBOL(free_pages_exact);
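/*
 * Illustrative sketch, not part of page_alloc.c: for a request that is not
 * a power-of-two number of pages, alloc_pages_exact() wastes less memory
 * than alloc_pages().  With 4KiB pages, a 20KiB (five page) request gives
 * get_order(20KiB) == 3, i.e. an order-3 (eight page) allocation;
 * make_alloc_exact() then splits it and frees the three trailing pages, so
 * only five pages remain allocated.  The helper names are hypothetical.
 */
static void *example_alloc_exact_buffer(void)
{
        return alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
}

static void example_free_exact_buffer(void *buf)
{
        if (buf)
                free_pages_exact(buf, 5 * PAGE_SIZE);
}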
3106 
3107 /**
3108  * nr_free_zone_pages - count number of pages beyond high watermark
3109  * @offset: The zone index of the highest zone
3110  *
3111  * nr_free_zone_pages() counts the number of pages which are beyond the
3112  * high watermark within all zones at or below a given zone index.  For each
3113  * zone, the number of pages is calculated as:
3114  *     managed_pages - high_pages
3115  */
3116 static unsigned long nr_free_zone_pages(int offset)
3117 {
3118         struct zoneref *z;
3119         struct zone *zone;
3120 
3121         /* Just pick one node, since fallback list is circular */
3122         unsigned long sum = 0;
3123 
3124         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
3125 
3126         for_each_zone_zonelist(zone, z, zonelist, offset) {
3127                 unsigned long size = zone->managed_pages;
3128                 unsigned long high = high_wmark_pages(zone);
3129                 if (size > high)
3130                         sum += size - high;
3131         }
3132 
3133         return sum;
3134 }
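/*
 * Worked example (illustrative figures): if ZONE_DMA has managed_pages =
 * 4000 with a high watermark of 128 pages, and ZONE_NORMAL has
 * managed_pages = 250000 with a high watermark of 2000 pages, then for an
 * offset covering both zones nr_free_zone_pages() returns
 * (4000 - 128) + (250000 - 2000) = 251872 pages.
 */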
3135 
3136 /**
3137  * nr_free_buffer_pages - count number of pages beyond high watermark
3138  *
3139  * nr_free_buffer_pages() counts the number of pages which are beyond the high
3140  * watermark within ZONE_DMA and ZONE_NORMAL.
3141  */
3142 unsigned long nr_free_buffer_pages(void)
3143 {
3144         return nr_free_zone_pages(gfp_zone(GFP_USER));
3145 }
3146 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
3147 
3148 /**
3149  * nr_free_pagecache_pages - count number of pages beyond high watermark
3150  *
3151  * nr_free_pagecache_pages() counts the number of pages which are beyond the
3152  * high watermark within all zones.
3153  */
3154 unsigned long nr_free_pagecache_pages(void)
3155 {
3156         return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
3157 }
3158 
3159 static inline void show_node(struct zone *zone)
3160 {
3161         if (IS_ENABLED(CONFIG_NUMA))
3162                 printk("Node %d ", zone_to_nid(zone));
3163 }
3164 
3165 void si_meminfo(struct sysinfo *val)
3166 {
3167         val->totalram = totalram_pages;
3168         val->sharedram = global_page_state(NR_SHMEM);
3169         val->freeram = global_page_state(NR_FREE_PAGES);
3170         val->bufferram = nr_blockdev_pages();
3171         val->totalhigh = totalhigh_pages;
3172         val->freehigh = nr_free_highpages();
3173         val->mem_unit = PAGE_SIZE;
3174 }
3175 
3176 EXPORT_SYMBOL(si_meminfo);
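/*
 * Illustrative sketch, not part of page_alloc.c: si_meminfo() fills a
 * struct sysinfo with page counts, so a caller can derive byte values by
 * multiplying with mem_unit.  The helper name is hypothetical.
 */
static u64 example_free_memory_bytes(void)
{
        struct sysinfo si;

        si_meminfo(&si);
        return (u64)si.freeram * si.mem_unit;
}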
3177 
3178 #ifdef CONFIG_NUMA
3179 void si_meminfo_node(struct sysinfo *val, int nid)
3180 {
3181         int zone_type;          /* needs to be signed */
3182         unsigned long managed_pages = 0;
3183         pg_data_t *pgdat = NODE_DATA(nid);
3184 
3185         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
3186                 managed_pages += pgdat->node_zones[zone_type].managed_pages;
3187         val->totalram = managed_pages;
3188         val->sharedram = node_page_state(nid, NR_SHMEM);
3189         val->freeram = node_page_state(nid, NR_FREE_PAGES);
3190 #ifdef CONFIG_HIGHMEM
3191         val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
3192         val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
3193                         NR_FREE_PAGES);
3194 #else
3195         val->totalhigh = 0;
3196         val->freehigh = 0;
3197 #endif
3198         val->mem_unit = PAGE_SIZE;
3199 }
3200 #endif
3201 
3202 /*
3203  * Determine whether the node should be displayed or not, depending on whether
3204  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
3205  */
3206 bool skip_free_areas_node(unsigned int flags, int nid)
3207 {
3208         bool ret = false;
3209         unsigned int cpuset_mems_cookie;
3210 
3211         if (!(flags & SHOW_MEM_FILTER_NODES))
3212                 goto out;
3213 
3214         do {
3215                 cpuset_mems_cookie = read_mems_allowed_begin();
3216                 ret = !node_isset(nid, cpuset_current_mems_allowed);
3217         } while (read_mems_allowed_retry(cpuset_mems_cookie));
3218 out:
3219         return ret;
3220 }
3221 
3222 #define K(x) ((x) << (PAGE_SHIFT-10))
3223 
3224 static void show_migration_types(unsigned char type)
3225 {
3226         static const char types[MIGRATE_TYPES] = {
3227                 [MIGRATE_UNMOVABLE]     = 'U',
3228                 [MIGRATE_RECLAIMABLE]   = 'E',
3229                 [MIGRATE_MOVABLE]       = 'M',
3230                 [MIGRATE_RESERVE]       = 'R',
3231 #ifdef CONFIG_CMA
3232                 [MIGRATE_CMA]           = 'C',
3233 #endif
3234 #ifdef CONFIG_MEMORY_ISOLATION
3235                 [MIGRATE_ISOLATE]       = 'I',
3236 #endif
3237         };
3238         char tmp[MIGRATE_TYPES + 1];
3239         char *p = tmp;
3240         int i;
3241 
3242         for (i = 0; i < MIGRATE_TYPES; i++) {
3243                 if (type & (1 << i))
3244                         *p++ = types[i];
3245         }
3246 
3247         *p = '\0';
3248         printk("(%s) ", tmp);
3249 }
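/*
 * Worked example (illustrative): the argument is a bitmask of migrate
 * types.  For type = (1 << MIGRATE_UNMOVABLE) | (1 << MIGRATE_MOVABLE) the
 * loop collects 'U' and 'M' and the function prints "(UM) ".
 */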
3250 
3251 /*
3252  * Show free area list (used inside shift_scroll-lock stuff)
3253  * We also calculate the percentage fragmentation. We do this by counting the
3254  * memory on each free list with the exception of the first item on the list.
3255  * Suppresses nodes that are not allowed by current's cpuset if
3256  * SHOW_MEM_FILTER_NODES is passed.
3257  */
3258 void show_free_areas(unsigned int filter)
3259 {
3260         int cpu;
3261         struct zone *zone;
3262 
3263         for_each_populated_zone(zone) {
3264                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3265                         continue;
3266                 show_node(zone);
3267                 printk("%s per-cpu:\n", zone->name);
3268 
3269                 for_each_online_cpu(cpu) {
3270                         struct per_cpu_pageset *pageset;
3271 
3272                         pageset = per_cpu_ptr(zone->pageset, cpu);
3273 
3274                         printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
3275                                cpu, pageset->pcp.high,
3276                                pageset->pcp.batch, pageset->pcp.count);
3277                 }
3278         }
3279 
3280         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
3281                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
3282                 " unevictable:%lu"
3283                 " dirty:%lu writeback:%lu unstable:%lu\n"
3284                 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
3285                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
3286                 " free_cma:%lu\n",
3287                 global_page_state(NR_ACTIVE_ANON),
3288                 global_page_state(NR_INACTIVE_ANON),
3289                 global_page_state(NR_ISOLATED_ANON),
3290                 global_page_state(NR_ACTIVE_FILE),
3291                 global_page_state(NR_INACTIVE_FILE),
3292                 global_page_state(NR_ISOLATED_FILE),
3293                 global_page_state(NR_UNEVICTABLE),
3294                 global_page_state(NR_FILE_DIRTY),
3295                 global_page_state(NR_WRITEBACK),
3296                 global_page_state(NR_UNSTABLE_NFS),
3297                 global_page_state(NR_FREE_PAGES),
3298                 global_page_state(NR_SLAB_RECLAIMABLE),
3299                 global_page_state(NR_SLAB_UNRECLAIMABLE),
3300                 global_page_state(NR_FILE_MAPPED),
3301                 global_page_state(NR_SHMEM),
3302                 global_page_state(NR_PAGETABLE),
3303                 global_page_state(NR_BOUNCE),
3304                 global_page_state(NR_FREE_CMA_PAGES));
3305 
3306         for_each_populated_zone(zone) {
3307                 int i;
3308 
3309                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3310                         continue;
3311                 show_node(zone);
3312                 printk("%s"
3313                         " free:%lukB"
3314                         " min:%lukB"
3315                         " low:%lukB"
3316                         " high:%lukB"
3317                         " active_anon:%lukB"
3318                         " inactive_anon:%lukB"
3319                         " active_file:%lukB"
3320                         " inactive_file:%lukB"
3321                         " unevictable:%lukB"
3322                         " isolated(anon):%lukB"
3323                         " isolated(file):%lukB"
3324                         " present:%lukB"
3325                         " managed:%lukB"
3326                         " mlocked:%lukB"
3327                         " dirty:%lukB"
3328                         " writeback:%lukB"
3329                         " mapped:%lukB"
3330                         " shmem:%lukB"
3331                         " slab_reclaimable:%lukB"
3332                         " slab_unreclaimable:%lukB"
3333                         " kernel_stack:%lukB"
3334                         " pagetables:%lukB"
3335                         " unstable:%lukB"
3336                         " bounce:%lukB"
3337                         " free_cma:%lukB"
3338                         " writeback_tmp:%lukB"
3339                         " pages_scanned:%lu"
3340                         " all_unreclaimable? %s"
3341                         "\n",
3342                         zone->name,
3343                         K(zone_page_state(zone, NR_FREE_PAGES)),
3344                         K(min_wmark_pages(zone)),
3345                         K(low_wmark_pages(zone)),
3346                         K(high_wmark_pages(zone)),
3347                         K(zone_page_state(zone, NR_ACTIVE_ANON)),
3348                         K(zone_page_state(zone, NR_INACTIVE_ANON)),
3349                         K(zone_page_state(zone, NR_ACTIVE_FILE)),
3350                         K(zone_page_state(zone, NR_INACTIVE_FILE)),
3351                         K(zone_page_state(zone, NR_UNEVICTABLE)),
3352                         K(zone_page_state(zone, NR_ISOLATED_ANON)),
3353                         K(zone_page_state(zone, NR_ISOLATED_FILE)),
3354                         K(zone->present_pages),
3355                         K(zone->managed_pages),
3356                         K(zone_page_state(zone, NR_MLOCK)),
3357                         K(zone_page_state(zone, NR_FILE_DIRTY)),
3358                         K(zone_page_state(zone, NR_WRITEBACK)),
3359                         K(zone_page_state(zone, NR_FILE_MAPPED)),
3360                         K(zone_page_state(zone, NR_SHMEM)),
3361                         K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
3362                         K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
3363                         zone_page_state(zone, NR_KERNEL_STACK) *
3364                                 THREAD_SIZE / 1024,
3365                         K(zone_page_state(zone, NR_PAGETABLE)),
3366                         K(zone_page_state(zone, NR_UNSTABLE_NFS)),
3367                         K(zone_page_state(zone, NR_BOUNCE)),
3368                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
3369                         K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
3370                         K(zone_page_state(zone, NR_PAGES_SCANNED)),
3371                         (!zone_reclaimable(zone) ? "yes" : "no")
3372                         );
3373                 printk("lowmem_reserve[]:");
3374                 for (i = 0; i < MAX_NR_ZONES; i++)
3375                         printk(" %ld", zone->lowmem_reserve[i]);
3376                 printk("\n");
3377         }
3378 
3379         for_each_populated_zone(zone) {
3380                 unsigned long nr[MAX_ORDER], flags, order, total = 0;
3381                 unsigned char types[MAX_ORDER];
3382 
3383                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
3384                         continue;
3385                 show_node(zone);
3386                 printk("%s: ", zone->name);
3387 
3388                 spin_lock_irqsave(&zone->lock, flags);
3389                 for (order = 0; order < MAX_ORDER; order++) {
3390                         struct free_area *area = &zone->free_area[order];
3391                         int type;
3392 
3393                         nr[order] = area->nr_free;
3394                         total += nr[order] << order;
3395 
3396                         types[order] = 0;
3397                         for (type = 0; type < MIGRATE_TYPES; type++) {
3398                                 if (!list_empty(&area->free_list[type]))
3399                                         types[order] |= 1 << type;
3400                         }
3401                 }
3402                 spin_unlock_irqrestore(&zone->lock, flags);
3403                 for (order = 0; order < MAX_ORDER; order++) {
3404                         printk("%lu*%lukB ", nr[order], K(1UL) << order);
3405                         if (nr[order])
3406                                 show_migration_types(types[order]);
3407                 }
3408                 printk("= %lukB\n", K(total));
3409         }
3410 
3411         hugetlb_show_meminfo();
3412 
3413         printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3414 
3415         show_swap_cache_info();
3416 }
3417 
3418 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3419 {
3420         zoneref->zone = zone;
3421         zoneref->zone_idx = zone_idx(zone);
3422 }
3423 
3424 /*
3425  * Builds allocation fallback zone lists.
3426  *
3427  * Add all populated zones of a node to the zonelist.
3428  */
3429 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3430                                 int nr_zones)
3431 {
3432         struct zone *zone;
3433         enum zone_type zone_type = MAX_NR_ZONES;
3434 
3435         do {
3436                 zone_type--;
3437                 zone = pgdat->node_zones + zone_type;
3438                 if (populated_zone(zone)) {
3439                         zoneref_set_zone(zone,
3440                                 &zonelist->_zonerefs[nr_zones++]);
3441                         check_highest_zone(zone_type);
3442                 }
3443         } while (zone_type);
3444 
3445         return nr_zones;
3446 }
3447 
3448 
3449 /*
3450  *  zonelist_order:
3451  *  0 = automatic detection of better ordering.
3452  *  1 = order by ([node] distance, -zonetype)
3453  *  2 = order by (-zonetype, [node] distance)
3454  *
3455  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3456  *  the same zonelist. So only NUMA can configure this param.
3457  */
3458 #define ZONELIST_ORDER_DEFAULT  0
3459 #define ZONELIST_ORDER_NODE     1
3460 #define ZONELIST_ORDER_ZONE     2
3461 
3462 /* zonelist order in the kernel.
3463  * set_zonelist_order() will set this to NODE or ZONE.
3464  */
3465 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3466 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3467 
3468 
3469 #ifdef CONFIG_NUMA
3470 /* The ordering the user specified; may be changed via boot option or sysctl */
3471 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3472 /* string for sysctl */
3473 #define NUMA_ZONELIST_ORDER_LEN 16
3474 char numa_zonelist_order[16] = "default";
3475 
3476 /*
3477  * Interface for configuring zonelist ordering.
3478  * command line option "numa_zonelist_order"
3479  *      = "[dD]efault"  - default, automatic configuration.
3480  *      = "[nN]ode"     - order by node locality, then by zone within node
3481  *      = "[zZ]one"     - order by zone, then by locality within zone
3482  */
3483 
3484 static int __parse_numa_zonelist_order(char *s)
3485 {
3486         if (*s == 'd' || *s == 'D') {
3487                 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3488         } else if (*s == 'n' || *s == 'N') {
3489                 user_zonelist_order = ZONELIST_ORDER_NODE;
3490         } else if (*s == 'z' || *s == 'Z') {
3491                 user_zonelist_order = ZONELIST_ORDER_ZONE;
3492         } else {
3493                 printk(KERN_WARNING
3494                         "Ignoring invalid numa_zonelist_order value:  "
3495                         "%s\n", s);
3496                 return -EINVAL;
3497         }
3498         return 0;
3499 }
3500 
3501 static __init int setup_numa_zonelist_order(char *s)
3502 {
3503         int ret;
3504 
3505         if (!s)
3506                 return 0;
3507 
3508         ret = __parse_numa_zonelist_order(s);
3509         if (ret == 0)
3510                 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3511 
3512         return ret;
3513 }
3514 early_param("numa_zonelist_order", setup_numa_zonelist_order);
3515 
3516 /*
3517  * sysctl handler for numa_zonelist_order
3518  */
3519 int numa_zonelist_order_handler(struct ctl_table *table, int write,
3520                 void __user *buffer, size_t *length,
3521                 loff_t *ppos)
3522 {
3523         char saved_string[NUMA_ZONELIST_ORDER_LEN];
3524         int ret;
3525         static DEFINE_MUTEX(zl_order_mutex);
3526 
3527         mutex_lock(&zl_order_mutex);
3528         if (write) {
3529                 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3530                         ret = -EINVAL;
3531                         goto out;
3532                 }
3533                 strcpy(saved_string, (char *)table->data);
3534         }
3535         ret = proc_dostring(table, write, buffer, length, ppos);
3536         if (ret)
3537                 goto out;
3538         if (write) {
3539                 int oldval = user_zonelist_order;
3540 
3541                 ret = __parse_numa_zonelist_order((char *)table->data);
3542                 if (ret) {
3543                         /*
3544                          * bogus value.  restore saved string
3545                          */
3546                         strncpy((char *)table->data, saved_string,
3547                                 NUMA_ZONELIST_ORDER_LEN);
3548                         user_zonelist_order = oldval;
3549                 } else if (oldval != user_zonelist_order) {
3550                         mutex_lock(&zonelists_mutex);
3551                         build_all_zonelists(NULL, NULL);
3552                         mutex_unlock(&zonelists_mutex);
3553                 }
3554         }
3555 out:
3556         mutex_unlock(&zl_order_mutex);
3557         return ret;
3558 }
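/*
 * Usage note (illustrative): the ordering can be selected at boot with the
 * option parsed above, e.g. "numa_zonelist_order=zone" on the kernel
 * command line, or at run time (assuming the usual /proc/sys/vm path) via
 * "echo node > /proc/sys/vm/numa_zonelist_order", which lands in
 * numa_zonelist_order_handler() and rebuilds the zonelists only when the
 * parsed order actually changes.
 */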
3559 
3560 
3561 #define MAX_NODE_LOAD (nr_online_nodes)
3562 static int node_load[MAX_NUMNODES];
3563 
3564 /**
3565  * find_next_best_node - find the next node that should appear in a given node's fallback list
3566  * @node: node whose fallback list we're appending
3567  * @used_node_mask: nodemask_t of already used nodes
3568  *
3569  * We use a number of factors to determine which is the next node that should
3570  * appear on a given node's fallback list.  The node should not have appeared
3571  * already in @node's fallback list, and it should be the next closest node
3572  * according to the distance array (which contains arbitrary distance values
3573  * from each node to each node in the system), and should also prefer nodes
3574  * with no CPUs, since presumably they'll have very little allocation pressure
3575  * on them otherwise.
3576  * It returns -1 if no node is found.
3577  */
3578 static int find_next_best_node(int node, nodemask_t *used_node_mask)
3579 {
3580         int n, val;
3581         int min_val = INT_MAX;
3582         int best_node = NUMA_NO_NODE;
3583         const struct cpumask *tmp = cpumask_of_node(0);
3584 
3585         /* Use the local node if we haven't already */
3586         if (!node_isset(node, *used_node_mask)) {
3587                 node_set(node, *used_node_mask);
3588                 return node;
3589         }
3590 
3591         for_each_node_state(n, N_MEMORY) {
3592 
3593                 /* Don't want a node to appear more than once */
3594                 if (node_isset(n, *used_node_mask))
3595                         continue;
3596 
3597                 /* Use the distance array to find the distance */
3598                 val = node_distance(node, n);
3599 
3600                 /* Penalize nodes under us ("prefer the next node") */
3601                 val += (n < node);
3602 
3603                 /* Give preference to headless and unused nodes */
3604                 tmp = cpumask_of_node(n);
3605                 if (!cpumask_empty(tmp))
3606                         val += PENALTY_FOR_NODE_WITH_CPUS;
3607 
3608                 /* Slight preference for less loaded node */
3609                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3610                 val += node_load[n];
3611 
3612                 if (val < min_val) {
3613                         min_val = val;
3614                         best_node = n;
3615                 }
3616         }
3617 
3618         if (best_node >= 0)
3619                 node_set(best_node, *used_node_mask);
3620 
3621         return best_node;
3622 }
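/*
 * Worked example (illustrative, distances assumed): suppose node 0 is
 * extending its fallback list and nodes 1 and 2 are both unused, both at
 * node_distance() == 20 and with node_load[] == 0, but only node 1 has
 * CPUs.  Node 1 scores (20 + PENALTY_FOR_NODE_WITH_CPUS) scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES, while node 2 scores just 20 scaled the
 * same way, so the CPU-less node 2 wins, matching the stated preference
 * for headless nodes.
 */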
3623 
3624 
3625 /*
3626  * Build zonelists ordered by node and zones within node.
3627  * This results in maximum locality--normal zone overflows into local
3628  * DMA zone, if any--but risks exhausting DMA zone.
3629  */
3630 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3631 {
3632         int j;
3633         struct zonelist *zonelist;
3634 
3635         zonelist = &pgdat->node_zonelists[0];
3636         for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3637                 ;
3638         j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3639         zonelist->_zonerefs[j].zone = NULL;
3640         zonelist->_zonerefs[j].zone_idx = 0;
3641 }
3642 
3643 /*
3644  * Build gfp_thisnode zonelists
3645  */
3646 static void build_thisnode_zonelists(pg_data_t *pgdat)
3647 {
3648         int j;
3649         struct zonelist *zonelist;
3650 
3651         zonelist = &pgdat->node_zonelists[1];
3652         j = build_zonelists_node(pgdat, zonelist, 0);
3653         zonelist->_zonerefs[j].zone = NULL;
3654         zonelist->_zonerefs[j].zone_idx = 0;
3655 }
3656 
3657 /*
3658  * Build zonelists ordered by zone and nodes within zones.
3659  * This results in conserving DMA zone[s] until all Normal memory is
3660  * exhausted, but results in overflowing to remote node while memory
3661  * may still exist in local DMA zone.
3662  */
3663 static int node_order[MAX_NUMNODES];
3664 
3665 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3666 {
3667         int pos, j, node;
3668         int zone_type;          /* needs to be signed */
3669         struct zone *z;
3670         struct zonelist *zonelist;
3671 
3672         zonelist = &pgdat->node_zonelists[0];
3673         pos = 0;
3674         for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3675                 for (j = 0; j < nr_nodes; j++) {
3676                         node = node_order[j];
3677                         z = &NODE_DATA(node)->node_zones[zone_type];
3678                         if (populated_zone(z)) {
3679                                 zoneref_set_zone(z,
3680                                         &zonelist->_zonerefs[pos++]);
3681                                 check_highest_zone(zone_type);
3682                         }
3683                 }
3684         }
3685         zonelist->_zonerefs[pos].zone = NULL;
3686         zonelist->_zonerefs[pos].zone_idx = 0;
3687 }
3688 
3689 #if defined(CONFIG_64BIT)
3690 /*
3691  * Devices that require DMA32/DMA are relatively rare and do not justify a
3692  * penalty to every machine in case the specialised case applies. Default
3693  * to Node-ordering on 64-bit NUMA machines
3694  */
3695 static int default_zonelist_order(void)
3696 {
3697         return ZONELIST_ORDER_NODE;
3698 }
3699 #else
3700 /*
3701  * On 32-bit, the Normal zone needs to be preserved for allocations accessible
3702  * by the kernel. If processes running on node 0 deplete the low memory zone
3703  * then reclaim will occur more frequently, increasing stalls and making
3704  * OOM more likely if a large percentage of the zone is under writeback or
3705  * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
3706  * Hence, default to zone ordering on 32-bit.
3707  */
3708 static int default_zonelist_order(void)
3709 {
3710         return ZONELIST_ORDER_ZONE;
3711 }
3712 #endif /* CONFIG_64BIT */
3713 
3714 static void set_zonelist_order(void)
3715 {
3716         if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3717                 current_zonelist_order = default_zonelist_order();
3718         else
3719                 current_zonelist_order = user_zonelist_order;
3720 }
3721 
3722 static void build_zonelists(pg_data_t *pgdat)
3723 {
3724         int j, node, load;
3725         enum zone_type i;
3726         nodemask_t used_mask;
3727         int local_node, prev_node;
3728         struct zonelist *zonelist;
3729         int order = current_zonelist_order;
3730 
3731         /* initialize zonelists */
3732         for (i = 0; i < MAX_ZONELISTS; i++) {
3733                 zonelist = pgdat->node_zonelists + i;
3734                 zonelist->_zonerefs[0].zone = NULL;
3735                 zonelist->_zonerefs[0].zone_idx = 0;
3736         }
3737 
3738         /* NUMA-aware ordering of nodes */
3739         local_node = pgdat->node_id;
3740         load = nr_online_nodes;
3741         prev_node = local_node;
3742         nodes_clear(used_mask);
3743 
3744         memset(node_order, 0, sizeof(node_order));
3745         j = 0;
3746 
3747         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3748                 /*
3749                  * We don't want to pressure a particular node.
3750                  * So adding penalty to the first node in same
3751                  * distance group to make it round-robin.
3752                  */
3753                 if (node_distance(local_node, node) !=
3754                     node_distance(local_node, prev_node))
3755                         node_load[node] = load;
3756 
3757                 prev_node = node;
3758                 load--;
3759                 if (order == ZONELIST_ORDER_NODE)
3760                         build_zonelists_in_node_order(pgdat, node);
3761                 else
3762                         node_order[j++] = node; /* remember order */
3763         }
3764 
3765         if (order == ZONELIST_ORDER_ZONE) {
3766                 /* calculate node order -- i.e., DMA last! */
3767                 build_zonelists_in_zone_order(pgdat, j);
3768         }
3769 
3770         build_thisnode_zonelists(pgdat);
3771 }
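/*
 * Illustrative example (layout assumed): on a two-node machine where node 0
 * has ZONE_DMA and ZONE_NORMAL and node 1 has only ZONE_NORMAL, node 0's
 * fallback zonelist comes out as
 *      node order:  Normal(0), DMA(0), Normal(1)
 *      zone order:  Normal(0), Normal(1), DMA(0)
 * i.e. node ordering favours locality while zone ordering preserves the
 * small DMA zone until all Normal memory is exhausted.
 */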
3772 
3773 /* Construct the zonelist performance cache - see further mmzone.h */
3774 static void build_zonelist_cache(pg_data_t *pgdat)
3775 {
3776         struct zonelist *zonelist;
3777         struct zonelist_cache *zlc;
3778         struct zoneref *z;
3779 
3780         zonelist = &pgdat->node_zonelists[0];
3781         zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3782         bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3783         for (z = zonelist->_zonerefs; z->zone; z++)
3784                 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3785 }
3786 
3787 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3788 /*
3789  * Return node id of node used for "local" allocations.
3790  * I.e., first node id of first zone in arg node's generic zonelist.
3791  * Used for initializing percpu 'numa_mem', which is used primarily
3792  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3793  */
3794 int local_memory_node(int node)
3795 {
3796         struct zone *zone;
3797 
3798         (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3799                                    gfp_zone(GFP_KERNEL),
3800                                    NULL,
3801                                    &zone);
3802         return zone->node;
3803 }
3804 #endif
3805 
3806 #else   /* CONFIG_NUMA */
3807 
3808 static void set_zonelist_order(void)
3809 {
3810         current_zonelist_order = ZONELIST_ORDER_ZONE;
3811 }
3812 
3813 static void build_zonelists(pg_data_t *pgdat)
3814 {
3815         int node, local_node;
3816         enum zone_type j;
3817         struct zonelist *zonelist;
3818 
3819         local_node = pgdat->node_id;
3820 
3821         zonelist = &pgdat->node_zonelists[0];
3822         j = build_zonelists_node(pgdat, zonelist, 0);
3823 
3824         /*
3825          * Now we build the zonelist so that it contains the zones
3826          * of all the other nodes.
3827          * We don't want to pressure a particular node, so when
3828          * building the zones for node N, we make sure that the
3829          * zones coming right after the local ones are those from
3830          * node N+1 (modulo N)
3831          */
3832         for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3833                 if (!node_online(node))
3834                         continue;
3835                 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3836         }
3837         for (node = 0; node < local_node; node++) {
3838                 if (!node_online(node))
3839                         continue;
3840                 j = build_zonelists_node(NODE_DATA(node), zonelist, j);
3841         }
3842 
3843         zonelist->_zonerefs[j].zone = NULL;
3844         zonelist->_zonerefs[j].zone_idx = 0;
3845 }
3846 
3847 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3848 static void build_zonelist_cache(pg_data_t *pgdat)
3849 {
3850         pgdat->node_zonelists[0].zlcache_ptr = NULL;
3851 }
3852 
3853 #endif  /* CONFIG_NUMA */
3854 
3855 /*
3856  * Boot pageset table. One per cpu which is going to be used for all
3857  * zones and all nodes. The parameters will be set in such a way
3858  * that an item put on a list will immediately be handed over to
3859  * the buddy list. This is safe since pageset manipulation is done
3860  * with interrupts disabled.
3861  *
3862  * The boot_pagesets must be kept even after bootup is complete for
3863  * unused processors and/or zones. They do play a role for bootstrapping
3864  * hotplugged processors.
3865  *
3866  * zoneinfo_show() and maybe other functions do
3867  * not check if the processor is online before following the pageset pointer.
3868  * Other parts of the kernel may not check if the zone is available.
3869  */
3870 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3871 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3872 static void setup_zone_pageset(struct zone *zone);
3873 
3874 /*
3875  * Global mutex to protect against size modification of zonelists
3876  * as well as to serialize pageset setup for the new populated zone.
3877  */
3878 DEFINE_MUTEX(zonelists_mutex);
3879 
3880 /* Return value is int only to match the stop_machine() callback signature */
3881 static int __build_all_zonelists(void *data)
3882 {
3883         int nid;
3884         int cpu;
3885         pg_data_t *self = data;
3886 
3887 #ifdef CONFIG_NUMA
3888         memset(node_load, 0, sizeof(node_load));
3889 #endif
3890 
3891         if (self && !node_online(self->node_id)) {
3892                 build_zonelists(self);
3893                 build_zonelist_cache(self);
3894         }
3895 
3896         for_each_online_node(nid) {
3897                 pg_data_t *pgdat = NODE_DATA(nid);
3898 
3899                 build_zonelists(pgdat);
3900                 build_zonelist_cache(pgdat);
3901         }
3902 
3903         /*
3904          * Initialize the boot_pagesets that are going to be used
3905          * for bootstrapping processors. The real pagesets for
3906          * each zone will be allocated later when the per cpu
3907          * allocator is available.
3908          *
3909          * boot_pagesets are used also for bootstrapping offline
3910          * cpus if the system is already booted because the pagesets
3911          * are needed to initialize allocators on a specific cpu too.
3912          * F.e. the percpu allocator needs the page allocator which
3913          * needs the percpu allocator in order to allocate its pagesets
3914          * (a chicken-egg dilemma).
3915          */
3916         for_each_possible_cpu(cpu) {
3917                 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3918 
3919 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
3920                 /*
3921                  * We now know the "local memory node" for each node--
3922                  * i.e., the node of the first zone in the generic zonelist.
3923                  * Set up numa_mem percpu variable for on-line cpus.  During
3924                  * boot, only the boot cpu should be on-line;  we'll init the
3925                  * secondary cpus' numa_mem as they come on-line.  During
3926                  * node/memory hotplug, we'll fixup all on-line cpus.
3927                  */
3928                 if (cpu_online(cpu))
3929                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3930 #endif
3931         }
3932 
3933         return 0;
3934 }
3935 
3936 /*
3937  * Called with zonelists_mutex held always
3938  * unless system_state == SYSTEM_BOOTING.
3939  */
3940 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
3941 {
3942         set_zonelist_order();
3943 
3944         if (system_state == SYSTEM_BOOTING) {
3945                 __build_all_zonelists(NULL);
3946                 mminit_verify_zonelist();
3947                 cpuset_init_current_mems_allowed();
3948         } else {
3949 #ifdef CONFIG_MEMORY_HOTPLUG
3950                 if (zone)
3951                         setup_zone_pageset(zone);
3952 #endif
3953                 /* we have to stop all cpus to guarantee there is no user
3954                    of zonelist */
3955                 stop_machine(__build_all_zonelists, pgdat, NULL);
3956                 /* cpuset refresh routine should be here */
3957         }
3958         vm_total_pages = nr_free_pagecache_pages();
3959         /*
3960          * Disable grouping by mobility if the number of pages in the
3961          * system is too low to allow the mechanism to work. It would be
3962          * more accurate, but expensive to check per-zone. This check is
3963          * made on memory-hotadd so a system can start with mobility
3964          * disabled and enable it later
3965          */
3966         if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3967                 page_group_by_mobility_disabled = 1;
3968         else
3969                 page_group_by_mobility_disabled = 0;
3970 
3971         pr_info("Built %i zonelists in %s order, mobility grouping %s.  "
3972                 "Total pages: %ld\n",
3973                         nr_online_nodes,
3974                         zonelist_order_name[current_zonelist_order],
3975                         page_group_by_mobility_disabled ? "off" : "on",
3976                         vm_total_pages);
3977 #ifdef CONFIG_NUMA
3978         pr_info("Policy zone: %s\n", zone_names[policy_zone]);
3979 #endif
3980 }
3981 
3982 /*
3983  * Helper functions to size the waitqueue hash table.
3984  * Essentially these want to choose hash table sizes sufficiently
3985  * large so that collisions trying to wait on pages are rare.
3986  * But in fact, the number of active page waitqueues on typical
3987  * systems is ridiculously low, less than 200. So this is even
3988  * conservative, even though it seems large.
3989  *
3990  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3991  * waitqueues, i.e. the size of the waitq table given the number of pages.
3992  */
3993 #define PAGES_PER_WAITQUEUE     256
3994 
3995 #ifndef CONFIG_MEMORY_HOTPLUG
3996 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3997 {
3998         unsigned long size = 1;
3999 
4000         pages /= PAGES_PER_WAITQUEUE;
4001 
4002         while (size < pages)
4003                 size <<= 1;
4004 
4005         /*
4006          * Once we have dozens or even hundreds of threads sleeping
4007          * on IO we've got bigger problems than wait queue collision.
4008          * Limit the size of the wait table to a reasonable size.
4009          */
4010         size = min(size, 4096UL);
4011 
4012         return max(size, 4UL);
4013 }
4014 #else
4015 /*
4016  * A zone's size might be changed by hot-add, so it is not possible to determine
4017  * a suitable size for its wait_table.  So we use the maximum size now.
4018  *
4019  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
4020  *
4021  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
4022  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
4023  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
4024  *
4025  * The maximum entries are prepared when a zone's memory is (512K + 256) pages
4026  * or more by the traditional way. (See above).  It equals:
4027  *
4028  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
4029  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
4030  *    powerpc (64K page size)             : =  (32G +16M)byte.
4031  */
4032 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
4033 {
4034         return 4096UL;
4035 }
4036 #endif
4037 
4038 /*
4039  * This is an integer logarithm so that shifts can be used later
4040  * to extract the more random high bits from the multiplicative
4041  * hash function before the remainder is taken.
4042  */
4043 static inline unsigned long wait_table_bits(unsigned long size)
4044 {
4045         return ffz(~size);
4046 }
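/*
 * Worked example (illustrative, 4KiB pages assumed): a 1GiB zone spans
 * 262144 pages, so without memory hotplug wait_table_hash_nr_entries()
 * computes 262144 / PAGES_PER_WAITQUEUE = 1024 and rounds up to a power of
 * two, giving 1024 entries (below the 4096 cap).  wait_table_bits(1024)
 * then returns ffz(~1024) = 10, the shift used by the multiplicative hash.
 */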
4047 
4048 /*
4049  * Check if a pageblock contains reserved pages
4050  */
4051 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
4052 {
4053         unsigned long pfn;
4054 
4055         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4056                 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
4057                         return 1;
4058         }
4059         return 0;
4060 }
4061 
4062 /*
4063  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
4064  * of blocks reserved is based on min_wmark_pages(zone). The memory within
4065  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
4066  * higher will lead to a bigger reserve which will get freed as contiguous
4067  * blocks as reclaim kicks in
4068  */
4069 static void setup_zone_migrate_reserve(struct zone *zone)
4070 {
4071         unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
4072         struct page *page;
4073         unsigned long block_migratetype;
4074         int reserve;
4075         int old_reserve;
4076 
4077         /*
4078          * Get the start pfn, end pfn and the number of blocks to reserve
4079          * We have to be careful to be aligned to pageblock_nr_pages to
4080          * make sure that we always check pfn_valid for the first page in
4081          * the block.
4082          */
4083         start_pfn = zone->zone_start_pfn;
4084         end_pfn = zone_end_pfn(zone);
4085         start_pfn = roundup(start_pfn, pageblock_nr_pages);
4086         reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
4087                                                         pageblock_order;
4088 
4089         /*
4090          * Reserve blocks are generally in place to help high-order atomic
4091          * allocations that are short-lived. A min_free_kbytes value that
4092          * would result in more than 2 reserve blocks for atomic allocations
4093          * is assumed to be in place to help anti-fragmentation for the
4094          * future allocation of hugepages at runtime.
4095          */
4096         reserve = min(2, reserve);
4097         old_reserve = zone->nr_migrate_reserve_block;
4098 
4099         /* When memory is hot-added, we almost always need to do nothing */
4100         if (reserve == old_reserve)
4101                 return;
4102         zone->nr_migrate_reserve_block = reserve;
4103 
4104         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
4105                 if (!pfn_valid(pfn))
4106                         continue;
4107                 page = pfn_to_page(pfn);
4108 
4109                 /* Watch out for overlapping nodes */
4110                 if (page_to_nid(page) != zone_to_nid(zone))
4111                         continue;
4112 
4113                 block_migratetype = get_pageblock_migratetype(page);
4114 
4115                 /* Only test what is necessary when the reserves are not met */
4116                 if (reserve > 0) {
4117                         /*
4118                          * Blocks with reserved pages will never free, skip
4119                          * them.
4120                          */
4121                         block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
4122                         if (pageblock_is_reserved(pfn, block_end_pfn))
4123                                 continue;
4124 
4125                         /* If this block is reserved, account for it */
4126                         if (block_migratetype == MIGRATE_RESERVE) {
4127                                 reserve--;
4128                                 continue;
4129                         }
4130 
4131                         /* Suitable for reserving if this block is movable */
4132                         if (block_migratetype == MIGRATE_MOVABLE) {
4133                                 set_pageblock_migratetype(page,
4134                                                         MIGRATE_RESERVE);
4135                                 move_freepages_block(zone, page,
4136                                                         MIGRATE_RESERVE);
4137                                 reserve--;
4138                                 continue;
4139                         }
4140                 } else if (!old_reserve) {
4141                         /*
4142                          * At boot time we don't need to scan the whole zone
4143                          * for turning off MIGRATE_RESERVE.
4144                          */
4145                         break;
4146                 }
4147 
4148                 /*
4149                  * If the reserve is met and this is a previous reserved block,
4150                  * take it back
4151                  */
4152                 if (block_migratetype == MIGRATE_RESERVE) {
4153                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4154                         move_freepages_block(zone, page, MIGRATE_MOVABLE);
4155                 }
4156         }
4157 }
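/*
 * Worked example (illustrative, pageblock_order = 9 assumed, i.e. 512-page
 * blocks): with min_wmark_pages(zone) = 2000, the initial reserve is
 * roundup(2000, 512) >> 9 = 2048 >> 9 = 4 blocks, which min(2, reserve)
 * then clamps to 2 MIGRATE_RESERVE pageblocks for the zone.
 */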
4158 
4159 /*
4160  * Initially all pages are reserved - free ones are freed
4161  * up by free_all_bootmem() once the early boot process is
4162  * done. Non-atomic initialization, single-pass.
4163  */
4164 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
4165                 unsigned long start_pfn, enum memmap_context context)
4166 {
4167         struct page *page;
4168         unsigned long end_pfn = start_pfn + size;
4169         unsigned long pfn;
4170         struct zone *z;
4171 
4172         if (highest_memmap_pfn < end_pfn - 1)
4173                 highest_memmap_pfn = end_pfn - 1;
4174 
4175         z = &NODE_DATA(nid)->node_zones[zone];
4176         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
4177                 /*
4178                  * There can be holes in boot-time mem_map[]s
4179                  * handed to this function.  They do not
4180                  * exist on hotplugged memory.
4181                  */
4182                 if (context == MEMMAP_EARLY) {
4183                         if (!early_pfn_valid(pfn))
4184                                 continue;
4185                         if (!early_pfn_in_nid(pfn, nid))
4186                                 continue;
4187                 }
4188                 page = pfn_to_page(pfn);
4189                 set_page_links(page, zone, nid, pfn);
4190                 mminit_verify_page_links(page, zone, nid, pfn);
4191                 init_page_count(page);
4192                 page_mapcount_reset(page);
4193                 page_cpupid_reset_last(page);
4194                 SetPageReserved(page);
4195                 /*
4196                  * Mark the block movable so that blocks are reserved for
4197                  * movable at startup. This will force kernel allocations
4198                  * to reserve their blocks rather than leaking throughout
4199                  * the address space during boot when many long-lived
4200                  * kernel allocations are made. Later some blocks near
4201                  * the start are marked MIGRATE_RESERVE by
4202                  * setup_zone_migrate_reserve()
4203                  *
4204                  * The bitmap is created for the zone's valid pfn range, but the
4205                  * memmap can be created for invalid pages (for alignment).
4206                  * Check here so that set_pageblock_migratetype() is not called
4207                  * for a pfn outside the zone.
4208                  */
4209                 if ((z->zone_start_pfn <= pfn)
4210                     && (pfn < zone_end_pfn(z))
4211                     && !(pfn & (pageblock_nr_pages - 1)))
4212                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4213 
4214                 INIT_LIST_HEAD(&page->lru);
4215 #ifdef WANT_PAGE_VIRTUAL
4216                 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
4217                 if (!is_highmem_idx(zone))
4218                         set_page_address(page, __va(pfn << PAGE_SHIFT));
4219 #endif
4220         }
4221 }
4222 
4223 static void __meminit zone_init_free_lists(struct zone *zone)
4224 {
4225         unsigned int order, t;
4226         for_each_migratetype_order(order, t) {
4227                 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
4228                 zone->free_area[order].nr_free = 0;
4229         }
4230 }
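/*
 * Illustrative note: with a typical MAX_ORDER of 11 and, depending on
 * CONFIG_CMA and CONFIG_MEMORY_ISOLATION, 4 to 6 migrate types, the loop
 * above initialises 44-66 empty free lists per zone and zeroes nr_free
 * for each of the 11 orders.
 */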
4231 
4232 #ifndef __HAVE_ARCH_MEMMAP_INIT
4233 #define memmap_init(size, nid, zone, start_pfn) \
4234         memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4235 #endif
4236 
4237 static int zone_batchsize(struct zone *zone)
4238 {
4239 #ifdef CONFIG_MMU
4240         int batch;
4241 
4242         /*
4243          * The per-cpu-pages pools are set to around 1/1000th of the
4244          * size of the zone.  But no more than 1/2 of a meg.
4245          *
4246          * OK, so we don't know how big the cache is.  So guess.
4247          */
4248         batch = zone->managed_pages / 1024;
4249         if (batch * PAGE_SIZE > 512 * 1024)
4250                 batch = (512 * 1024) / PAGE_SIZE;
4251         batch /= 4;             /* We effectively *= 4 below */
4252         if (batch < 1)
4253                 batch = 1;
4254 
4255         /*
4256          * Clamp the batch to a 2^n - 1 value. Having a power
4257          * of 2 value was found to be more likely to have
4258          * suboptimal cache aliasing properties in some cases.
4259          *
4260          * For example if 2 tasks are alternately allocating
4261          * batches of pages, one task can end up with a lot
4262          * of pages of one half of the possible page colors
4263          * and the other with pages of the other colors.
4264          */
4265         batch = rounddown_pow_of_two(batch + batch/2) - 1;
4266 
4267         return batch;
4268 
4269 #else
4270         /* The deferral and batching of frees should be suppressed under NOMMU
4271          * conditions.
4272          *
4273          * The problem is that NOMMU needs to be able to allocate large chunks
4274          * of contiguous memory as there's no hardware page translation to
4275          * assemble apparent contiguous memory from discontiguous pages.
4276          *
4277          * Queueing large contiguous runs of pages for batching, however,
4278          * causes the pages to actually be freed in smaller chunks.  As there
4279          * can be a significant delay between the individual batches being
4280          * recycled, this leads to the once large chunks of space being
4281          * fragmented and becoming unavailable for high-order allocations.
4282          */
4283         return 0;
4284 #endif
4285 }
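/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096 and a zone
 * with managed_pages == 262144, i.e. 1GiB):
 *
 *   batch = 262144 / 1024 = 256             about 1/1000th of the zone
 *   256 * 4096 = 1MiB > 512KiB              so batch = 512KiB / 4KiB = 128
 *   batch /= 4                              -> 32
 *   rounddown_pow_of_two(32 + 16) - 1       -> 31
 *
 * pageset_set_batch() below then derives pcp->high = 6 * 31 = 186 pages
 * per CPU from this batch size.
 */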
4286 
4287 /*
4288  * pcp->high and pcp->batch values are related and dependent on one another:
4289  * ->batch must never be higher than ->high.
4290  * The following function updates them in a safe manner without read side
4291  * locking.
4292  *
4293  * Any new users of pcp->batch and pcp->high should ensure they can cope with
4294  * those fields changing asynchronously (according to the above rule).
4295  *
4296  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
4297  * outside of boot time (or some other assurance that no concurrent updaters
4298  * exist).
4299  */
4300 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
4301                 unsigned long batch)
4302 {
4303        /* start with a fail safe value for batch */
4304         pcp->batch = 1;
4305         smp_wmb();
4306 
4307        /* Update high, then batch, in order */
4308         pcp->high = high;
4309         smp_wmb();
4310 
4311         pcp->batch = batch;
4312 }
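/*
 * Intent of the two-step update above (illustrative): the page alloc/free
 * fast paths read ->high and ->batch without taking pcp_batch_high_lock.
 * Writing the fail-safe batch of 1 first, then the new high, and only then
 * the real batch narrows the window in which a reader could combine a
 * freshly lowered high with a stale, larger batch.
 */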
4313 
4314 /* a companion to pageset_set_high() */
4315 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
4316 {
4317         pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
4318 }
4319 
4320 static void pageset_init(struct per_cpu_pageset *p)
4321 {
4322         struct per_cpu_pages *pcp;
4323         int migratetype;
4324 
4325         memset(p, 0, sizeof(*p));
4326 
4327         pcp = &p->pcp;
4328         pcp->count = 0;
4329         for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
4330                 INIT_LIST_HEAD(&pcp->lists[migratetype]);
4331 }
4332 
4333 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
4334 {
4335         pageset_init(p);
4336         pageset_set_batch(p, batch);
4337 }
4338 
4339 /*
4340  * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
4341  * to the value high for the pageset p.
4342  */
4343 static void pageset_set_high(struct per_cpu_pageset *p,
4344                                 unsigned long high)
4345 {
4346         unsigned long batch = max(1UL, high / 4);
4347         if ((high / 4) > (PAGE_SHIFT * 8))
4348                 batch = PAGE_SHIFT * 8;
4349 
4350         pageset_update(&p->pcp, high, batch);
4351 }
4352 
4353 static void pageset_set_high_and_batch(struct zone *zone,
4354                                        struct per_cpu_pageset *pcp)
4355 {
4356         if (percpu_pagelist_fraction)
4357                 pageset_set_high(pcp,
4358                         (zone->managed_pages /
4359                                 percpu_pagelist_fraction));
4360         else
4361                 pageset_set_batch(pcp, zone_batchsize(zone));
4362 }
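/*
 * Worked example (illustrative only, assuming PAGE_SHIFT == 12 and
 * percpu_pagelist_fraction == 8 for a zone with managed_pages == 262144):
 *
 *   high = 262144 / 8 = 32768 pages per CPU
 *   high / 4 = 8192 > PAGE_SHIFT * 8 = 96, so batch is clamped to 96
 *
 * With percpu_pagelist_fraction unset, the zone_batchsize() heuristic is
 * used instead (high = 6 * batch).
 */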
4363 
4364 static void __meminit zone_pageset_init(struct zone *zone, int cpu)
4365 {
4366         struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
4367 
4368         pageset_init(pcp);
4369         pageset_set_high_and_batch(zone, pcp);
4370 }
4371 
4372 static void __meminit setup_zone_pageset(struct zone *zone)
4373 {
4374         int cpu;
4375         zone->pageset = alloc_percpu(struct per_cpu_pageset);
4376         for_each_possible_cpu(cpu)
4377                 zone_pageset_init(zone, cpu);
4378 }
4379 
4380 /*
4381  * Allocate per cpu pagesets and initialize them.
4382  * Before this call only boot pagesets were available.
4383  */
4384 void __init setup_per_cpu_pageset(void)
4385 {
4386         struct zone *zone;
4387 
4388         for_each_populated_zone(zone)
4389                 setup_zone_pageset(zone);
4390 }
4391 
4392 static noinline __init_refok
4393 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
4394 {
4395         int i;
4396         size_t alloc_size;
4397 
4398         /*
4399          * The per-page waitqueue mechanism uses hashed waitqueues
4400          * per zone.
4401          */
4402         zone->wait_table_hash_nr_entries =
4403                  wait_table_hash_nr_entries(zone_size_pages);
4404         zone->wait_table_bits =
4405                 wait_table_bits(zone->wait_table_hash_nr_entries);
4406         alloc_size = zone->wait_table_hash_nr_entries
4407                                         * sizeof(wait_queue_head_t);
4408 
4409         if (!slab_is_available()) {
4410                 zone->wait_table = (wait_queue_head_t *)
4411                         memblock_virt_alloc_node_nopanic(
4412                                 alloc_size, zone->zone_pgdat->node_id);
4413         } else {
4414                 /*
4415                  * This case means that a zone whose size was 0 gained new memory
4416                  * via memory hot-add.
4417                  * It may also be that a whole new node was hot-added.  In that
4418                  * case vmalloc() will not be able to use the new node's memory,
4419                  * yet this wait_table must still be initialized to serve the new
4420                  * node as well.
4421                  * Making use of the new node's own memory for it will need
4422                  * further consideration.
4423                  */
4424                 zone->wait_table = vmalloc(alloc_size);
4425         }
4426         if (!zone->wait_table)
4427                 return -ENOMEM;
4428 
4429         for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4430                 init_waitqueue_head(zone->wait_table + i);
4431 
4432         return 0;
4433 }
4434 
4435 static __meminit void zone_pcp_init(struct zone *zone)
4436 {
4437         /*
4438          * per cpu subsystem is not up at this point. The following code
4439          * relies on the ability of the linker to provide the
4440          * offset of a (static) per cpu variable into the per cpu area.
4441          */
4442         zone->pageset = &boot_pageset;
4443 
4444         if (populated_zone(zone))
4445                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
4446                         zone->name, zone->present_pages,
4447                                          zone_batchsize(zone));
4448 }
4449 
4450 int __meminit init_currently_empty_zone(struct zone *zone,
4451                                         unsigned long zone_start_pfn,
4452                                         unsigned long size,
4453                                         enum memmap_context context)
4454 {
4455         struct pglist_data *pgdat = zone->zone_pgdat;
4456         int ret;
4457         ret = zone_wait_table_init(zone, size);
4458         if (ret)
4459                 return ret;
4460         pgdat->nr_zones = zone_idx(zone) + 1;
4461 
4462         zone->zone_start_pfn = zone_start_pfn;
4463 
4464         mminit_dprintk(MMINIT_TRACE, "memmap_init",
4465                         "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4466                         pgdat->node_id,
4467                         (unsigned long)zone_idx(zone),
4468                         zone_start_pfn, (zone_start_pfn + size));
4469 
4470         zone_init_free_lists(zone);
4471 
4472         return 0;
4473 }
4474 
4475 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4476 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4477 /*
4478  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4479  */
4480 int __meminit __early_pfn_to_nid(unsigned long pfn)
4481 {
4482         unsigned long start_pfn, end_pfn;
4483         int nid;
4484         /*
4485          * NOTE: The following SMP-unsafe globals are only used early in boot
4486          * when the kernel is running single-threaded.
4487          */
4488         static unsigned long __meminitdata last_start_pfn, last_end_pfn;
4489         static int __meminitdata last_nid;
4490 
4491         if (last_start_pfn <= pfn && pfn < last_end_pfn)
4492                 return last_nid;
4493 
4494         nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
4495         if (nid != -1) {
4496                 last_start_pfn = start_pfn;
4497                 last_end_pfn = end_pfn;
4498                 last_nid = nid;
4499         }
4500 
4501         return nid;
4502 }
4503 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4504 
4505 int __meminit early_pfn_to_nid(unsigned long pfn)
4506 {
4507         int nid;
4508 
4509         nid = __early_pfn_to_nid(pfn);
4510         if (nid >= 0)
4511                 return nid;
4512         /* just returns 0 */
4513         return 0;
4514 }
4515 
4516 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
4517 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4518 {
4519         int nid;
4520 
4521         nid = __early_pfn_to_nid(pfn);
4522         if (nid >= 0 && nid != node)
4523                 return false;
4524         return true;
4525 }
4526 #endif
4527 
4528 /**
4529  * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
4530  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4531  * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
4532  *
4533  * If an architecture guarantees that all ranges registered contain no holes
4534  * and may be freed, this function may be used instead of calling
4535  * memblock_free_early_nid() manually.
4536  */
4537 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4538 {
4539         unsigned long start_pfn, end_pfn;
4540         int i, this_nid;
4541 
4542         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4543                 start_pfn = min(start_pfn, max_low_pfn);
4544                 end_pfn = min(end_pfn, max_low_pfn);
4545 
4546                 if (start_pfn < end_pfn)
4547                         memblock_free_early_nid(PFN_PHYS(start_pfn),
4548                                         (end_pfn - start_pfn) << PAGE_SHIFT,
4549                                         this_nid);
4550         }
4551 }
4552 
4553 /**
4554  * sparse_memory_present_with_active_regions - Call memory_present for each active range
4555  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4556  *
4557  * If an architecture guarantees that all ranges registered contain no holes and may
4558  * be freed, this function may be used instead of calling memory_present() manually.
4559  */
4560 void __init sparse_memory_present_with_active_regions(int nid)
4561 {
4562         unsigned long start_pfn, end_pfn;
4563         int i, this_nid;
4564 
4565         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4566                 memory_present(this_nid, start_pfn, end_pfn);
4567 }
4568 
4569 /**
4570  * get_pfn_range_for_nid - Return the start and end page frames for a node
4571  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4572  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4573  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4574  *
4575  * It returns the start and end page frame of a node based on information
4576  * provided by memblock_set_node(). If called for a node
4577  * with no available memory, a warning is printed and the start and end
4578  * PFNs will be 0.
4579  */
4580 void __meminit get_pfn_range_for_nid(unsigned int nid,
4581                         unsigned long *start_pfn, unsigned long *end_pfn)
4582 {
4583         unsigned long this_start_pfn, this_end_pfn;
4584         int i;
4585 
4586         *start_pfn = -1UL;
4587         *end_pfn = 0;
4588 
4589         for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4590                 *start_pfn = min(*start_pfn, this_start_pfn);
4591                 *end_pfn = max(*end_pfn, this_end_pfn);
4592         }
4593 
4594         if (*start_pfn == -1UL)
4595                 *start_pfn = 0;
4596 }
4597 
4598 /*
4599  * This finds a zone that can be used for ZONE_MOVABLE pages. The
4600  * assumption is made that zones within a node are ordered in monotonic
4601  * increasing memory addresses so that the "highest" populated zone is used
4602  */
4603 static void __init find_usable_zone_for_movable(void)
4604 {
4605         int zone_index;
4606         for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4607                 if (zone_index == ZONE_MOVABLE)
4608                         continue;
4609 
4610                 if (arch_zone_highest_possible_pfn[zone_index] >
4611                                 arch_zone_lowest_possible_pfn[zone_index])
4612                         break;
4613         }
4614 
4615         VM_BUG_ON(zone_index == -1);
4616         movable_zone = zone_index;
4617 }
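/*
 * Example (illustrative): on a 64-bit machine without highmem the highest
 * zone with a non-empty arch range is usually ZONE_NORMAL, so movable_zone
 * becomes ZONE_NORMAL; on 32-bit with highmem it is typically ZONE_HIGHMEM.
 * ZONE_MOVABLE itself is skipped because it is carved out of this highest
 * usable zone rather than described by the architecture.
 */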
4618 
4619 /*
4620  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4621  * because it is sized independent of architecture. Unlike the other zones,
4622  * the starting point for ZONE_MOVABLE is not fixed. It may be different
4623  * in each node depending on the size of each node and how evenly kernelcore
4624  * is distributed. This helper function adjusts the zone ranges
4625  * provided by the architecture for a given node by using the end of the
4626  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4627  * zones within a node are in order of monotonically increasing memory addresses
4628  */
4629 static void __meminit adjust_zone_range_for_zone_movable(int nid,
4630                                         unsigned long zone_type,
4631                                         unsigned long node_start_pfn,
4632                                         unsigned long node_end_pfn,
4633                                         unsigned long *zone_start_pfn,
4634                                         unsigned long *zone_end_pfn)
4635 {
4636         /* Only adjust if ZONE_MOVABLE is on this node */
4637         if (zone_movable_pfn[nid]) {
4638                 /* Size ZONE_MOVABLE */
4639                 if (zone_type == ZONE_MOVABLE) {
4640                         *zone_start_pfn = zone_movable_pfn[nid];
4641                         *zone_end_pfn = min(node_end_pfn,
4642                                 arch_zone_highest_possible_pfn[movable_zone]);
4643 
4644                 /* Adjust for ZONE_MOVABLE starting within this range */
4645                 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4646                                 *zone_end_pfn > zone_movable_pfn[nid]) {
4647                         *zone_end_pfn = zone_movable_pfn[nid];
4648 
4649                 /* Check if this whole range is within ZONE_MOVABLE */
4650                 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4651                         *zone_start_pfn = *zone_end_pfn;
4652         }
4653 }
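/*
 * Worked example (illustrative): suppose a node spans PFNs 0x00000-0x80000
 * and zone_movable_pfn[nid] == 0x60000.
 *   - ZONE_MOVABLE itself gets [0x60000, min(node end, arch limit)).
 *   - A kernel zone spanning [0x40000, 0x80000) is clipped to end at
 *     0x60000.
 *   - A kernel zone starting at or above 0x60000 collapses to an empty
 *     range (*zone_start_pfn = *zone_end_pfn).
 */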
4654 
4655 /*
4656  * Return the number of pages a zone spans in a node, including holes
4657  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4658  */
4659 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4660                                         unsigned long zone_type,
4661                                         unsigned long node_start_pfn,
4662                                         unsigned long node_end_pfn,
4663                                         unsigned long *ignored)
4664 {
4665         unsigned long zone_start_pfn, zone_end_pfn;
4666 
4667         /* Get the start and end of the zone */
4668         zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4669         zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4670         adjust_zone_range_for_zone_movable(nid, zone_type,
4671                                 node_start_pfn, node_end_pfn,
4672                                 &zone_start_pfn, &zone_end_pfn);
4673 
4674         /* Check that this node has pages within the zone's required range */
4675         if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4676                 return 0;
4677 
4678         /* Move the zone boundaries inside the node if necessary */
4679         zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4680         zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4681 
4682         /* Return the spanned pages */
4683         return zone_end_pfn - zone_start_pfn;
4684 }
4685 
4686 /*
4687  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4688  * then all holes in the requested range will be accounted for.
4689  */
4690 unsigned long __meminit __absent_pages_in_range(int nid,
4691                                 unsigned long range_start_pfn,
4692                                 unsigned long range_end_pfn)
4693 {
4694         unsigned long nr_absent = range_end_pfn - range_start_pfn;
4695         unsigned long start_pfn, end_pfn;
4696         int i;
4697 
4698         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4699                 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4700                 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4701                 nr_absent -= end_pfn - start_pfn;
4702         }
4703         return nr_absent;
4704 }
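/*
 * Worked example (illustrative): for range_start_pfn == 0 and
 * range_end_pfn == 1000 with registered memblock ranges [0, 300) and
 * [600, 1000), nr_absent starts at 1000 and has 300 and 400 subtracted,
 * leaving 300 absent page frames - exactly the hole [300, 600).
 */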
4705 
4706 /**
4707  * absent_pages_in_range - Return number of page frames in holes within a range
4708  * @start_pfn: The start PFN to start searching for holes
4709  * @end_pfn: The end PFN to stop searching for holes
4710  *
4711  * It returns the number of page frames in memory holes within a range.
4712  */
4713 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4714                                                         unsigned long end_pfn)
4715 {
4716         return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4717 }
4718 
4719 /* Return the number of page frames in holes in a zone on a node */
4720 static unsigned long __meminit zone_absent_pages_in_node(int nid,
4721                                         unsigned long zone_type,
4722                                         unsigned long node_start_pfn,
4723                                         unsigned long node_end_pfn,
4724                                         unsigned long *ignored)
4725 {
4726         unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4727         unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4728         unsigned long zone_start_pfn, zone_end_pfn;
4729 
4730         zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4731         zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4732 
4733         adjust_zone_range_for_zone_movable(nid, zone_type,
4734                         node_start_pfn, node_end_pfn,
4735                         &zone_start_pfn, &zone_end_pfn);
4736         return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4737 }
4738 
4739 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4740 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4741                                         unsigned long zone_type,
4742                                         unsigned long node_start_pfn,
4743                                         unsigned long node_end_pfn,
4744                                         unsigned long *zones_size)
4745 {
4746         return zones_size[zone_type];
4747 }
4748 
4749 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4750                                                 unsigned long zone_type,
4751                                                 unsigned long node_start_pfn,
4752                                                 unsigned long node_end_pfn,
4753                                                 unsigned long *zholes_size)
4754 {
4755         if (!zholes_size)
4756                 return 0;
4757 
4758         return zholes_size[zone_type];
4759 }
4760 
4761 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4762 
4763 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4764                                                 unsigned long node_start_pfn,
4765                                                 unsigned long node_end_pfn,
4766                                                 unsigned long *zones_size,
4767                                                 unsigned long *zholes_size)
4768 {
4769         unsigned long realtotalpages, totalpages = 0;
4770         enum zone_type i;
4771 
4772         for (i = 0; i < MAX_NR_ZONES; i++)
4773                 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4774                                                          node_start_pfn,
4775                                                          node_end_pfn,
4776                                                          zones_size);
4777         pgdat->node_spanned_pages = totalpages;
4778 
4779         realtotalpages = totalpages;
4780         for (i = 0; i < MAX_NR_ZONES; i++)
4781                 realtotalpages -=
4782                         zone_absent_pages_in_node(pgdat->node_id, i,
4783                                                   node_start_pfn, node_end_pfn,
4784                                                   zholes_size);
4785         pgdat->node_present_pages = realtotalpages;
4786         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4787                                                         realtotalpages);
4788 }
4789 
4790 #ifndef CONFIG_SPARSEMEM
4791 /*
4792  * Calculate the size of the zone->pageblock_flags bitmap, rounded to an
4793  * unsigned long. Start by making sure zonesize is a multiple of
4794  * pageblock_nr_pages by rounding up, then use NR_PAGEBLOCK_BITS worth of bits
4795  * per pageblock, round what is now in bits up to the nearest long in bits,
4796  * and return the result in bytes.
4797  */
4798 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
4799 {
4800         unsigned long usemapsize;
4801 
4802         zonesize += zone_start_pfn & (pageblock_nr_pages-1);
4803         usemapsize = roundup(zonesize, pageblock_nr_pages);
4804         usemapsize = usemapsize >> pageblock_order;
4805         usemapsize *= NR_PAGEBLOCK_BITS;
4806         usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4807 
4808         return usemapsize / 8;
4809 }
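/*
 * Worked example (illustrative only, assuming pageblock_order == 9, so
 * pageblock_nr_pages == 512, NR_PAGEBLOCK_BITS == 4 and 64-bit longs): an
 * aligned zone of 262144 4KiB pages covers 262144 / 512 = 512 pageblocks,
 * needing 512 * 4 = 2048 bits. 2048 is already a multiple of 64, so
 * usemap_size() returns 2048 / 8 = 256 bytes.
 */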
4810 
4811 static void __init setup_usemap(struct pglist_data *pgdat,
4812                                 struct zone *zone,
4813                                 unsigned long zone_start_pfn,
4814                                 unsigned long zonesize)
4815 {
4816         unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
4817         zone->pageblock_flags = NULL;
4818         if (usemapsize)
4819                 zone->pageblock_flags =
4820                         memblock_virt_alloc_node_nopanic(usemapsize,
4821                                                          pgdat->node_id);
4822 }
4823 #else
4824 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
4825                                 unsigned long zone_start_pfn, unsigned long zonesize) {}
4826 #endif /* CONFIG_SPARSEMEM */
4827 
4828 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4829 
4830 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4831 void __paginginit set_pageblock_order(void)
4832 {
4833         unsigned int order;
4834 
4835         /* Check that pageblock_nr_pages has not already been setup */
4836         if (pageblock_order)
4837                 return;
4838 
4839         if (HPAGE_SHIFT > PAGE_SHIFT)
4840                 order = HUGETLB_PAGE_ORDER;
4841         else
4842                 order = MAX_ORDER - 1;
4843 
4844         /*
4845          * Assume the largest contiguous order of interest is a huge page.
4846          * This value may be variable depending on boot parameters on IA64 and
4847          * powerpc.
4848          */
4849         pageblock_order = order;
4850 }
4851 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4852 
4853 /*
4854  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4855  * is unused as pageblock_order is set at compile-time. See
4856  * include/linux/pageblock-flags.h for the values of pageblock_order based on
4857  * the kernel config
4858  */
4859 void __paginginit set_pageblock_order(void)
4860 {
4861 }
4862 
4863 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4864 
4865 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
4866                                                    unsigned long present_pages)
4867 {
4868         unsigned long pages = spanned_pages;
4869 
4870         /*
4871          * Provide a more accurate estimation if there are holes within
4872          * the zone and SPARSEMEM is in use. If there are holes within the
4873          * zone, each populated memory region may cost us one or two extra
4874          * memmap pages due to alignment because memmap pages for each
4875                  * populated region may not be naturally aligned on a page boundary.
4876          * So the (present_pages >> 4) heuristic is a tradeoff for that.
4877          */
4878         if (spanned_pages > present_pages + (present_pages >> 4) &&
4879             IS_ENABLED(CONFIG_SPARSEMEM))
4880                 pages = present_pages;
4881 
4882         return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
4883 }
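/*
 * Worked example (illustrative only, assuming 4KiB pages and
 * sizeof(struct page) == 64): a SPARSEMEM zone with spanned_pages == 262144
 * but present_pages == 131072 has spanned > present + present/16, so the
 * estimate is based on present_pages:
 *
 *   PAGE_ALIGN(131072 * 64) >> PAGE_SHIFT = 8MiB >> 12 = 2048 memmap pages
 */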
4884 
4885 /*
4886  * Set up the zone data structures:
4887  *   - mark all pages reserved
4888  *   - mark all memory queues empty
4889  *   - clear the memory bitmaps
4890  *
4891  * NOTE: pgdat should get zeroed by caller.
4892  */
4893 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4894                 unsigned long node_start_pfn, unsigned long node_end_pfn,
4895                 unsigned long *zones_size, unsigned long *zholes_size)
4896 {
4897         enum zone_type j;
4898         int nid = pgdat->node_id;
4899         unsigned long zone_start_pfn = pgdat->node_start_pfn;
4900         int ret;
4901 
4902         pgdat_resize_init(pgdat);
4903 #ifdef CONFIG_NUMA_BALANCING
4904         spin_lock_init(&pgdat->numabalancing_migrate_lock);
4905         pgdat->numabalancing_migrate_nr_pages = 0;
4906         pgdat->numabalancing_migrate_next_window = jiffies;
4907 #endif
4908         init_waitqueue_head(&pgdat->kswapd_wait);
4909         init_waitqueue_head(&pgdat->pfmemalloc_wait);
4910         pgdat_page_ext_init(pgdat);
4911 
4912         for (j = 0; j < MAX_NR_ZONES; j++) {
4913                 struct zone *zone = pgdat->node_zones + j;
4914                 unsigned long size, realsize, freesize, memmap_pages;
4915 
4916                 size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
4917                                                   node_end_pfn, zones_size);
4918                 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
4919                                                                 node_start_pfn,
4920                                                                 node_end_pfn,
4921                                                                 zholes_size);
4922 
4923                 /*
4924                  * Adjust freesize so that it accounts for how much memory
4925                  * is used by this zone for memmap. This affects the watermark
4926                  * and per-cpu initialisations
4927                  */
4928                 memmap_pages = calc_memmap_size(size, realsize);
4929                 if (!is_highmem_idx(j)) {
4930                         if (freesize >= memmap_pages) {
4931                                 freesize -= memmap_pages;
4932                                 if (memmap_pages)
4933                                         printk(KERN_DEBUG
4934                                                "  %s zone: %lu pages used for memmap\n",
4935                                                zone_names[j], memmap_pages);
4936                         } else
4937                                 printk(KERN_WARNING
4938                                         "  %s zone: %lu pages exceeds freesize %lu\n",
4939                                         zone_names[j], memmap_pages, freesize);
4940                 }
4941 
4942                 /* Account for reserved pages */
4943                 if (j == 0 && freesize > dma_reserve) {
4944                         freesize -= dma_reserve;
4945                         printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4946                                         zone_names[0], dma_reserve);
4947                 }
4948 
4949                 if (!is_highmem_idx(j))
4950                         nr_kernel_pages += freesize;
4951                 /* Charge for highmem memmap if there are enough kernel pages */
4952                 else if (nr_kernel_pages > memmap_pages * 2)
4953                         nr_kernel_pages -= memmap_pages;
4954                 nr_all_pages += freesize;
4955 
4956                 zone->spanned_pages = size;
4957                 zone->present_pages = realsize;
4958                 /*
4959                  * Set an approximate value for lowmem here, it will be adjusted
4960                  * when the bootmem allocator frees pages into the buddy system.
4961                  * And all highmem pages will be managed by the buddy system.
4962                  */
4963                 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
4964 #ifdef CONFIG_NUMA
4965                 zone->node = nid;
4966                 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
4967                                                 / 100;
4968                 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
4969 #endif
4970                 zone->name = zone_names[j];
4971                 spin_lock_init(&zone->lock);
4972                 spin_lock_init(&zone->lru_lock);
4973                 zone_seqlock_init(zone);
4974                 zone->zone_pgdat = pgdat;
4975                 zone_pcp_init(zone);
4976 
4977                 /* For bootup, initialized properly in watermark setup */
4978                 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
4979 
4980                 lruvec_init(&zone->lruvec);
4981                 if (!size)
4982                         continue;
4983 
4984                 set_pageblock_order();
4985                 setup_usemap(pgdat, zone, zone_start_pfn, size);
4986                 ret = init_currently_empty_zone(zone, zone_start_pfn,
4987                                                 size, MEMMAP_EARLY);
4988                 BUG_ON(ret);
4989                 memmap_init(size, nid, j, zone_start_pfn);
4990                 zone_start_pfn += size;
4991         }
4992 }
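/*
 * How the per-zone counts set up above relate (illustrative example,
 * assuming 4KiB pages, sizeof(struct page) == 64, a non-highmem zone with
 * no holes and dma_reserve == 0): for size == realsize == 262144,
 * calc_memmap_size() charges 4096 pages for the memmap, so freesize and
 * the initial zone->managed_pages become 258048 while spanned_pages and
 * present_pages stay 262144. managed_pages is then refined as bootmem
 * releases pages to the buddy allocator.
 */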
4993 
4994 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4995 {
4996         /* Skip empty nodes */
4997         if (!pgdat->node_spanned_pages)
4998                 return;
4999 
5000 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5001         /* ia64 gets its own node_mem_map, before this, without bootmem */
5002         if (!pgdat->node_mem_map) {
5003                 unsigned long size, start, end;
5004                 struct page *map;
5005 
5006                 /*
5007                  * The zone's endpoints aren't required to be MAX_ORDER
5008                  * aligned but the node_mem_map endpoints must be MAX_ORDER
5009                  * aligned for the buddy allocator to function correctly.
5010                  */
5011                 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
5012                 end = pgdat_end_pfn(pgdat);
5013                 end = ALIGN(end, MAX_ORDER_NR_PAGES);
5014                 size =  (end - start) * sizeof(struct page);
5015                 map = alloc_remap(pgdat->node_id, size);
5016                 if (!map)
5017                         map = memblock_virt_alloc_node_nopanic(size,
5018                                                                pgdat->node_id);
5019                 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
5020         }
5021 #ifndef CONFIG_NEED_MULTIPLE_NODES
5022         /*
5023          * With no DISCONTIG, the global mem_map is just set as node 0's
5024          */
5025         if (pgdat == NODE_DATA(0)) {
5026                 mem_map = NODE_DATA(0)->node_mem_map;
5027 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5028                 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
5029                         mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
5030 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5031         }
5032 #endif
5033 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
5034 }
5035 
5036 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
5037                 unsigned long node_start_pfn, unsigned long *zholes_size)
5038 {
5039         pg_data_t *pgdat = NODE_DATA(nid);
5040         unsigned long start_pfn = 0;
5041         unsigned long end_pfn = 0;
5042 
5043         /* pg_data_t should be reset to zero when it's allocated */
5044         WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
5045 
5046         pgdat->node_id = nid;
5047         pgdat->node_start_pfn = node_start_pfn;
5048 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5049         get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
5050         printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
5051                         (u64) start_pfn << PAGE_SHIFT, (u64) (end_pfn << PAGE_SHIFT) - 1);
5052 #endif
5053         calculate_node_totalpages(pgdat, start_pfn, end_pfn,
5054                                   zones_size, zholes_size);
5055 
5056         alloc_node_mem_map(pgdat);
5057 #ifdef CONFIG_FLAT_NODE_MEM_MAP
5058         printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
5059                 nid, (unsigned long)pgdat,
5060                 (unsigned long)pgdat->node_mem_map);
5061 #endif
5062 
5063         free_area_init_core(pgdat, start_pfn, end_pfn,
5064                             zones_size, zholes_size);
5065 }
5066 
5067 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
5068 
5069 #if MAX_NUMNODES > 1
5070 /*
5071  * Figure out the number of possible node ids.
5072  */
5073 void __init setup_nr_node_ids(void)
5074 {
5075         unsigned int node;
5076         unsigned int highest = 0;
5077 
5078         for_each_node_mask(node, node_possible_map)
5079                 highest = node;
5080         nr_node_ids = highest + 1;
5081 }
5082 #endif
5083 
5084 /**
5085  * node_map_pfn_alignment - determine the maximum internode alignment
5086  *
5087  * This function should be called after node map is populated and sorted.
5088  * It calculates the maximum power of two alignment which can distinguish
5089  * all the nodes.
5090  *
5091  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
5092  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
5093  * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
5094  * shifted, 1GiB is enough and this function will indicate so.
5095  *
5096  * This is used to test whether pfn -> nid mapping of the chosen memory
5097  * model has fine enough granularity to avoid incorrect mapping for the
5098  * populated node map.
5099  *
5100  * Returns the determined alignment in pfn's.  0 if there is no alignment
5101  * requirement (single node).
5102  */
5103 unsigned long __init node_map_pfn_alignment(void)
5104 {
5105         unsigned long accl_mask = 0, last_end = 0;
5106         unsigned long start, end, mask;
5107         int last_nid = -1;
5108         int i, nid;
5109 
5110         for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
5111                 if (!start || last_nid < 0 || last_nid == nid) {
5112                         last_nid = nid;
5113                         last_end = end;
5114                         continue;
5115                 }
5116 
5117                 /*
5118                  * Start with a mask granular enough to pin-point to the
5119                  * start pfn and tick off bits one-by-one until it becomes
5120                  * too coarse to separate the current node from the last.
5121                  */
5122                 mask = ~((1 << __ffs(start)) - 1);
5123                 while (mask && last_end <= (start & (mask << 1)))
5124                         mask <<= 1;
5125 
5126                 /* accumulate all internode masks */
5127                 accl_mask |= mask;
5128         }
5129 
5130         /* convert mask to number of pages */
5131         return ~accl_mask + 1;
5132 }
5133 
5134 /* Find the lowest pfn for a node */
5135 static unsigned long __init find_min_pfn_for_node(int nid)
5136 {
5137         unsigned long min_pfn = ULONG_MAX;
5138         unsigned long start_pfn;
5139         int i;
5140 
5141         for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
5142                 min_pfn = min(min_pfn, start_pfn);
5143 
5144         if (min_pfn == ULONG_MAX) {
5145                 printk(KERN_WARNING
5146                         "Could not find start_pfn for node %d\n", nid);
5147                 return 0;
5148         }
5149 
5150         return min_pfn;
5151 }
5152 
5153 /**
5154  * find_min_pfn_with_active_regions - Find the minimum PFN registered
5155  *
5156  * It returns the minimum PFN based on information provided via
5157  * memblock_set_node().
5158  */
5159 unsigned long __init find_min_pfn_with_active_regions(void)
5160 {
5161         return find_min_pfn_for_node(MAX_NUMNODES);
5162 }
5163 
5164 /*
5165  * early_calculate_totalpages()
5166  * Sum pages in active regions for movable zone.
5167  * Populate N_MEMORY for calculating usable_nodes.
5168  */
5169 static unsigned long __init early_calculate_totalpages(void)
5170 {
5171         unsigned long totalpages = 0;
5172         unsigned long start_pfn, end_pfn;
5173         int i, nid;
5174 
5175         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
5176                 unsigned long pages = end_pfn - start_pfn;
5177 
5178                 totalpages += pages;
5179                 if (pages)
5180                         node_set_state(nid, N_MEMORY);
5181         }
5182         return totalpages;
5183 }
5184 
5185 /*
5186  * Find the PFN the Movable zone begins in each node. Kernel memory
5187  * is spread evenly between nodes as long as the nodes have enough
5188  * memory. When they don't, some nodes will have more kernelcore than
5189  * others
5190  */
5191 static void __init find_zone_movable_pfns_for_nodes(void)
5192 {
5193         int i, nid;
5194         unsigned long usable_startpfn;
5195         unsigned long kernelcore_node, kernelcore_remaining;
5196         /* save the state before borrowing the nodemask */
5197         nodemask_t saved_node_state = node_states[N_MEMORY];
5198         unsigned long totalpages = early_calculate_totalpages();
5199         int usable_nodes = nodes_weight(node_states[N_MEMORY]);
5200         struct memblock_region *r;
5201 
5202         /* Need to find movable_zone earlier when movable_node is specified. */
5203         find_usable_zone_for_movable();
5204 
5205         /*
5206          * If movable_node is specified, ignore kernelcore and movablecore
5207          * options.
5208          */
5209         if (movable_node_is_enabled()) {
5210                 for_each_memblock(memory, r) {
5211                         if (!memblock_is_hotpluggable(r))
5212                                 continue;
5213 
5214                         nid = r->nid;
5215 
5216                         usable_startpfn = PFN_DOWN(r->base);
5217                         zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
5218                                 min(usable_startpfn, zone_movable_pfn[nid]) :
5219                                 usable_startpfn;
5220                 }
5221 
5222                 goto out2;
5223         }
5224 
5225         /*
5226          * If movablecore=nn[KMG] was specified, calculate what size of
5227          * kernelcore that corresponds to, so that memory usable for
5228          * any allocation type is evenly spread. If both kernelcore
5229          * and movablecore are specified, then the value of kernelcore
5230          * will be used for required_kernelcore if it's greater than
5231          * what movablecore would have allowed.
5232          */
5233         if (required_movablecore) {
5234                 unsigned long corepages;
5235 
5236                 /*
5237                  * Round-up so that ZONE_MOVABLE is at least as large as what
5238                  * was requested by the user
5239                  */
5240                 required_movablecore =
5241                         roundup(required_movablecore, MAX_ORDER_NR_PAGES);
5242                 corepages = totalpages - required_movablecore;
5243 
5244                 required_kernelcore = max(required_kernelcore, corepages);
5245         }
5246 
5247         /* If kernelcore was not specified, there is no ZONE_MOVABLE */
5248         if (!required_kernelcore)
5249                 goto out;
5250 
5251         /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
5252         usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
5253 
5254 restart:
5255         /* Spread kernelcore memory as evenly as possible throughout nodes */
5256         kernelcore_node = required_kernelcore / usable_nodes;
5257         for_each_node_state(nid, N_MEMORY) {
5258                 unsigned long start_pfn, end_pfn;
5259 
5260                 /*
5261                  * Recalculate kernelcore_node if the division per node
5262                  * now exceeds what is necessary to satisfy the requested
5263                  * amount of memory for the kernel
5264                  */
5265                 if (required_kernelcore < kernelcore_node)
5266                         kernelcore_node = required_kernelcore / usable_nodes;
5267 
5268                 /*
5269                  * As the map is walked, we track how much memory is usable
5270                  * by the kernel using kernelcore_remaining. When it is
5271                  * 0, the rest of the node is usable by ZONE_MOVABLE
5272                  */
5273                 kernelcore_remaining = kernelcore_node;
5274 
5275                 /* Go through each range of PFNs within this node */
5276                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5277                         unsigned long size_pages;
5278 
5279                         start_pfn = max(start_pfn, zone_movable_pfn[nid]);
5280                         if (start_pfn >= end_pfn)
5281                                 continue;
5282 
5283                         /* Account for what is only usable for kernelcore */
5284                         if (start_pfn < usable_startpfn) {
5285                                 unsigned long kernel_pages;
5286                                 kernel_pages = min(end_pfn, usable_startpfn)
5287                                                                 - start_pfn;
5288 
5289                                 kernelcore_remaining -= min(kernel_pages,
5290                                                         kernelcore_remaining);
5291                                 required_kernelcore -= min(kernel_pages,
5292                                                         required_kernelcore);
5293 
5294                                 /* Continue if range is now fully accounted */
5295                                 if (end_pfn <= usable_startpfn) {
5296 
5297                                         /*
5298                                          * Push zone_movable_pfn to the end so
5299                                          * that if we have to rebalance
5300                                          * kernelcore across nodes, we will
5301                                          * not double account here
5302                                          */
5303                                         zone_movable_pfn[nid] = end_pfn;
5304                                         continue;
5305                                 }
5306                                 start_pfn = usable_startpfn;
5307                         }
5308 
5309                         /*
5310                          * The usable PFN range for ZONE_MOVABLE is from
5311                          * start_pfn->end_pfn. Calculate size_pages as the
5312                          * number of pages used as kernelcore
5313                          */
5314                         size_pages = end_pfn - start_pfn;
5315                         if (size_pages > kernelcore_remaining)
5316                                 size_pages = kernelcore_remaining;
5317                         zone_movable_pfn[nid] = start_pfn + size_pages;
5318 
5319                         /*
5320                          * Some kernelcore has been met, update counts and
5321                          * break if the kernelcore for this node has been
5322                          * satisfied
5323                          */
5324                         required_kernelcore -= min(required_kernelcore,
5325                                                                 size_pages);
5326                         kernelcore_remaining -= size_pages;
5327                         if (!kernelcore_remaining)
5328                                 break;
5329                 }
5330         }
5331 
5332         /*
5333          * If there is still required_kernelcore, we do another pass with one
5334          * less node in the count. This will push zone_movable_pfn[nid] further
5335          * along on the nodes that still have memory until kernelcore is
5336          * satisfied
5337          */
5338         usable_nodes--;
5339         if (usable_nodes && required_kernelcore > usable_nodes)
5340                 goto restart;
5341 
5342 out2:
5343         /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
5344         for (nid = 0; nid < MAX_NUMNODES; nid++)
5345                 zone_movable_pfn[nid] =
5346                         roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
5347 
5348 out:
5349         /* restore the node_state */
5350         node_states[N_MEMORY] = saved_node_state;
5351 }
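/*
 * Worked example (illustrative, assuming 4KiB pages, two 4GiB nodes with
 * no holes, and neither movable_node nor movablecore set): kernelcore=2G
 * gives required_kernelcore == 524288 pages and usable_nodes == 2, so
 * kernelcore_node == 262144 pages. Each node keeps roughly 1GiB for the
 * kernel zones, and zone_movable_pfn[nid] (rounded up to
 * MAX_ORDER_NR_PAGES) marks where that node's remaining ~3GiB of
 * ZONE_MOVABLE begins.
 */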
5352 
5353 /* Any regular or high memory on that node ? */
5354 static void check_for_memory(pg_data_t *pgdat, int nid)
5355 {
5356         enum zone_type zone_type;
5357 
5358         if (N_MEMORY == N_NORMAL_MEMORY)
5359                 return;
5360 
5361         for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
5362                 struct zone *zone = &pgdat->node_zones[zone_type];
5363                 if (populated_zone(zone)) {
5364                         node_set_state(nid, N_HIGH_MEMORY);
5365                         if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
5366                             zone_type <= ZONE_NORMAL)
5367                                 node_set_state(nid, N_NORMAL_MEMORY);
5368                         break;
5369                 }
5370         }
5371 }
5372 
5373 /**
5374  * free_area_init_nodes - Initialise all pg_data_t and zone data
5375  * @max_zone_pfn: an array of max PFNs for each zone
5376  *
5377  * This will call free_area_init_node() for each active node in the system.
5378  * Using the page ranges provided by memblock_set_node(), the size of each
5379  * zone in each node, and of its holes, is calculated. If the maximum PFNs of
5380  * two adjacent zones match, it is assumed that the higher zone is empty.
5381  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
5382  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
5383  * starts where the previous one ended. For example, ZONE_DMA32 starts
5384  * at arch_max_dma_pfn.
5385  */
5386 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
5387 {
5388         unsigned long start_pfn, end_pfn;
5389         int i, nid;
5390 
5391         /* Record where the zone boundaries are */
5392         memset(arch_zone_lowest_possible_pfn, 0,
5393                                 sizeof(arch_zone_lowest_possible_pfn));
5394         memset(arch_zone_highest_possible_pfn, 0,
5395                                 sizeof(arch_zone_highest_possible_pfn));
5396         arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
5397         arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
5398         for (i = 1; i < MAX_NR_ZONES; i++) {
5399                 if (i == ZONE_MOVABLE)
5400                         continue;
5401                 arch_zone_lowest_possible_pfn[i] =
5402                         arch_zone_highest_possible_pfn[i-1];
5403                 arch_zone_highest_possible_pfn[i] =
5404                         max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
5405         }
5406         arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
5407         arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
5408 
5409         /* Find the PFNs that ZONE_MOVABLE begins at in each node */
5410         memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
5411         find_zone_movable_pfns_for_nodes();
5412 
5413         /* Print out the zone ranges */
5414         pr_info("Zone ranges:\n");
5415         for (i = 0; i < MAX_NR_ZONES; i++) {
5416                 if (i == ZONE_MOVABLE)
5417                         continue;
5418                 pr_info("  %-8s ", zone_names[i]);
5419                 if (arch_zone_lowest_possible_pfn[i] ==
5420                                 arch_zone_highest_possible_pfn[i])
5421                         pr_cont("empty\n");
5422                 else
5423                         pr_cont("[mem %0#10lx-%0#10lx]\n",
5424                                 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
5425                                 (arch_zone_highest_possible_pfn[i]
5426                                         << PAGE_SHIFT) - 1);
5427         }
5428 
5429         /* Print out the PFNs ZONE_MOVABLE begins at in each node */
5430         pr_info("Movable zone start for each node\n");
5431         for (i = 0; i < MAX_NUMNODES; i++) {
5432                 if (zone_movable_pfn[i])
5433                         pr_info("  Node %d: %#010lx\n", i,
5434                                zone_movable_pfn[i] << PAGE_SHIFT);
5435         }
5436 
5437         /* Print out the early node map */
5438         pr_info("Early memory node ranges\n");
5439         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
5440                 pr_info("  node %3d: [mem %#010lx-%#010lx]\n", nid,
5441                        start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
5442 
5443         /* Initialise every node */
5444         mminit_verify_pageflags_layout();
5445         setup_nr_node_ids();
5446         for_each_online_node(nid) {
5447                 pg_data_t *pgdat = NODE_DATA(nid);
5448                 free_area_init_node(nid, NULL,
5449                                 find_min_pfn_for_node(nid), NULL);
5450 
5451                 /* Any memory on that node */
5452                 if (pgdat->node_present_pages)
5453                         node_set_state(nid, N_MEMORY);
5454                 check_for_memory(pgdat, nid);
5455         }
5456 }
5457 
5458 static int __init cmdline_parse_core(char *p, unsigned long *core)
5459 {
5460         unsigned long long coremem;
5461         if (!p)
5462                 return -EINVAL;
5463 
5464         coremem = memparse(p, &p);
5465         *core = coremem >> PAGE_SHIFT;
5466 
5467         /* Paranoid check that UL is enough for the coremem value */
5468         WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
5469 
5470         return 0;
5471 }
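/*
 * Worked example (illustrative, assuming 4KiB pages): booting with
 * "kernelcore=512M" makes memparse() return 536870912, so *core ends up
 * as 536870912 >> 12 = 131072 pages of required_kernelcore.
 */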
5472 
5473 /*
5474  * kernelcore=size sets the amount of memory for use for allocations that
5475  * cannot be reclaimed or migrated.
5476  */
5477 static int __init cmdline_parse_kernelcore(char *p)
5478 {
5479         return cmdline_parse_core(p, &required_kernelcore);
5480 }
5481 
5482 /*
5483  * movablecore=size sets the amount of memory for use for allocations that
5484  * can be reclaimed or migrated.
5485  */
5486 static int __init cmdline_parse_movablecore(char *p)
5487 {
5488         return cmdline_parse_core(p, &required_movablecore);
5489 }
5490 
5491 early_param("kernelcore", cmdline_parse_kernelcore);
5492 early_param("movablecore", cmdline_parse_movablecore);
5493 
5494 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5495 
5496 void adjust_managed_page_count(struct page *page, long count)
5497 {
5498         spin_lock(&managed_page_count_lock);
5499         page_zone(page)->managed_pages += count;
5500         totalram_pages += count;
5501 #ifdef CONFIG_HIGHMEM
5502         if (PageHighMem(page))
5503                 totalhigh_pages += count;
5504 #endif
5505         spin_unlock(&managed_page_count_lock);
5506 }
5507 EXPORT_SYMBOL(adjust_managed_page_count);
5508 
5509 unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
5510 {
5511         void *pos;
5512         unsigned long pages = 0;
5513 
5514         start = (void *)PAGE_ALIGN((unsigned long)start);
5515         end = (void *)((unsigned long)end & PAGE_MASK);
5516         for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5517                 if ((unsigned int)poison <= 0xFF)
5518                         memset(pos, poison, PAGE_SIZE);
5519                 free_reserved_page(virt_to_page(pos));
5520         }
5521 
5522         if (pages && s)
5523                 pr_info("Freeing %s memory: %ldK (%p - %p)\n",
5524                         s, pages << (PAGE_SHIFT - 10), start, end);
5525 
5526         return pages;
5527 }
5528 EXPORT_SYMBOL(free_reserved_area);
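/*
 * Typical use (illustrative sketch): architecture code hands the init
 * sections back to the page allocator once boot has finished, along the
 * lines of
 *
 *     free_reserved_area(&__init_begin, &__init_end,
 *                        POISON_FREE_INITMEM, "unused kernel");
 *
 * which poisons each page, clears PG_reserved, frees it and bumps the
 * managed/total page counters via free_reserved_page().
 */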
5529 
5530 #ifdef  CONFIG_HIGHMEM
5531 void free_highmem_page(struct page *page)
5532 {
5533         __free_reserved_page(page);
5534         totalram_pages++;
5535         page_zone(page)->managed_pages++;
5536         totalhigh_pages++;
5537 }
5538 #endif
5539 
5540 
5541 void __init mem_init_print_info(const char *str)
5542 {
5543         unsigned long physpages, codesize, datasize, rosize, bss_size;
5544         unsigned long init_code_size, init_data_size;
5545 
5546         physpages = get_num_physpages();
5547         codesize = _etext - _stext;
5548         datasize = _edata - _sdata;
5549         rosize = __end_rodata - __start_rodata;
5550         bss_size = __bss_stop - __bss_start;
5551         init_data_size = __init_end - __init_begin;
5552         init_code_size = _einittext - _sinittext;
5553 
5554         /*
5555          * Detect special cases and adjust section sizes accordingly:
5556          * 1) .init.* may be embedded into .data sections
5557          * 2) .init.text.* may be out of [__init_begin, __init_end],
5558          *    please refer to arch/tile/kernel/vmlinux.lds.S.
5559          * 3) .rodata.* may be embedded into .text or .data sections.
5560          */
5561 #define adj_init_size(start, end, size, pos, adj) \
5562         do { \
5563                 if (start <= pos && pos < end && size > adj) \
5564                         size -= adj; \
5565         } while (0)
5566 
5567         adj_init_size(__init_begin, __init_end, init_data_size,
5568                      _sinittext, init_code_size);
5569         adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
5570         adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
5571         adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
5572         adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
5573 
5574 #undef  adj_init_size
5575 
5576         pr_info("Memory: %luK/%luK available "
5577                "(%luK kernel code, %luK rwdata, %luK rodata, "
5578                "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
5579 #ifdef  CONFIG_HIGHMEM
5580                ", %luK highmem"
5581 #endif
5582                "%s%s)\n",
5583                nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
5584                codesize >> 10, datasize >> 10, rosize >> 10,
5585                (init_data_size + init_code_size) >> 10, bss_size >> 10,
5586                (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
5587                totalcma_pages << (PAGE_SHIFT-10),
5588 #ifdef  CONFIG_HIGHMEM
5589                totalhigh_pages << (PAGE_SHIFT-10),
5590 #endif
5591                str ? ", " : "", str ? str : "");
5592 }
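With hypothetical numbers, the line printed above comes out roughly as:

	Memory: 2044904K/2097048K available (6920K kernel code, 1148K rwdata, 3408K rodata, 1340K init, 1448K bss, 52144K reserved, 0K cma-reserved)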
5593 
5594 /**
5595  * set_dma_reserve - set the specified number of pages reserved in the first zone
5596  * @new_dma_reserve: The number of pages to mark reserved
5597  *
5598  * The per-cpu batchsize and zone watermarks are determined by present_pages.
5599  * In the DMA zone, a significant percentage may be consumed by kernel image
5600  * and other unfreeable allocations which can skew the watermarks badly. This
5601  * function may optionally be used to account for unfreeable pages in the
5602  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5603  * smaller per-cpu batchsize.
5604  */
5605 void __init set_dma_reserve(unsigned long new_dma_reserve)
5606 {
5607         dma_reserve = new_dma_reserve;
5608 }
5609 
5610 void __init free_area_init(unsigned long *zones_size)
5611 {
5612         free_area_init_node(0, zones_size,
5613                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5614 }
5615 
5616 static int page_alloc_cpu_notify(struct notifier_block *self,
5617                                  unsigned long action, void *hcpu)
5618 {
5619         int cpu = (unsigned long)hcpu;
5620 
5621         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5622                 lru_add_drain_cpu(cpu);
5623                 drain_pages(cpu);
5624 
5625                 /*
5626                  * Spill the event counters of the dead processor
5627                  * into the current processor's event counters.
5628                  * This artificially elevates the count of the current
5629                  * processor.
5630                  */
5631                 vm_events_fold_cpu(cpu);
5632 
5633                 /*
5634                  * Zero the differential counters of the dead processor
5635                  * so that the vm statistics are consistent.
5636                  *
5637                  * This is only okay since the processor is dead and cannot
5638                  * race with what we are doing.
5639                  */
5640                 cpu_vm_stats_fold(cpu);
5641         }
5642         return NOTIFY_OK;
5643 }
5644 
5645 void __init page_alloc_init(void)
5646 {
5647         hotcpu_notifier(page_alloc_cpu_notify, 0);
5648 }
5649 
5650 /*
5651  * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
5652  *      or min_free_kbytes changes.
5653  */
5654 static void calculate_totalreserve_pages(void)
5655 {
5656         struct pglist_data *pgdat;
5657         unsigned long reserve_pages = 0;
5658         enum zone_type i, j;
5659 
5660         for_each_online_pgdat(pgdat) {
5661                 for (i = 0; i < MAX_NR_ZONES; i++) {
5662                         struct zone *zone = pgdat->node_zones + i;
5663                         long max = 0;
5664 
5665                         /* Find valid and maximum lowmem_reserve in the zone */
5666                         for (j = i; j < MAX_NR_ZONES; j++) {
5667                                 if (zone->lowmem_reserve[j] > max)
5668                                         max = zone->lowmem_reserve[j];
5669                         }
5670 
5671                         /* we treat the high watermark as reserved pages. */
5672                         max += high_wmark_pages(zone);
5673 
5674                         if (max > zone->managed_pages)
5675                                 max = zone->managed_pages;
5676                         reserve_pages += max;
5677                         /*
5678                          * Lowmem reserves are not available to
5679                          * GFP_HIGHUSER page cache allocations and
5680                          * kswapd tries to balance zones to their high
5681                          * watermark.  As a result, neither should be
5682                          * regarded as dirtyable memory, to prevent a
5683                          * situation where reclaim has to clean pages
5684                          * in order to balance the zones.
5685                          */
5686                         zone->dirty_balance_reserve = max;
5687                 }
5688         }
5689         dirty_balance_reserve = reserve_pages;
5690         totalreserve_pages = reserve_pages;
5691 }
5692 
5693 /*
5694  * setup_per_zone_lowmem_reserve - called whenever
5695  *      sysctl_lower_zone_reserve_ratio changes.  Ensures that each zone
5696  *      has a correct pages reserved value, so an adequate number of
5697  *      pages are left in the zone after a successful __alloc_pages().
5698  */
5699 static void setup_per_zone_lowmem_reserve(void)
5700 {
5701         struct pglist_data *pgdat;
5702         enum zone_type j, idx;
5703 
5704         for_each_online_pgdat(pgdat) {
5705                 for (j = 0; j < MAX_NR_ZONES; j++) {
5706                         struct zone *zone = pgdat->node_zones + j;
5707                         unsigned long managed_pages = zone->managed_pages;
5708 
5709                         zone->lowmem_reserve[j] = 0;
5710 
5711                         idx = j;
5712                         while (idx) {
5713                                 struct zone *lower_zone;
5714 
5715                                 idx--;
5716 
5717                                 if (sysctl_lowmem_reserve_ratio[idx] < 1)
5718                                         sysctl_lowmem_reserve_ratio[idx] = 1;
5719 
5720                                 lower_zone = pgdat->node_zones + idx;
5721                                 lower_zone->lowmem_reserve[j] = managed_pages /
5722                                         sysctl_lowmem_reserve_ratio[idx];
5723                                 managed_pages += lower_zone->managed_pages;
5724                         }
5725                 }
5726         }
5727 
5728         /* update totalreserve_pages */
5729         calculate_totalreserve_pages();
5730 }
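A worked example of the loop above, assuming hypothetical zone sizes: with a DMA32 zone of 262144 managed pages, a Normal zone of 786432 managed pages and the default sysctl_lowmem_reserve_ratio[ZONE_DMA32] of 256, the pass for j == ZONE_NORMAL sets DMA32's lowmem_reserve[ZONE_NORMAL] to 786432 / 256 = 3072 pages, i.e. roughly 12 MiB that allocations able to use ZONE_NORMAL must leave free in DMA32.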
5731 
5732 static void __setup_per_zone_wmarks(void)
5733 {
5734         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5735         unsigned long lowmem_pages = 0;
5736         struct zone *zone;
5737         unsigned long flags;
5738 
5739         /* Calculate total number of !ZONE_HIGHMEM pages */
5740         for_each_zone(zone) {
5741                 if (!is_highmem(zone))
5742                         lowmem_pages += zone->managed_pages;
5743         }
5744 
5745         for_each_zone(zone) {
5746                 u64 tmp;
5747 
5748                 spin_lock_irqsave(&zone->lock, flags);
5749                 tmp = (u64)pages_min * zone->managed_pages;
5750                 do_div(tmp, lowmem_pages);
5751                 if (is_highmem(zone)) {
5752                         /*
5753                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5754                          * need highmem pages, so cap pages_min to a small
5755                          * value here.
5756                          *
5757                          * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5758                  * deltas control async page reclaim, and so should
5759                          * not be capped for highmem.
5760                          */
5761                         unsigned long min_pages;
5762 
5763                         min_pages = zone->managed_pages / 1024;
5764                         min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5765                         zone->watermark[WMARK_MIN] = min_pages;
5766                 } else {
5767                         /*
5768                          * If it's a lowmem zone, reserve a number of pages
5769                          * proportionate to the zone's size.
5770                          */
5771                         zone->watermark[WMARK_MIN] = tmp;
5772                 }
5773 
5774                 zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5775                 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5776 
5777                 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
5778                         high_wmark_pages(zone) - low_wmark_pages(zone) -
5779                         atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
5780 
5781                 setup_zone_migrate_reserve(zone);
5782                 spin_unlock_irqrestore(&zone->lock, flags);
5783         }
5784 
5785         /* update totalreserve_pages */
5786         calculate_totalreserve_pages();
5787 }
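To make the arithmetic above concrete with hypothetical values: with min_free_kbytes = 4096 and 4 KiB pages, pages_min is 4096 >> 2 = 1024 pages. A lowmem zone holding half of all lowmem gets tmp = 512, so its watermarks become WMARK_MIN = 512, WMARK_LOW = 512 + 128 = 640 and WMARK_HIGH = 512 + 256 = 768 pages.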
5788 
5789 /**
5790  * setup_per_zone_wmarks - called when min_free_kbytes changes
5791  * or when memory is hot-{added|removed}
5792  *
5793  * Ensures that the watermark[min,low,high] values for each zone are set
5794  * correctly with respect to min_free_kbytes.
5795  */
5796 void setup_per_zone_wmarks(void)
5797 {
5798         mutex_lock(&zonelists_mutex);
5799         __setup_per_zone_wmarks();
5800         mutex_unlock(&zonelists_mutex);
5801 }
5802 
5803 /*
5804  * The inactive anon list should be small enough that the VM never has to
5805  * do too much work, but large enough that each inactive page has a chance
5806  * to be referenced again before it is swapped out.
5807  *
5808  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5809  * INACTIVE_ANON pages on this zone's LRU, maintained by the
5810  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5811  * the anonymous pages are kept on the inactive list.
5812  *
5813  * total     target    max
5814  * memory    ratio     inactive anon
5815  * -------------------------------------
5816  *   10MB       1         5MB
5817  *  100MB       1        50MB
5818  *    1GB       3       250MB
5819  *   10GB      10       0.9GB
5820  *  100GB      31         3GB
5821  *    1TB     101        10GB
5822  *   10TB     320        32GB
5823  */
5824 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5825 {
5826         unsigned int gb, ratio;
5827 
5828         /* Zone size in gigabytes */
5829         gb = zone->managed_pages >> (30 - PAGE_SHIFT);
5830         if (gb)
5831                 ratio = int_sqrt(10 * gb);
5832         else
5833                 ratio = 1;
5834 
5835         zone->inactive_ratio = ratio;
5836 }
5837 
5838 static void __meminit setup_per_zone_inactive_ratio(void)
5839 {
5840         struct zone *zone;
5841 
5842         for_each_zone(zone)
5843                 calculate_zone_inactive_ratio(zone);
5844 }
5845 
5846 /*
5847  * Initialise min_free_kbytes.
5848  *
5849  * For small machines we want it small (128k min).  For large machines
5850  * we want it large (64MB max).  But it is not linear, because network
5851  * bandwidth does not increase linearly with machine size.  We use
5852  *
5853  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5854  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
5855  *
5856  * which yields
5857  *
5858  * 16MB:        512k
5859  * 32MB:        724k
5860  * 64MB:        1024k
5861  * 128MB:       1448k
5862  * 256MB:       2048k
5863  * 512MB:       2896k
5864  * 1024MB:      4096k
5865  * 2048MB:      5792k
5866  * 4096MB:      8192k
5867  * 8192MB:      11584k
5868  * 16384MB:     16384k
5869  */
5870 int __meminit init_per_zone_wmark_min(void)
5871 {
5872         unsigned long lowmem_kbytes;
5873         int new_min_free_kbytes;
5874 
5875         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5876         new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5877 
5878         if (new_min_free_kbytes > user_min_free_kbytes) {
5879                 min_free_kbytes = new_min_free_kbytes;
5880                 if (min_free_kbytes < 128)
5881                         min_free_kbytes = 128;
5882                 if (min_free_kbytes > 65536)
5883                         min_free_kbytes = 65536;
5884         } else {
5885                 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
5886                                 new_min_free_kbytes, user_min_free_kbytes);
5887         }
5888         setup_per_zone_wmarks();
5889         refresh_zone_stat_thresholds();
5890         setup_per_zone_lowmem_reserve();
5891         setup_per_zone_inactive_ratio();
5892         return 0;
5893 }
5894 module_init(init_per_zone_wmark_min)
5895 
5896 /*
5897  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec_minmax() so
5898  *      that we can call two helper functions whenever min_free_kbytes
5899  *      changes.
5900  */
5901 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
5902         void __user *buffer, size_t *length, loff_t *ppos)
5903 {
5904         int rc;
5905 
5906         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5907         if (rc)
5908                 return rc;
5909 
5910         if (write) {
5911                 user_min_free_kbytes = min_free_kbytes;
5912                 setup_per_zone_wmarks();
5913         }
5914         return 0;
5915 }
5916 
5917 #ifdef CONFIG_NUMA
5918 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
5919         void __user *buffer, size_t *length, loff_t *ppos)
5920 {
5921         struct zone *zone;
5922         int rc;
5923 
5924         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5925         if (rc)
5926                 return rc;
5927 
5928         for_each_zone(zone)
5929                 zone->min_unmapped_pages = (zone->managed_pages *
5930                                 sysctl_min_unmapped_ratio) / 100;
5931         return 0;
5932 }
5933 
5934 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
5935         void __user *buffer, size_t *length, loff_t *ppos)
5936 {
5937         struct zone *zone;
5938         int rc;
5939 
5940         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5941         if (rc)
5942                 return rc;
5943 
5944         for_each_zone(zone)
5945                 zone->min_slab_pages = (zone->managed_pages *
5946                                 sysctl_min_slab_ratio) / 100;
5947         return 0;
5948 }
5949 #endif
5950 
5951 /*
5952  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5953  *      proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
5954  *      whenever sysctl_lowmem_reserve_ratio changes.
5955  *
5956  * The reserve ratio has no relation to the minimum watermarks. The
5957  * lowmem reserve ratio only makes sense as a function of the boot-time
5958  * zone sizes.
5959  */
5960 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
5961         void __user *buffer, size_t *length, loff_t *ppos)
5962 {
5963         proc_dointvec_minmax(table, write, buffer, length, ppos);
5964         setup_per_zone_lowmem_reserve();
5965         return 0;
5966 }
5967 
5968 /*
5969  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5970  * cpu.  It is the fraction of total pages in each zone that a hot per cpu
5971  * pagelist can have before it gets flushed back to buddy allocator.
5972  */
5973 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
5974         void __user *buffer, size_t *length, loff_t *ppos)
5975 {
5976         struct zone *zone;
5977         int old_percpu_pagelist_fraction;
5978         int ret;
5979 
5980         mutex_lock(&pcp_batch_high_lock);
5981         old_percpu_pagelist_fraction = percpu_pagelist_fraction;
5982 
5983         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5984         if (!write || ret < 0)
5985                 goto out;
5986 
5987         /* Sanity checking to avoid pcp imbalance */
5988         if (percpu_pagelist_fraction &&
5989             percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
5990                 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
5991                 ret = -EINVAL;
5992                 goto out;
5993         }
5994 
5995         /* No change? */
5996         if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
5997                 goto out;
5998 
5999         for_each_populated_zone(zone) {
6000                 unsigned int cpu;
6001 
6002                 for_each_possible_cpu(cpu)
6003                         pageset_set_high_and_batch(zone,
6004                                         per_cpu_ptr(zone->pageset, cpu));
6005         }
6006 out:
6007         mutex_unlock(&pcp_batch_high_lock);
6008         return ret;
6009 }
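For example, writing 8 (the smallest accepted value) to /proc/sys/vm/percpu_pagelist_fraction on a zone with 262144 managed pages allows each CPU's hot pagelist to grow to roughly 262144 / 8 = 32768 pages before pages are flushed back to the buddy allocator; writing 0 restores the default, batch-derived high value.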
6010 
6011 int hashdist = HASHDIST_DEFAULT;
6012 
6013 #ifdef CONFIG_NUMA
6014 static int __init set_hashdist(char *str)
6015 {
6016         if (!str)
6017                 return 0;
6018         hashdist = simple_strtoul(str, &str, 0);
6019         return 1;
6020 }
6021 __setup("hashdist=", set_hashdist);
6022 #endif
6023 
6024 /*
6025  * allocate a large system hash table from bootmem
6026  * - it is assumed that the hash table must contain an exact power-of-2
6027  *   quantity of entries
6028  * - limit is the number of hash buckets, not the total allocation size
6029  */
6030 void *__init alloc_large_system_hash(const char *tablename,
6031                                      unsigned long bucketsize,
6032                                      unsigned long numentries,
6033                                      int scale,
6034                                      int flags,
6035                                      unsigned int *_hash_shift,
6036                                      unsigned int *_hash_mask,
6037                                      unsigned long low_limit,
6038                                      unsigned long high_limit)
6039 {
6040         unsigned long long max = high_limit;
6041         unsigned long log2qty, size;
6042         void *table = NULL;
6043 
6044         /* allow the kernel cmdline to have a say */
6045         if (!numentries) {
6046                 /* round applicable memory size up to nearest megabyte */
6047                 numentries = nr_kernel_pages;
6048 
6049                 /* It isn't necessary when PAGE_SIZE >= 1MB */
6050                 if (PAGE_SHIFT < 20)
6051                         numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
6052 
6053                 /* limit to 1 bucket per 2^scale bytes of low memory */
6054                 if (scale > PAGE_SHIFT)
6055                         numentries >>= (scale - PAGE_SHIFT);
6056                 else
6057                         numentries <<= (PAGE_SHIFT - scale);
6058 
6059                 /* Make sure we've got at least a 0-order allocation.. */
6060                 if (unlikely(flags & HASH_SMALL)) {
6061                         /* Makes no sense without HASH_EARLY */
6062                         WARN_ON(!(flags & HASH_EARLY));
6063                         if (!(numentries >> *_hash_shift)) {
6064                                 numentries = 1UL << *_hash_shift;
6065                                 BUG_ON(!numentries);
6066                         }
6067                 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
6068                         numentries = PAGE_SIZE / bucketsize;
6069         }
6070         numentries = roundup_pow_of_two(numentries);
6071 
6072         /* limit allocation size to 1/16 total memory by default */
6073         if (max == 0) {
6074                 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
6075                 do_div(max, bucketsize);
6076         }
6077         max = min(max, 0x80000000ULL);
6078 
6079         if (numentries < low_limit)
6080                 numentries = low_limit;
6081         if (numentries > max)
6082                 numentries = max;
6083 
6084         log2qty = ilog2(numentries);
6085 
6086         do {
6087                 size = bucketsize << log2qty;
6088                 if (flags & HASH_EARLY)
6089                         table = memblock_virt_alloc_nopanic(size, 0);
6090                 else if (hashdist)
6091                         table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
6092                 else {
6093                         /*
6094                          * If bucketsize is not a power of two, we may free
6095                          * some pages at the end of the hash table, which
6096                          * alloc_pages_exact() does automatically.
6097                          */
6098                         if (get_order(size) < MAX_ORDER) {
6099                                 table = alloc_pages_exact(size, GFP_ATOMIC);
6100                                 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
6101                         }
6102                 }
6103         } while (!table && size > PAGE_SIZE && --log2qty);
6104 
6105         if (!table)
6106                 panic("Failed to allocate %s hash table\n", tablename);
6107 
6108         printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
6109                tablename,
6110                (1UL << log2qty),
6111                ilog2(size) - PAGE_SHIFT,
6112                size);
6113 
6114         if (_hash_shift)
6115                 *_hash_shift = log2qty;
6116         if (_hash_mask)
6117                 *_hash_mask = (1 << log2qty) - 1;
6118 
6119         return table;
6120 }
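A caller sketch, loosely modelled on how early boot code (for instance the dentry and inode caches) sizes its hash tables with this helper; the table name, variables and scale value below are illustrative:

	static unsigned int example_hash_shift __read_mostly;
	static unsigned int example_hash_mask __read_mostly;
	static struct hlist_head *example_hash_table __read_mostly;

	static void __init example_hash_init(void)
	{
		/* One bucket per 2^14 bytes of low memory; let the helper pick the size. */
		example_hash_table = alloc_large_system_hash("Example cache",
						sizeof(struct hlist_head),
						0,		/* numentries: auto-size */
						14,		/* scale */
						0,		/* flags */
						&example_hash_shift,
						&example_hash_mask,
						0,		/* low_limit */
						0);		/* high_limit: default 1/16-of-memory cap */
	}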
6121 
6122 /* Return a pointer to the bitmap storing bits affecting a block of pages */
6123 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
6124                                                         unsigned long pfn)
6125 {
6126 #ifdef CONFIG_SPARSEMEM
6127         return __pfn_to_section(pfn)->pageblock_flags;
6128 #else
6129         return zone->pageblock_flags;
6130 #endif /* CONFIG_SPARSEMEM */
6131 }
6132 
6133 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6134 {
6135 #ifdef CONFIG_SPARSEMEM
6136         pfn &= (PAGES_PER_SECTION-1);
6137         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6138 #else
6139         pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
6140         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
6141 #endif /* CONFIG_SPARSEMEM */
6142 }
6143 
6144 /**
6145  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6146  * @page: The page within the block of interest
6147  * @pfn: The target page frame number
6148  * @end_bitidx: The last bit of interest to retrieve
6149  * @mask: mask of bits that the caller is interested in
6150  *
6151  * Return: pageblock_bits flags
6152  */
6153 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6154                                         unsigned long end_bitidx,
6155                                         unsigned long mask)
6156 {
6157         struct zone *zone;
6158         unsigned long *bitmap;
6159         unsigned long bitidx, word_bitidx;
6160         unsigned long word;
6161 
6162         zone = page_zone(page);
6163         bitmap = get_pageblock_bitmap(zone, pfn);
6164         bitidx = pfn_to_bitidx(zone, pfn);
6165         word_bitidx = bitidx / BITS_PER_LONG;
6166         bitidx &= (BITS_PER_LONG-1);
6167 
6168         word = bitmap[word_bitidx];
6169         bitidx += end_bitidx;
6170         return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
6171 }
6172 
6173 /**
6174  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
6175  * @page: The page within the block of interest
6176  * @flags: The flags to set
6177  * @pfn: The target page frame number
6178  * @end_bitidx: The last bit of interest
6179  * @mask: mask of bits that the caller is interested in
6180  */
6181 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
6182                                         unsigned long pfn,
6183                                         unsigned long end_bitidx,
6184                                         unsigned long mask)
6185 {
6186         struct zone *zone;
6187         unsigned long *bitmap;
6188         unsigned long bitidx, word_bitidx;
6189         unsigned long old_word, word;
6190 
6191         BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
6192 
6193         zone = page_zone(page);
6194         bitmap = get_pageblock_bitmap(zone, pfn);
6195         bitidx = pfn_to_bitidx(zone, pfn);
6196         word_bitidx = bitidx / BITS_PER_LONG;
6197         bitidx &= (BITS_PER_LONG-1);
6198 
6199         VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
6200 
6201         bitidx += end_bitidx;
6202         mask <<= (BITS_PER_LONG - bitidx - 1);
6203         flags <<= (BITS_PER_LONG - bitidx - 1);
6204 
6205         word = ACCESS_ONCE(bitmap[word_bitidx]);
6206         for (;;) {
6207                 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
6208                 if (word == old_word)
6209                         break;
6210                 word = old_word;
6211         }
6212 }
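For reference, the migratetype accessors used elsewhere in the kernel are thin wrappers around the two helpers above; a sketch of that layering (PB_migrate_end and MIGRATETYPE_MASK come from the pageblock-flags definitions, and the exact wrapper form is illustrative):

	/* Sketch: read a pageblock's migratetype via the flags-mask helper. */
	static inline int pfnblock_migratetype_sketch(struct page *page,
						      unsigned long pfn)
	{
		return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
					       MIGRATETYPE_MASK);
	}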
6213 
6214 /*
6215  * This function checks whether the pageblock includes unmovable pages or not.
6216  * If @count is not zero, it is okay to include up to @count unmovable pages.
6217  *
6218  * A PageLRU check without isolation or the lru_lock could race, so a
6219  * MIGRATE_MOVABLE block might include unmovable pages. This means you can't
6220  * expect this function to be exact.
6221  */
6222 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
6223                          bool skip_hwpoisoned_pages)
6224 {
6225         unsigned long pfn, iter, found;
6226         int mt;
6227 
6228         /*
6229          * To avoid noisy data, lru_add_drain_all() should be called first.
6230          * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
6231          */
6232         if (zone_idx(zone) == ZONE_MOVABLE)
6233                 return false;
6234         mt = get_pageblock_migratetype(page);
6235         if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
6236                 return false;
6237 
6238         pfn = page_to_pfn(page);
6239         for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
6240                 unsigned long check = pfn + iter;
6241 
6242                 if (!pfn_valid_within(check))
6243                         continue;
6244 
6245                 page = pfn_to_page(check);
6246 
6247                 /*
6248                  * Hugepages are not in LRU lists, but they're movable.
6249                  * We need not scan over tail pages because we don't
6250                  * handle each tail page individually in migration.
6251                  */
6252                 if (PageHuge(page)) {
6253                         iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
6254                         continue;
6255                 }
6256 
6257                 /*
6258                  * We can't use page_count() without pinning the page,
6259                  * because another CPU could free a compound page concurrently.
6260                  * This check already skips compound tails of THP
6261                  * because their page->_count is zero at all times.
6262                  */
6263                 if (!atomic_read(&page->_count)) {
6264                         if (PageBuddy(page))
6265                                 iter += (1 << page_order(page)) - 1;
6266                         continue;
6267                 }
6268 
6269                 /*
6270                  * The HWPoisoned page may not be in the buddy system, and
6271                  * its page_count() is not 0.
6272                  */
6273                 if (skip_hwpoisoned_pages && PageHWPoison(page))
6274                         continue;
6275 
6276                 if (!PageLRU(page))
6277                         found++;
6278                 /*
6279                  * If there are RECLAIMABLE pages, we need to check
6280                  * them.  But for now, memory offline itself doesn't call
6281                  * shrink_node_slabs(), and this still needs to be fixed.
6282                  */
6283                 /*
6284                  * If the page is not RAM, page_count() should be 0 and
6285                  * we don't need further checks. This is a _used_ non-movable page.
6286                  *
6287                  * The problematic thing here is PG_reserved pages. PG_reserved
6288                  * is set to both of a memory hole page and a _used_ kernel
6289                  * page at boot.
6290                  */
6291                 if (found > count)
6292                         return true;
6293         }
6294         return false;
6295 }
6296 
6297 bool is_pageblock_removable_nolock(struct page *page)
6298 {
6299         struct zone *zone;
6300         unsigned long pfn;
6301 
6302         /*
6303          * We have to be careful here because we are iterating over memory
6304          * sections which are not zone aware so we might end up outside of
6305          * the zone but still within the section.
6306          * We have to take care about the node as well. If the node is offline
6307          * its NODE_DATA will be NULL - see page_zone.
6308          */
6309         if (!node_online(page_to_nid(page)))
6310                 return false;
6311 
6312         zone = page_zone(page);
6313         pfn = page_to_pfn(page);
6314         if (!zone_spans_pfn(zone, pfn))
6315                 return false;
6316 
6317         return !has_unmovable_pages(zone, page, 0, true);
6318 }
6319 
6320 #ifdef CONFIG_CMA
6321 
6322 static unsigned long pfn_max_align_down(unsigned long pfn)
6323 {
6324         return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
6325                              pageblock_nr_pages) - 1);
6326 }
6327 
6328 static unsigned long pfn_max_align_up(unsigned long pfn)
6329 {
6330         return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
6331                                 pageblock_nr_pages));
6332 }
6333 
6334 /* [start, end) must belong to a single zone. */
6335 static int __alloc_contig_migrate_range(struct compact_control *cc,
6336                                         unsigned long start, unsigned long end)
6337 {
6338         /* This function is based on compact_zone() from compaction.c. */
6339         unsigned long nr_reclaimed;
6340         unsigned long pfn = start;
6341         unsigned int tries = 0;
6342         int ret = 0;
6343 
6344         migrate_prep();
6345 
6346         while (pfn < end || !list_empty(&cc->migratepages)) {
6347                 if (fatal_signal_pending(current)) {
6348                         ret = -EINTR;
6349                         break;
6350                 }
6351 
6352                 if (list_empty(&cc->migratepages)) {
6353                         cc->nr_migratepages = 0;
6354                         pfn = isolate_migratepages_range(cc, pfn, end);
6355                         if (!pfn) {
6356                                 ret = -EINTR;
6357                                 break;
6358                         }
6359                         tries = 0;
6360                 } else if (++tries == 5) {
6361                         ret = ret < 0 ? ret : -EBUSY;
6362                         break;
6363                 }
6364 
6365                 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6366                                                         &cc->migratepages);
6367                 cc->nr_migratepages -= nr_reclaimed;
6368 
6369                 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6370                                     NULL, 0, cc->mode, MR_CMA);
6371         }
6372         if (ret < 0) {
6373                 putback_movable_pages(&cc->migratepages);
6374                 return ret;
6375         }
6376         return 0;
6377 }
6378 
6379 /**
6380  * alloc_contig_range() -- tries to allocate given range of pages
6381  * @start:      start PFN to allocate
6382  * @end:        one-past-the-last PFN to allocate
6383  * @migratetype:        migratetype of the underlying pageblocks (either
6384  *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
6385  *                      in range must have the same migratetype and it must
6386  *                      be either of the two.
6387  *
6388  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
6389  * aligned; however, it is the caller's responsibility to guarantee that
6390  * no other thread changes the migrate type of the pageblocks the
6391  * pages fall in.
6392  *
6393  * The PFN range must belong to a single zone.
6394  *
6395  * Returns zero on success or a negative error code.  On success all
6396  * pages whose PFN is in [start, end) are allocated for the caller and
6397  * need to be freed with free_contig_range().
6398  */
6399 int alloc_contig_range(unsigned long start, unsigned long end,
6400                        unsigned migratetype)
6401 {
6402         unsigned long outer_start, outer_end;
6403         int ret = 0, order;
6404 
6405         struct compact_control cc = {
6406                 .nr_migratepages = 0,
6407                 .order = -1,
6408                 .zone = page_zone(pfn_to_page(start)),
6409                 .mode = MIGRATE_SYNC,
6410                 .ignore_skip_hint = true,
6411         };
6412         INIT_LIST_HEAD(&cc.migratepages);
6413 
6414         /*
6415          * What we do here is we mark all pageblocks in range as
6416          * MIGRATE_ISOLATE.  Because pageblock and max order pages may
6417          * have different sizes, and due to the way the page allocator
6418          * works, we align the range to the larger of the two sizes so
6419          * that the page allocator won't try to merge buddies from
6420          * different pageblocks and change MIGRATE_ISOLATE to some
6421          * other migration type.
6422          *
6423          * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6424          * migrate the pages from an unaligned range (ie. pages that
6425          * we are interested in).  This will put all the pages in
6426          * range back to page allocator as MIGRATE_ISOLATE.
6427          *
6428          * When this is done, we take the pages in range from page
6429          * allocator removing them from the buddy system.  This way
6430          * page allocator will never consider using them.
6431          *
6432          * This lets us mark the pageblocks back as
6433          * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6434          * aligned range but not in the unaligned, original range are
6435          * put back to page allocator so that buddy can use them.
6436          */
6437 
6438         ret = start_isolate_page_range(pfn_max_align_down(start),
6439                                        pfn_max_align_up(end), migratetype,
6440                                        false);
6441         if (ret)
6442                 return ret;
6443 
6444         ret = __alloc_contig_migrate_range(&cc, start, end);
6445         if (ret)
6446                 goto done;
6447 
6448         /*
6449          * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
6450          * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
6451          * more, all pages in [start, end) are free in page allocator.
6452          * What we are going to do is to allocate all pages from
6453          * [start, end) (that is remove them from page allocator).
6454          *
6455          * The only problem is that pages at the beginning and at the
6456          * end of the interesting range may not be aligned with pages that
6457          * the page allocator holds, ie. they can be part of higher order
6458          * pages.  Because of this, we reserve the bigger range and
6459          * once this is done free the pages we are not interested in.
6460          *
6461          * We don't have to hold zone->lock here because the pages are
6462          * isolated thus they won't get removed from buddy.
6463          */
6464 
6465         lru_add_drain_all();
6466         drain_all_pages(cc.zone);
6467 
6468         order = 0;
6469         outer_start = start;
6470         while (!PageBuddy(pfn_to_page(outer_start))) {
6471                 if (++order >= MAX_ORDER) {
6472                         ret = -EBUSY;
6473                         goto done;
6474                 }
6475                 outer_start &= ~0UL << order;
6476         }
6477 
6478         /* Make sure the range is really isolated. */
6479         if (test_pages_isolated(outer_start, end, false)) {
6480                 pr_info("%s: [%lx, %lx) PFNs busy\n",
6481                         __func__, outer_start, end);
6482                 ret = -EBUSY;
6483                 goto done;
6484         }
6485 
6486         /* Grab isolated pages from freelists. */
6487         outer_end = isolate_freepages_range(&cc, outer_start, end);
6488         if (!outer_end) {
6489                 ret = -EBUSY;
6490                 goto done;
6491         }
6492 
6493         /* Free head and tail (if any) */
6494         if (start != outer_start)
6495                 free_contig_range(outer_start, start - outer_start);
6496         if (end != outer_end)
6497                 free_contig_range(end, outer_end - end);
6498 
6499 done:
6500         undo_isolate_page_range(pfn_max_align_down(start),
6501                                 pfn_max_align_up(end), migratetype);
6502         return ret;
6503 }
6504 
6505 void free_contig_range(unsigned long pfn, unsigned nr_pages)
6506 {
6507         unsigned int count = 0;
6508 
6509         for (; nr_pages--; pfn++) {
6510                 struct page *page = pfn_to_page(pfn);
6511 
6512                 count += page_count(page) != 1;
6513                 __free_page(page);
6514         }
6515         WARN(count != 0, "%d pages are still in use!\n", count);
6516 }
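A caller sketch in the spirit of the CMA allocator in mm/cma.c, which uses the pair of functions above to carve out physically contiguous buffers (the wrapper name and error handling are illustrative):

	/* Sketch: allocate 'count' contiguous pages starting at 'pfn' as CMA pages. */
	static struct page *contig_alloc_sketch(unsigned long pfn, unsigned long count)
	{
		if (alloc_contig_range(pfn, pfn + count, MIGRATE_CMA))
			return NULL;

		return pfn_to_page(pfn);
	}

	/* The buffer is later released with free_contig_range(pfn, count). */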
6517 #endif
6518 
6519 #ifdef CONFIG_MEMORY_HOTPLUG
6520 /*
6521  * The zone indicated has a new number of managed_pages; batch sizes and percpu
6522  * page high values need to be recalculated.
6523  */
6524 void __meminit zone_pcp_update(struct zone *zone)
6525 {
6526         unsigned cpu;
6527         mutex_lock(&pcp_batch_high_lock);
6528         for_each_possible_cpu(cpu)
6529                 pageset_set_high_and_batch(zone,
6530                                 per_cpu_ptr(zone->pageset, cpu));
6531         mutex_unlock(&pcp_batch_high_lock);
6532 }
6533 #endif
6534 
6535 void zone_pcp_reset(struct zone *zone)
6536 {
6537         unsigned long flags;
6538         int cpu;
6539         struct per_cpu_pageset *pset;
6540 
6541         /* avoid races with drain_pages()  */
6542         local_irq_save(flags);
6543         if (zone->pageset != &boot_pageset) {
6544                 for_each_online_cpu(cpu) {
6545                         pset = per_cpu_ptr(zone->pageset, cpu);
6546                         drain_zonestat(zone, pset);
6547                 }
6548                 free_percpu(zone->pageset);
6549                 zone->pageset = &boot_pageset;
6550         }
6551         local_irq_restore(flags);
6552 }
6553 
6554 #ifdef CONFIG_MEMORY_HOTREMOVE
6555 /*
6556  * All pages in the range must be isolated before calling this.
6557  */
6558 void
6559 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
6560 {
6561         struct page *page;
6562         struct zone *zone;
6563         unsigned int order, i;
6564         unsigned long pfn;
6565         unsigned long flags;
6566         /* find the first valid pfn */
6567         for (pfn = start_pfn; pfn < end_pfn; pfn++)
6568                 if (pfn_valid(pfn))
6569                         break;
6570         if (pfn == end_pfn)
6571                 return;
6572         zone = page_zone(pfn_to_page(pfn));
6573         spin_lock_irqsave(&zone->lock, flags);
6574         pfn = start_pfn;
6575         while (pfn < end_pfn) {
6576                 if (!pfn_valid(pfn)) {
6577                         pfn++;
6578                         continue;
6579                 }
6580                 page = pfn_to_page(pfn);
6581                 /*
6582                  * The HWPoisoned page may not be in the buddy system, and
6583                  * its page_count() is not 0.
6584                  */
6585                 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6586                         pfn++;
6587                         SetPageReserved(page);
6588                         continue;
6589                 }
6590 
6591                 BUG_ON(page_count(page));
6592                 BUG_ON(!PageBuddy(page));
6593                 order = page_order(page);
6594 #ifdef CONFIG_DEBUG_VM
6595                 printk(KERN_INFO "remove from free list %lx %d %lx\n",
6596                        pfn, 1 << order, end_pfn);
6597 #endif
6598                 list_del(&page->lru);
6599                 rmv_page_order(page);
6600                 zone->free_area[order].nr_free--;
6601                 for (i = 0; i < (1 << order); i++)
6602                         SetPageReserved((page+i));
6603                 pfn += (1 << order);
6604         }
6605         spin_unlock_irqrestore(&zone->lock, flags);
6606 }
6607 #endif
6608 
6609 #ifdef CONFIG_MEMORY_FAILURE
6610 bool is_free_buddy_page(struct page *page)
6611 {
6612         struct zone *zone = page_zone(page);
6613         unsigned long pfn = page_to_pfn(page);
6614         unsigned long flags;
6615         unsigned int order;
6616 
6617         spin_lock_irqsave(&zone->lock, flags);
6618         for (order = 0; order < MAX_ORDER; order++) {
6619                 struct page *page_head = page - (pfn & ((1 << order) - 1));
6620 
6621                 if (PageBuddy(page_head) && page_order(page_head) >= order)
6622                         break;
6623         }
6624         spin_unlock_irqrestore(&zone->lock, flags);
6625 
6626         return order < MAX_ORDER;
6627 }
6628 #endif
6629 
