
mm/shmem.c

  1 /*
  2  * Resizable virtual memory filesystem for Linux.
  3  *
  4  * Copyright (C) 2000 Linus Torvalds.
  5  *               2000 Transmeta Corp.
  6  *               2000-2001 Christoph Rohland
  7  *               2000-2001 SAP AG
  8  *               2002 Red Hat Inc.
  9  * Copyright (C) 2002-2011 Hugh Dickins.
 10  * Copyright (C) 2011 Google Inc.
 11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
 12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
 13  *
 14  * Extended attribute support for tmpfs:
 15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 17  *
 18  * tiny-shmem:
 19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 20  *
 21  * This file is released under the GPL.
 22  */
 23 
 24 #include <linux/fs.h>
 25 #include <linux/init.h>
 26 #include <linux/vfs.h>
 27 #include <linux/mount.h>
 28 #include <linux/ramfs.h>
 29 #include <linux/pagemap.h>
 30 #include <linux/file.h>
 31 #include <linux/mm.h>
 32 #include <linux/export.h>
 33 #include <linux/swap.h>
 34 #include <linux/aio.h>
 35 
 36 static struct vfsmount *shm_mnt;
 37 
 38 #ifdef CONFIG_SHMEM
 39 /*
 40  * This virtual memory filesystem is heavily based on the ramfs. It
 41  * extends ramfs by the ability to use swap and honor resource limits
 42  * which makes it a completely usable filesystem.
 43  */
 44 
 45 #include <linux/xattr.h>
 46 #include <linux/exportfs.h>
 47 #include <linux/posix_acl.h>
 48 #include <linux/posix_acl_xattr.h>
 49 #include <linux/mman.h>
 50 #include <linux/string.h>
 51 #include <linux/slab.h>
 52 #include <linux/backing-dev.h>
 53 #include <linux/shmem_fs.h>
 54 #include <linux/writeback.h>
 55 #include <linux/blkdev.h>
 56 #include <linux/pagevec.h>
 57 #include <linux/percpu_counter.h>
 58 #include <linux/falloc.h>
 59 #include <linux/splice.h>
 60 #include <linux/security.h>
 61 #include <linux/swapops.h>
 62 #include <linux/mempolicy.h>
 63 #include <linux/namei.h>
 64 #include <linux/ctype.h>
 65 #include <linux/migrate.h>
 66 #include <linux/highmem.h>
 67 #include <linux/seq_file.h>
 68 #include <linux/magic.h>
 69 #include <linux/syscalls.h>
 70 #include <linux/fcntl.h>
 71 #include <uapi/linux/memfd.h>
 72 
 73 #include <asm/uaccess.h>
 74 #include <asm/pgtable.h>
 75 
 76 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
 77 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
 78 
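/*
 * Worked example, assuming 4K pages (PAGE_CACHE_SIZE == 4096,
 * PAGE_SHIFT == 12): i_blocks is kept in 512-byte units, so
 * BLOCKS_PER_PAGE == 4096/512 == 8; and VM_ACCT(5000) ==
 * PAGE_CACHE_ALIGN(5000) >> PAGE_SHIFT == 8192 >> 12 == 2, i.e. a
 * 5000-byte object is accounted as two whole pages.
 */
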
 79 /* Pretend that each entry is of this size in directory's i_size */
 80 #define BOGO_DIRENT_SIZE 20
 81 
 82 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
 83 #define SHORT_SYMLINK_LEN 128
 84 
 85 /*
 86  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 87  * inode->i_private (with i_mutex making sure that it has only one user at
 88  * a time): we would prefer not to enlarge the shmem inode just for that.
 89  */
 90 struct shmem_falloc {
 91         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 92         pgoff_t start;          /* start of range currently being fallocated */
 93         pgoff_t next;           /* the next page offset to be fallocated */
 94         pgoff_t nr_falloced;    /* how many new pages have been fallocated */
 95         pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
 96 };
 97 
 98 /* Flag allocation requirements to shmem_getpage */
 99 enum sgp_type {
100         SGP_READ,       /* don't exceed i_size, don't allocate page */
101         SGP_CACHE,      /* don't exceed i_size, may allocate page */
102         SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
103         SGP_WRITE,      /* may exceed i_size, may allocate !Uptodate page */
104         SGP_FALLOC,     /* like SGP_WRITE, but make existing page Uptodate */
105 };
106 
107 #ifdef CONFIG_TMPFS
108 static unsigned long shmem_default_max_blocks(void)
109 {
110         return totalram_pages / 2;
111 }
112 
113 static unsigned long shmem_default_max_inodes(void)
114 {
115         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
116 }
117 #endif
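/*
 * Worked example, assuming 4K pages: with 1GiB of RAM,
 * totalram_pages == 262144, so an unsized tmpfs mount defaults to
 * 131072 blocks (512MiB); max_inodes defaults to the smaller of the
 * lowmem page count and half of all pages.
 */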
118 
119 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
120 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
121                                 struct shmem_inode_info *info, pgoff_t index);
122 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
123         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
124 
125 static inline int shmem_getpage(struct inode *inode, pgoff_t index,
126         struct page **pagep, enum sgp_type sgp, int *fault_type)
127 {
128         return shmem_getpage_gfp(inode, index, pagep, sgp,
129                         mapping_gfp_mask(inode->i_mapping), fault_type);
130 }
131 
132 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
133 {
134         return sb->s_fs_info;
135 }
136 
137 /*
138  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
139  * for shared memory and for shared anonymous (/dev/zero) mappings
140  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
141  * consistent with the pre-accounting of private mappings ...
142  */
143 static inline int shmem_acct_size(unsigned long flags, loff_t size)
144 {
145         return (flags & VM_NORESERVE) ?
146                 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
147 }
148 
149 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
150 {
151         if (!(flags & VM_NORESERVE))
152                 vm_unacct_memory(VM_ACCT(size));
153 }
154 
155 static inline int shmem_reacct_size(unsigned long flags,
156                 loff_t oldsize, loff_t newsize)
157 {
158         if (!(flags & VM_NORESERVE)) {
159                 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
160                         return security_vm_enough_memory_mm(current->mm,
161                                         VM_ACCT(newsize) - VM_ACCT(oldsize));
162                 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
163                         vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
164         }
165         return 0;
166 }
167 
168 /*
169  * ... whereas tmpfs objects are accounted incrementally as
170  * pages are allocated, in order to allow huge sparse files.
171  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
172  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
173  */
174 static inline int shmem_acct_block(unsigned long flags)
175 {
176         return (flags & VM_NORESERVE) ?
177                 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
178 }
179 
180 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
181 {
182         if (flags & VM_NORESERVE)
183                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
184 }
185 
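/*
 * Illustrative sketch (hypothetical helper, not part of this file) of
 * how the two schemes pair up: an object created without VM_NORESERVE
 * pays for its whole size up front and never per block, while a
 * VM_NORESERVE object pays nothing up front and is charged page by
 * page instead.
 */
static inline int shmem_acct_example(unsigned long flags, loff_t size)
{
        int error = shmem_acct_size(flags, size); /* no-op if VM_NORESERVE */
        if (error)
                return error;   /* over the overcommit limit */
        /*
         * Later, each newly allocated page goes through
         * shmem_acct_block(flags), which charges only in the
         * VM_NORESERVE case, so exactly one of the two schemes ever
         * accounts a given object.
         */
        return 0;
}
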
186 static const struct super_operations shmem_ops;
187 static const struct address_space_operations shmem_aops;
188 static const struct file_operations shmem_file_operations;
189 static const struct inode_operations shmem_inode_operations;
190 static const struct inode_operations shmem_dir_inode_operations;
191 static const struct inode_operations shmem_special_inode_operations;
192 static const struct vm_operations_struct shmem_vm_ops;
193 
194 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
195         .ra_pages       = 0,    /* No readahead */
196         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
197 };
198 
199 static LIST_HEAD(shmem_swaplist);
200 static DEFINE_MUTEX(shmem_swaplist_mutex);
201 
202 static int shmem_reserve_inode(struct super_block *sb)
203 {
204         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
205         if (sbinfo->max_inodes) {
206                 spin_lock(&sbinfo->stat_lock);
207                 if (!sbinfo->free_inodes) {
208                         spin_unlock(&sbinfo->stat_lock);
209                         return -ENOSPC;
210                 }
211                 sbinfo->free_inodes--;
212                 spin_unlock(&sbinfo->stat_lock);
213         }
214         return 0;
215 }
216 
217 static void shmem_free_inode(struct super_block *sb)
218 {
219         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
220         if (sbinfo->max_inodes) {
221                 spin_lock(&sbinfo->stat_lock);
222                 sbinfo->free_inodes++;
223                 spin_unlock(&sbinfo->stat_lock);
224         }
225 }
226 
227 /**
228  * shmem_recalc_inode - recalculate the block usage of an inode
229  * @inode: inode to recalc
230  *
231  * We have to calculate the free blocks since the mm can drop
232  * undirtied hole pages behind our back.
233  *
234  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
235  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
236  *
237  * It has to be called with the spinlock held.
238  */
239 static void shmem_recalc_inode(struct inode *inode)
240 {
241         struct shmem_inode_info *info = SHMEM_I(inode);
242         long freed;
243 
244         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
245         if (freed > 0) {
246                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
247                 if (sbinfo->max_blocks)
248                         percpu_counter_add(&sbinfo->used_blocks, -freed);
249                 info->alloced -= freed;
250                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
251                 shmem_unacct_blocks(info->flags, freed);
252         }
253 }
254 
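/*
 * Worked example of the identity above: if info->alloced == 10 but
 * the mm has meanwhile reclaimed undirtied hole pages so that
 * nrpages == 4 and info->swapped == 3, then freed == 10 - 3 - 4 == 3,
 * and three pages' worth of blocks and i_blocks are given back.
 */
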
255 /*
256  * Replace item expected in radix tree by a new item, while holding tree lock.
257  */
258 static int shmem_radix_tree_replace(struct address_space *mapping,
259                         pgoff_t index, void *expected, void *replacement)
260 {
261         void **pslot;
262         void *item;
263 
264         VM_BUG_ON(!expected);
265         VM_BUG_ON(!replacement);
266         pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
267         if (!pslot)
268                 return -ENOENT;
269         item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
270         if (item != expected)
271                 return -ENOENT;
272         radix_tree_replace_slot(pslot, replacement);
273         return 0;
274 }
275 
276 /*
277  * Sometimes, before we decide whether to proceed or to fail, we must check
278  * that an entry was not already brought back from swap by a racing thread.
279  *
280  * Checking page is not enough: by the time a SwapCache page is locked, it
281  * might be reused, and again be SwapCache, using the same swap as before.
282  */
283 static bool shmem_confirm_swap(struct address_space *mapping,
284                                pgoff_t index, swp_entry_t swap)
285 {
286         void *item;
287 
288         rcu_read_lock();
289         item = radix_tree_lookup(&mapping->page_tree, index);
290         rcu_read_unlock();
291         return item == swp_to_radix_entry(swap);
292 }
293 
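/*
 * A sketch of the encoding relied on here (swp_to_radix_entry() and
 * radix_to_swp_entry() come from <linux/swapops.h> in this era): a
 * swap entry is stored in the page-cache radix tree as an
 * "exceptional" pointer, so a slot can hold either a struct page *
 * or an encoded swp_entry_t, and the two are told apart by
 * radix_tree_exceptional_entry().
 */
static inline bool shmem_swap_entry_roundtrip(swp_entry_t swap)
{
        void *radswap = swp_to_radix_entry(swap);

        return radix_tree_exceptional_entry(radswap) &&
               radix_to_swp_entry(radswap).val == swap.val;
}
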
294 /*
295  * Like add_to_page_cache_locked, but error if expected item has gone.
296  */
297 static int shmem_add_to_page_cache(struct page *page,
298                                    struct address_space *mapping,
299                                    pgoff_t index, void *expected)
300 {
301         int error;
302 
303         VM_BUG_ON_PAGE(!PageLocked(page), page);
304         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
305 
306         page_cache_get(page);
307         page->mapping = mapping;
308         page->index = index;
309 
310         spin_lock_irq(&mapping->tree_lock);
311         if (!expected)
312                 error = radix_tree_insert(&mapping->page_tree, index, page);
313         else
314                 error = shmem_radix_tree_replace(mapping, index, expected,
315                                                                  page);
316         if (!error) {
317                 mapping->nrpages++;
318                 __inc_zone_page_state(page, NR_FILE_PAGES);
319                 __inc_zone_page_state(page, NR_SHMEM);
320                 spin_unlock_irq(&mapping->tree_lock);
321         } else {
322                 page->mapping = NULL;
323                 spin_unlock_irq(&mapping->tree_lock);
324                 page_cache_release(page);
325         }
326         return error;
327 }
328 
329 /*
330  * Like delete_from_page_cache, but substitutes swap for page.
331  */
332 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
333 {
334         struct address_space *mapping = page->mapping;
335         int error;
336 
337         spin_lock_irq(&mapping->tree_lock);
338         error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
339         page->mapping = NULL;
340         mapping->nrpages--;
341         __dec_zone_page_state(page, NR_FILE_PAGES);
342         __dec_zone_page_state(page, NR_SHMEM);
343         spin_unlock_irq(&mapping->tree_lock);
344         page_cache_release(page);
345         BUG_ON(error);
346 }
347 
348 /*
349  * Remove swap entry from radix tree, free the swap and its page cache.
350  */
351 static int shmem_free_swap(struct address_space *mapping,
352                            pgoff_t index, void *radswap)
353 {
354         void *old;
355 
356         spin_lock_irq(&mapping->tree_lock);
357         old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
358         spin_unlock_irq(&mapping->tree_lock);
359         if (old != radswap)
360                 return -ENOENT;
361         free_swap_and_cache(radix_to_swp_entry(radswap));
362         return 0;
363 }
364 
365 /*
 366  * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
367  */
368 void shmem_unlock_mapping(struct address_space *mapping)
369 {
370         struct pagevec pvec;
371         pgoff_t indices[PAGEVEC_SIZE];
372         pgoff_t index = 0;
373 
374         pagevec_init(&pvec, 0);
375         /*
376          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
377          */
378         while (!mapping_unevictable(mapping)) {
379                 /*
380                  * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
381                  * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
382                  */
383                 pvec.nr = find_get_entries(mapping, index,
384                                            PAGEVEC_SIZE, pvec.pages, indices);
385                 if (!pvec.nr)
386                         break;
387                 index = indices[pvec.nr - 1] + 1;
388                 pagevec_remove_exceptionals(&pvec);
389                 check_move_unevictable_pages(pvec.pages, pvec.nr);
390                 pagevec_release(&pvec);
391                 cond_resched();
392         }
393 }
394 
395 /*
396  * Remove range of pages and swap entries from radix tree, and free them.
397  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
398  */
399 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
400                                                                  bool unfalloc)
401 {
402         struct address_space *mapping = inode->i_mapping;
403         struct shmem_inode_info *info = SHMEM_I(inode);
404         pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
405         pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
406         unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
407         unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
408         struct pagevec pvec;
409         pgoff_t indices[PAGEVEC_SIZE];
410         long nr_swaps_freed = 0;
411         pgoff_t index;
412         int i;
413 
414         if (lend == -1)
415                 end = -1;       /* unsigned, so actually very big */
416 
417         pagevec_init(&pvec, 0);
418         index = start;
419         while (index < end) {
420                 pvec.nr = find_get_entries(mapping, index,
421                         min(end - index, (pgoff_t)PAGEVEC_SIZE),
422                         pvec.pages, indices);
423                 if (!pvec.nr)
424                         break;
425                 for (i = 0; i < pagevec_count(&pvec); i++) {
426                         struct page *page = pvec.pages[i];
427 
428                         index = indices[i];
429                         if (index >= end)
430                                 break;
431 
432                         if (radix_tree_exceptional_entry(page)) {
433                                 if (unfalloc)
434                                         continue;
435                                 nr_swaps_freed += !shmem_free_swap(mapping,
436                                                                 index, page);
437                                 continue;
438                         }
439 
440                         if (!trylock_page(page))
441                                 continue;
442                         if (!unfalloc || !PageUptodate(page)) {
443                                 if (page->mapping == mapping) {
444                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
445                                         truncate_inode_page(mapping, page);
446                                 }
447                         }
448                         unlock_page(page);
449                 }
450                 pagevec_remove_exceptionals(&pvec);
451                 pagevec_release(&pvec);
452                 cond_resched();
453                 index++;
454         }
455 
456         if (partial_start) {
457                 struct page *page = NULL;
458                 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
459                 if (page) {
460                         unsigned int top = PAGE_CACHE_SIZE;
461                         if (start > end) {
462                                 top = partial_end;
463                                 partial_end = 0;
464                         }
465                         zero_user_segment(page, partial_start, top);
466                         set_page_dirty(page);
467                         unlock_page(page);
468                         page_cache_release(page);
469                 }
470         }
471         if (partial_end) {
472                 struct page *page = NULL;
473                 shmem_getpage(inode, end, &page, SGP_READ, NULL);
474                 if (page) {
475                         zero_user_segment(page, 0, partial_end);
476                         set_page_dirty(page);
477                         unlock_page(page);
478                         page_cache_release(page);
479                 }
480         }
481         if (start >= end)
482                 return;
483 
484         index = start;
485         while (index < end) {
486                 cond_resched();
487 
488                 pvec.nr = find_get_entries(mapping, index,
489                                 min(end - index, (pgoff_t)PAGEVEC_SIZE),
490                                 pvec.pages, indices);
491                 if (!pvec.nr) {
492                         /* If all gone or hole-punch or unfalloc, we're done */
493                         if (index == start || end != -1)
494                                 break;
495                         /* But if truncating, restart to make sure all gone */
496                         index = start;
497                         continue;
498                 }
499                 for (i = 0; i < pagevec_count(&pvec); i++) {
500                         struct page *page = pvec.pages[i];
501 
502                         index = indices[i];
503                         if (index >= end)
504                                 break;
505 
506                         if (radix_tree_exceptional_entry(page)) {
507                                 if (unfalloc)
508                                         continue;
509                                 if (shmem_free_swap(mapping, index, page)) {
510                                         /* Swap was replaced by page: retry */
511                                         index--;
512                                         break;
513                                 }
514                                 nr_swaps_freed++;
515                                 continue;
516                         }
517 
518                         lock_page(page);
519                         if (!unfalloc || !PageUptodate(page)) {
520                                 if (page->mapping == mapping) {
521                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
522                                         truncate_inode_page(mapping, page);
523                                 } else {
524                                         /* Page was replaced by swap: retry */
525                                         unlock_page(page);
526                                         index--;
527                                         break;
528                                 }
529                         }
530                         unlock_page(page);
531                 }
532                 pagevec_remove_exceptionals(&pvec);
533                 pagevec_release(&pvec);
534                 index++;
535         }
536 
537         spin_lock(&info->lock);
538         info->swapped -= nr_swaps_freed;
539         shmem_recalc_inode(inode);
540         spin_unlock(&info->lock);
541 }
542 
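/*
 * Worked example of the index arithmetic above, assuming 4K pages:
 * for a hole-punch with lstart == 1000 and lend == 9999, start == 1
 * and end == 2, so only whole page 1 is removed; partial_start ==
 * 1000 means page 0 is zeroed from byte 1000 to its end, and
 * partial_end == 1808 means page 2 is zeroed from byte 0 to 1808.
 */
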
543 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
544 {
545         shmem_undo_range(inode, lstart, lend, false);
546         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
547 }
548 EXPORT_SYMBOL_GPL(shmem_truncate_range);
549 
550 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
551 {
552         struct inode *inode = dentry->d_inode;
553         struct shmem_inode_info *info = SHMEM_I(inode);
554         int error;
555 
556         error = inode_change_ok(inode, attr);
557         if (error)
558                 return error;
559 
560         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
561                 loff_t oldsize = inode->i_size;
562                 loff_t newsize = attr->ia_size;
563 
564                 /* protected by i_mutex */
565                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
566                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
567                         return -EPERM;
568 
569                 if (newsize != oldsize) {
570                         error = shmem_reacct_size(SHMEM_I(inode)->flags,
571                                         oldsize, newsize);
572                         if (error)
573                                 return error;
574                         i_size_write(inode, newsize);
575                         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
576                 }
577                 if (newsize < oldsize) {
578                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
579                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
580                         shmem_truncate_range(inode, newsize, (loff_t)-1);
581                         /* unmap again to remove racily COWed private pages */
582                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
583                 }
584         }
585 
586         setattr_copy(inode, attr);
587         if (attr->ia_valid & ATTR_MODE)
588                 error = posix_acl_chmod(inode, inode->i_mode);
589         return error;
590 }
591 
592 static void shmem_evict_inode(struct inode *inode)
593 {
594         struct shmem_inode_info *info = SHMEM_I(inode);
595 
596         if (inode->i_mapping->a_ops == &shmem_aops) {
597                 shmem_unacct_size(info->flags, inode->i_size);
598                 inode->i_size = 0;
599                 shmem_truncate_range(inode, 0, (loff_t)-1);
600                 if (!list_empty(&info->swaplist)) {
601                         mutex_lock(&shmem_swaplist_mutex);
602                         list_del_init(&info->swaplist);
603                         mutex_unlock(&shmem_swaplist_mutex);
604                 }
605         } else
606                 kfree(info->symlink);
607 
608         simple_xattrs_free(&info->xattrs);
609         WARN_ON(inode->i_blocks);
610         shmem_free_inode(inode->i_sb);
611         clear_inode(inode);
612 }
613 
614 /*
615  * If swap found in inode, free it and move page from swapcache to filecache.
616  */
617 static int shmem_unuse_inode(struct shmem_inode_info *info,
618                              swp_entry_t swap, struct page **pagep)
619 {
620         struct address_space *mapping = info->vfs_inode.i_mapping;
621         void *radswap;
622         pgoff_t index;
623         gfp_t gfp;
624         int error = 0;
625 
626         radswap = swp_to_radix_entry(swap);
627         index = radix_tree_locate_item(&mapping->page_tree, radswap);
628         if (index == -1)
629                 return -EAGAIN; /* tell shmem_unuse we found nothing */
630 
631         /*
632          * Move _head_ to start search for next from here.
633          * But be careful: shmem_evict_inode checks list_empty without taking
634          * mutex, and there's an instant in list_move_tail when info->swaplist
635          * would appear empty, if it were the only one on shmem_swaplist.
636          */
637         if (shmem_swaplist.next != &info->swaplist)
638                 list_move_tail(&shmem_swaplist, &info->swaplist);
639 
640         gfp = mapping_gfp_mask(mapping);
641         if (shmem_should_replace_page(*pagep, gfp)) {
642                 mutex_unlock(&shmem_swaplist_mutex);
643                 error = shmem_replace_page(pagep, gfp, info, index);
644                 mutex_lock(&shmem_swaplist_mutex);
645                 /*
646                  * We needed to drop mutex to make that restrictive page
647                  * allocation, but the inode might have been freed while we
648                  * dropped it: although a racing shmem_evict_inode() cannot
649                  * complete without emptying the radix_tree, our page lock
650                  * on this swapcache page is not enough to prevent that -
651                  * free_swap_and_cache() of our swap entry will only
652                  * trylock_page(), removing swap from radix_tree whatever.
653                  *
654                  * We must not proceed to shmem_add_to_page_cache() if the
655                  * inode has been freed, but of course we cannot rely on
656                  * inode or mapping or info to check that.  However, we can
657                  * safely check if our swap entry is still in use (and here
658                  * it can't have got reused for another page): if it's still
659                  * in use, then the inode cannot have been freed yet, and we
660                  * can safely proceed (if it's no longer in use, that tells
661                  * nothing about the inode, but we don't need to unuse swap).
662                  */
663                 if (!page_swapcount(*pagep))
664                         error = -ENOENT;
665         }
666 
667         /*
668          * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
669          * but also to hold up shmem_evict_inode(): so inode cannot be freed
670          * beneath us (pagelock doesn't help until the page is in pagecache).
671          */
672         if (!error)
673                 error = shmem_add_to_page_cache(*pagep, mapping, index,
674                                                 radswap);
675         if (error != -ENOMEM) {
676                 /*
677                  * Truncation and eviction use free_swap_and_cache(), which
678                  * only does trylock page: if we raced, best clean up here.
679                  */
680                 delete_from_swap_cache(*pagep);
681                 set_page_dirty(*pagep);
682                 if (!error) {
683                         spin_lock(&info->lock);
684                         info->swapped--;
685                         spin_unlock(&info->lock);
686                         swap_free(swap);
687                 }
688         }
689         return error;
690 }
691 
692 /*
693  * Search through swapped inodes to find and replace swap by page.
694  */
695 int shmem_unuse(swp_entry_t swap, struct page *page)
696 {
697         struct list_head *this, *next;
698         struct shmem_inode_info *info;
699         struct mem_cgroup *memcg;
700         int error = 0;
701 
702         /*
703          * There's a faint possibility that swap page was replaced before
704          * caller locked it: caller will come back later with the right page.
705          */
706         if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
707                 goto out;
708 
709         /*
710          * Charge page using GFP_KERNEL while we can wait, before taking
711          * the shmem_swaplist_mutex which might hold up shmem_writepage().
712          * Charged back to the user (not to caller) when swap account is used.
713          */
714         error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
715         if (error)
716                 goto out;
717         /* No radix_tree_preload: swap entry keeps a place for page in tree */
718         error = -EAGAIN;
719 
720         mutex_lock(&shmem_swaplist_mutex);
721         list_for_each_safe(this, next, &shmem_swaplist) {
722                 info = list_entry(this, struct shmem_inode_info, swaplist);
723                 if (info->swapped)
724                         error = shmem_unuse_inode(info, swap, &page);
725                 else
726                         list_del_init(&info->swaplist);
727                 cond_resched();
728                 if (error != -EAGAIN)
729                         break;
730                 /* found nothing in this: move on to search the next */
731         }
732         mutex_unlock(&shmem_swaplist_mutex);
733 
734         if (error) {
735                 if (error != -ENOMEM)
736                         error = 0;
737                 mem_cgroup_cancel_charge(page, memcg);
738         } else
739                 mem_cgroup_commit_charge(page, memcg, true);
740 out:
741         unlock_page(page);
742         page_cache_release(page);
743         return error;
744 }
745 
746 /*
747  * Move the page from the page cache to the swap cache.
748  */
749 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
750 {
751         struct shmem_inode_info *info;
752         struct address_space *mapping;
753         struct inode *inode;
754         swp_entry_t swap;
755         pgoff_t index;
756 
757         BUG_ON(!PageLocked(page));
758         mapping = page->mapping;
759         index = page->index;
760         inode = mapping->host;
761         info = SHMEM_I(inode);
762         if (info->flags & VM_LOCKED)
763                 goto redirty;
764         if (!total_swap_pages)
765                 goto redirty;
766 
767         /*
768          * shmem_backing_dev_info's capabilities prevent regular writeback or
769          * sync from ever calling shmem_writepage; but a stacking filesystem
770          * might use ->writepage of its underlying filesystem, in which case
771          * tmpfs should write out to swap only in response to memory pressure,
772          * and not for the writeback threads or sync.
773          */
774         if (!wbc->for_reclaim) {
775                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
776                 goto redirty;
777         }
778 
779         /*
780          * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
781          * value into swapfile.c, the only way we can correctly account for a
782          * fallocated page arriving here is now to initialize it and write it.
783          *
784          * That's okay for a page already fallocated earlier, but if we have
785          * not yet completed the fallocation, then (a) we want to keep track
786          * of this page in case we have to undo it, and (b) it may not be a
787          * good idea to continue anyway, once we're pushing into swap.  So
788          * reactivate the page, and let shmem_fallocate() quit when too many.
789          */
790         if (!PageUptodate(page)) {
791                 if (inode->i_private) {
792                         struct shmem_falloc *shmem_falloc;
793                         spin_lock(&inode->i_lock);
794                         shmem_falloc = inode->i_private;
795                         if (shmem_falloc &&
796                             !shmem_falloc->waitq &&
797                             index >= shmem_falloc->start &&
798                             index < shmem_falloc->next)
799                                 shmem_falloc->nr_unswapped++;
800                         else
801                                 shmem_falloc = NULL;
802                         spin_unlock(&inode->i_lock);
803                         if (shmem_falloc)
804                                 goto redirty;
805                 }
806                 clear_highpage(page);
807                 flush_dcache_page(page);
808                 SetPageUptodate(page);
809         }
810 
811         swap = get_swap_page();
812         if (!swap.val)
813                 goto redirty;
814 
815         /*
816          * Add inode to shmem_unuse()'s list of swapped-out inodes,
817          * if it's not already there.  Do it now before the page is
818          * moved to swap cache, when its pagelock no longer protects
819          * the inode from eviction.  But don't unlock the mutex until
820          * we've incremented swapped, because shmem_unuse_inode() will
821          * prune a !swapped inode from the swaplist under this mutex.
822          */
823         mutex_lock(&shmem_swaplist_mutex);
824         if (list_empty(&info->swaplist))
825                 list_add_tail(&info->swaplist, &shmem_swaplist);
826 
827         if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
828                 swap_shmem_alloc(swap);
829                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
830 
831                 spin_lock(&info->lock);
832                 info->swapped++;
833                 shmem_recalc_inode(inode);
834                 spin_unlock(&info->lock);
835 
836                 mutex_unlock(&shmem_swaplist_mutex);
837                 BUG_ON(page_mapped(page));
838                 swap_writepage(page, wbc);
839                 return 0;
840         }
841 
842         mutex_unlock(&shmem_swaplist_mutex);
843         swapcache_free(swap);
844 redirty:
845         set_page_dirty(page);
846         if (wbc->for_reclaim)
847                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
848         unlock_page(page);
849         return 0;
850 }
851 
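/*
 * Sketch of the interplay described above: shmem_fallocate() keeps a
 * struct shmem_falloc on its stack and publishes it through
 * inode->i_private; each time reclaim sends a not-yet-written
 * fallocated page here, nr_unswapped is bumped and the page is
 * redirtied, so the fallocating task can notice it is fighting
 * reclaim and give up early instead of pushing its new pages into
 * swap.
 */
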
852 #ifdef CONFIG_NUMA
853 #ifdef CONFIG_TMPFS
854 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
855 {
856         char buffer[64];
857 
858         if (!mpol || mpol->mode == MPOL_DEFAULT)
859                 return;         /* show nothing */
860 
861         mpol_to_str(buffer, sizeof(buffer), mpol);
862 
863         seq_printf(seq, ",mpol=%s", buffer);
864 }
865 
866 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
867 {
868         struct mempolicy *mpol = NULL;
869         if (sbinfo->mpol) {
870                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
871                 mpol = sbinfo->mpol;
872                 mpol_get(mpol);
873                 spin_unlock(&sbinfo->stat_lock);
874         }
875         return mpol;
876 }
877 #endif /* CONFIG_TMPFS */
878 
879 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
880                         struct shmem_inode_info *info, pgoff_t index)
881 {
882         struct vm_area_struct pvma;
883         struct page *page;
884 
885         /* Create a pseudo vma that just contains the policy */
886         pvma.vm_start = 0;
887         /* Bias interleave by inode number to distribute better across nodes */
888         pvma.vm_pgoff = index + info->vfs_inode.i_ino;
889         pvma.vm_ops = NULL;
890         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
891 
892         page = swapin_readahead(swap, gfp, &pvma, 0);
893 
894         /* Drop reference taken by mpol_shared_policy_lookup() */
895         mpol_cond_put(pvma.vm_policy);
896 
897         return page;
898 }
899 
900 static struct page *shmem_alloc_page(gfp_t gfp,
901                         struct shmem_inode_info *info, pgoff_t index)
902 {
903         struct vm_area_struct pvma;
904         struct page *page;
905 
906         /* Create a pseudo vma that just contains the policy */
907         pvma.vm_start = 0;
908         /* Bias interleave by inode number to distribute better across nodes */
909         pvma.vm_pgoff = index + info->vfs_inode.i_ino;
910         pvma.vm_ops = NULL;
911         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
912 
913         page = alloc_page_vma(gfp, &pvma, 0);
914 
915         /* Drop reference taken by mpol_shared_policy_lookup() */
916         mpol_cond_put(pvma.vm_policy);
917 
918         return page;
919 }
920 #else /* !CONFIG_NUMA */
921 #ifdef CONFIG_TMPFS
922 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
923 {
924 }
925 #endif /* CONFIG_TMPFS */
926 
927 static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
928                         struct shmem_inode_info *info, pgoff_t index)
929 {
930         return swapin_readahead(swap, gfp, NULL, 0);
931 }
932 
933 static inline struct page *shmem_alloc_page(gfp_t gfp,
934                         struct shmem_inode_info *info, pgoff_t index)
935 {
936         return alloc_page(gfp);
937 }
938 #endif /* CONFIG_NUMA */
939 
940 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
941 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
942 {
943         return NULL;
944 }
945 #endif
946 
947 /*
948  * When a page is moved from swapcache to shmem filecache (either by the
949  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
950  * shmem_unuse_inode()), it may have been read in earlier from swap, in
951  * ignorance of the mapping it belongs to.  If that mapping has special
952  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
953  * we may need to copy to a suitable page before moving to filecache.
954  *
955  * In a future release, this may well be extended to respect cpuset and
956  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
957  * but for now it is a simple matter of zone.
958  */
959 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
960 {
961         return page_zonenum(page) > gfp_zone(gfp);
962 }
963 
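/*
 * A sketch of the zone comparison above: a mapping such as the gma500
 * GEM driver's might set its gfp mask to GFP_USER | __GFP_DMA32,
 * whose gfp_zone() is ZONE_DMA32; a swapcache page that was read into
 * ZONE_NORMAL then has page_zonenum(page) > gfp_zone(gfp), and must
 * be copied to a lower page before entering this mapping's pagecache.
 */
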
964 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
965                                 struct shmem_inode_info *info, pgoff_t index)
966 {
967         struct page *oldpage, *newpage;
968         struct address_space *swap_mapping;
969         pgoff_t swap_index;
970         int error;
971 
972         oldpage = *pagep;
973         swap_index = page_private(oldpage);
974         swap_mapping = page_mapping(oldpage);
975 
976         /*
977          * We have arrived here because our zones are constrained, so don't
978          * limit chance of success by further cpuset and node constraints.
979          */
980         gfp &= ~GFP_CONSTRAINT_MASK;
981         newpage = shmem_alloc_page(gfp, info, index);
982         if (!newpage)
983                 return -ENOMEM;
984 
985         page_cache_get(newpage);
986         copy_highpage(newpage, oldpage);
987         flush_dcache_page(newpage);
988 
989         __set_page_locked(newpage);
990         SetPageUptodate(newpage);
991         SetPageSwapBacked(newpage);
992         set_page_private(newpage, swap_index);
993         SetPageSwapCache(newpage);
994 
995         /*
996          * Our caller will very soon move newpage out of swapcache, but it's
997          * a nice clean interface for us to replace oldpage by newpage there.
998          */
999         spin_lock_irq(&swap_mapping->tree_lock);
1000         error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1001                                                                    newpage);
1002         if (!error) {
1003                 __inc_zone_page_state(newpage, NR_FILE_PAGES);
1004                 __dec_zone_page_state(oldpage, NR_FILE_PAGES);
1005         }
1006         spin_unlock_irq(&swap_mapping->tree_lock);
1007 
1008         if (unlikely(error)) {
1009                 /*
1010                  * Is this possible?  I think not, now that our callers check
1011                  * both PageSwapCache and page_private after getting page lock;
1012                  * but be defensive.  Reverse old to newpage for clear and free.
1013                  */
1014                 oldpage = newpage;
1015         } else {
1016                 mem_cgroup_migrate(oldpage, newpage, false);
1017                 lru_cache_add_anon(newpage);
1018                 *pagep = newpage;
1019         }
1020 
1021         ClearPageSwapCache(oldpage);
1022         set_page_private(oldpage, 0);
1023 
1024         unlock_page(oldpage);
1025         page_cache_release(oldpage);
1026         page_cache_release(oldpage);
1027         return error;
1028 }
1029 
1030 /*
1031  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1032  *
1033  * If we allocate a new one we do not mark it dirty. That's up to the
1034  * vm. If we swap it in we mark it dirty since we also free the swap
1035  * entry since a page cannot live in both the swap and page cache
1036  */
1037 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1038         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
1039 {
1040         struct address_space *mapping = inode->i_mapping;
1041         struct shmem_inode_info *info;
1042         struct shmem_sb_info *sbinfo;
1043         struct mem_cgroup *memcg;
1044         struct page *page;
1045         swp_entry_t swap;
1046         int error;
1047         int once = 0;
1048         int alloced = 0;
1049 
1050         if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
1051                 return -EFBIG;
1052 repeat:
1053         swap.val = 0;
1054         page = find_lock_entry(mapping, index);
1055         if (radix_tree_exceptional_entry(page)) {
1056                 swap = radix_to_swp_entry(page);
1057                 page = NULL;
1058         }
1059 
1060         if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1061             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1062                 error = -EINVAL;
1063                 goto failed;
1064         }
1065 
1066         if (page && sgp == SGP_WRITE)
1067                 mark_page_accessed(page);
1068 
1069         /* fallocated page? */
1070         if (page && !PageUptodate(page)) {
1071                 if (sgp != SGP_READ)
1072                         goto clear;
1073                 unlock_page(page);
1074                 page_cache_release(page);
1075                 page = NULL;
1076         }
1077         if (page || (sgp == SGP_READ && !swap.val)) {
1078                 *pagep = page;
1079                 return 0;
1080         }
1081 
1082         /*
1083          * Fast cache lookup did not find it:
1084          * bring it back from swap or allocate.
1085          */
1086         info = SHMEM_I(inode);
1087         sbinfo = SHMEM_SB(inode->i_sb);
1088 
1089         if (swap.val) {
1090                 /* Look it up and read it in.. */
1091                 page = lookup_swap_cache(swap);
1092                 if (!page) {
1093                         /* here we actually do the io */
1094                         if (fault_type)
1095                                 *fault_type |= VM_FAULT_MAJOR;
1096                         page = shmem_swapin(swap, gfp, info, index);
1097                         if (!page) {
1098                                 error = -ENOMEM;
1099                                 goto failed;
1100                         }
1101                 }
1102 
1103                 /* We have to do this with page locked to prevent races */
1104                 lock_page(page);
1105                 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1106                     !shmem_confirm_swap(mapping, index, swap)) {
1107                         error = -EEXIST;        /* try again */
1108                         goto unlock;
1109                 }
1110                 if (!PageUptodate(page)) {
1111                         error = -EIO;
1112                         goto failed;
1113                 }
1114                 wait_on_page_writeback(page);
1115 
1116                 if (shmem_should_replace_page(page, gfp)) {
1117                         error = shmem_replace_page(&page, gfp, info, index);
1118                         if (error)
1119                                 goto failed;
1120                 }
1121 
1122                 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
1123                 if (!error) {
1124                         error = shmem_add_to_page_cache(page, mapping, index,
1125                                                 swp_to_radix_entry(swap));
1126                         /*
1127                          * We already confirmed swap under page lock, and make
1128                          * no memory allocation here, so usually no possibility
1129                          * of error; but free_swap_and_cache() only trylocks a
1130                          * page, so it is just possible that the entry has been
1131                          * truncated or holepunched since swap was confirmed.
1132                          * shmem_undo_range() will have done some of the
1133                          * unaccounting, now delete_from_swap_cache() will do
1134                          * the rest (including mem_cgroup_uncharge_swapcache).
1135                          * Reset swap.val? No, leave it so "failed" goes back to
1136                          * "repeat": reading a hole and writing should succeed.
1137                          */
1138                         if (error) {
1139                                 mem_cgroup_cancel_charge(page, memcg);
1140                                 delete_from_swap_cache(page);
1141                         }
1142                 }
1143                 if (error)
1144                         goto failed;
1145 
1146                 mem_cgroup_commit_charge(page, memcg, true);
1147 
1148                 spin_lock(&info->lock);
1149                 info->swapped--;
1150                 shmem_recalc_inode(inode);
1151                 spin_unlock(&info->lock);
1152 
1153                 if (sgp == SGP_WRITE)
1154                         mark_page_accessed(page);
1155 
1156                 delete_from_swap_cache(page);
1157                 set_page_dirty(page);
1158                 swap_free(swap);
1159 
1160         } else {
1161                 if (shmem_acct_block(info->flags)) {
1162                         error = -ENOSPC;
1163                         goto failed;
1164                 }
1165                 if (sbinfo->max_blocks) {
1166                         if (percpu_counter_compare(&sbinfo->used_blocks,
1167                                                 sbinfo->max_blocks) >= 0) {
1168                                 error = -ENOSPC;
1169                                 goto unacct;
1170                         }
1171                         percpu_counter_inc(&sbinfo->used_blocks);
1172                 }
1173 
1174                 page = shmem_alloc_page(gfp, info, index);
1175                 if (!page) {
1176                         error = -ENOMEM;
1177                         goto decused;
1178                 }
1179 
1180                 __SetPageSwapBacked(page);
1181                 __set_page_locked(page);
1182                 if (sgp == SGP_WRITE)
1183                         __SetPageReferenced(page);
1184 
1185                 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
1186                 if (error)
1187                         goto decused;
1188                 error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
1189                 if (!error) {
1190                         error = shmem_add_to_page_cache(page, mapping, index,
1191                                                         NULL);
1192                         radix_tree_preload_end();
1193                 }
1194                 if (error) {
1195                         mem_cgroup_cancel_charge(page, memcg);
1196                         goto decused;
1197                 }
1198                 mem_cgroup_commit_charge(page, memcg, false);
1199                 lru_cache_add_anon(page);
1200 
1201                 spin_lock(&info->lock);
1202                 info->alloced++;
1203                 inode->i_blocks += BLOCKS_PER_PAGE;
1204                 shmem_recalc_inode(inode);
1205                 spin_unlock(&info->lock);
1206                 alloced = true;
1207 
1208                 /*
1209                  * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1210                  */
1211                 if (sgp == SGP_FALLOC)
1212                         sgp = SGP_WRITE;
1213 clear:
1214                 /*
1215                  * Let SGP_WRITE caller clear ends if write does not fill page;
1216                  * but SGP_FALLOC on a page fallocated earlier must initialize
1217                  * it now, lest undo on failure cancel our earlier guarantee.
1218                  */
1219                 if (sgp != SGP_WRITE) {
1220                         clear_highpage(page);
1221                         flush_dcache_page(page);
1222                         SetPageUptodate(page);
1223                 }
1224                 if (sgp == SGP_DIRTY)
1225                         set_page_dirty(page);
1226         }
1227 
1228         /* Perhaps the file has been truncated since we checked */
1229         if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1230             ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1231                 error = -EINVAL;
1232                 if (alloced)
1233                         goto trunc;
1234                 else
1235                         goto failed;
1236         }
1237         *pagep = page;
1238         return 0;
1239 
1240         /*
1241          * Error recovery.
1242          */
1243 trunc:
1244         info = SHMEM_I(inode);
1245         ClearPageDirty(page);
1246         delete_from_page_cache(page);
1247         spin_lock(&info->lock);
1248         info->alloced--;
1249         inode->i_blocks -= BLOCKS_PER_PAGE;
1250         spin_unlock(&info->lock);
1251 decused:
1252         sbinfo = SHMEM_SB(inode->i_sb);
1253         if (sbinfo->max_blocks)
1254                 percpu_counter_add(&sbinfo->used_blocks, -1);
1255 unacct:
1256         shmem_unacct_blocks(info->flags, 1);
1257 failed:
1258         if (swap.val && error != -EINVAL &&
1259             !shmem_confirm_swap(mapping, index, swap))
1260                 error = -EEXIST;
1261 unlock:
1262         if (page) {
1263                 unlock_page(page);
1264                 page_cache_release(page);
1265         }
1266         if (error == -ENOSPC && !once++) {
1267                 info = SHMEM_I(inode);
1268                 spin_lock(&info->lock);
1269                 shmem_recalc_inode(inode);
1270                 spin_unlock(&info->lock);
1271                 goto repeat;
1272         }
1273         if (error == -EEXIST)   /* from above or from radix_tree_insert */
1274                 goto repeat;
1275         return error;
1276 }
1277 
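/*
 * A hypothetical caller, sketching the contract of shmem_getpage():
 * on success the page is returned locked, with a reference held, and
 * marked Uptodate, except that for SGP_READ a hole leaves *pagep
 * NULL; -ENOSPC means this tmpfs is full, -ENOMEM that the system
 * itself is out of memory.
 */
static int shmem_read_byte_example(struct inode *inode, pgoff_t index,
                                   unsigned char *out)
{
        struct page *page = NULL;
        int error;

        error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
        if (error)
                return error;
        if (!page) {                    /* hole: reads as zero */
                *out = 0;
                return 0;
        }
        *out = *(unsigned char *)kmap(page);
        kunmap(page);
        unlock_page(page);              /* shmem_getpage returned it locked */
        page_cache_release(page);
        return 0;
}
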
1278 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1279 {
1280         struct inode *inode = file_inode(vma->vm_file);
1281         int error;
1282         int ret = VM_FAULT_LOCKED;
1283 
1284         /*
1285          * Trinity finds that probing a hole which tmpfs is punching can
1286          * prevent the hole-punch from ever completing: which in turn
1287          * locks writers out with its hold on i_mutex.  So refrain from
1288          * faulting pages into the hole while it's being punched.  Although
1289          * shmem_undo_range() does remove the additions, it may be unable to
1290          * keep up, as each new page needs its own unmap_mapping_range() call,
1291          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1292          *
1293          * It does not matter if we sometimes reach this check just before the
1294          * hole-punch begins, so that one fault then races with the punch:
1295          * we just need to make racing faults a rare case.
1296          *
1297          * The implementation below would be much simpler if we just used a
1298          * standard mutex or completion: but we cannot take i_mutex in fault,
1299          * and bloating every shmem inode for this unlikely case would be sad.
1300          */
1301         if (unlikely(inode->i_private)) {
1302                 struct shmem_falloc *shmem_falloc;
1303 
1304                 spin_lock(&inode->i_lock);
1305                 shmem_falloc = inode->i_private;
1306                 if (shmem_falloc &&
1307                     shmem_falloc->waitq &&
1308                     vmf->pgoff >= shmem_falloc->start &&
1309                     vmf->pgoff < shmem_falloc->next) {
1310                         wait_queue_head_t *shmem_falloc_waitq;
1311                         DEFINE_WAIT(shmem_fault_wait);
1312 
1313                         ret = VM_FAULT_NOPAGE;
1314                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1315                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1316                                 /* It's polite to up mmap_sem if we can */
1317                                 up_read(&vma->vm_mm->mmap_sem);
1318                                 ret = VM_FAULT_RETRY;
1319                         }
1320 
1321                         shmem_falloc_waitq = shmem_falloc->waitq;
1322                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1323                                         TASK_UNINTERRUPTIBLE);
1324                         spin_unlock(&inode->i_lock);
1325                         schedule();
1326 
1327                         /*
1328                          * shmem_falloc_waitq points into the shmem_fallocate()
1329                          * stack of the hole-punching task: shmem_falloc_waitq
1330                          * is usually invalid by the time we reach here, but
1331                          * finish_wait() does not dereference it in that case;
1332                          * though i_lock needed lest racing with wake_up_all().
1333                          */
1334                         spin_lock(&inode->i_lock);
1335                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1336                         spin_unlock(&inode->i_lock);
1337                         return ret;
1338                 }
1339                 spin_unlock(&inode->i_lock);
1340         }
1341 
1342         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1343         if (error)
1344                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1345 
1346         if (ret & VM_FAULT_MAJOR) {
1347                 count_vm_event(PGMAJFAULT);
1348                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1349         }
1350         return ret;
1351 }
1352 
1353 #ifdef CONFIG_NUMA
1354 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1355 {
1356         struct inode *inode = file_inode(vma->vm_file);
1357         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1358 }
1359 
1360 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1361                                           unsigned long addr)
1362 {
1363         struct inode *inode = file_inode(vma->vm_file);
1364         pgoff_t index;
1365 
1366         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1367         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1368 }
1369 #endif
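
/*
 * Not part of shmem.c: the set_policy/get_policy hooks above are what let
 * mbind(2) attach a NUMA policy to a MAP_SHARED tmpfs mapping; the policy
 * is stored in the inode's shared-policy tree, so later mappers of the
 * same range inherit it.  A minimal userspace sketch, assuming libnuma's
 * <numaif.h> (link with -lnuma) and a NUMA-enabled kernel; the file name
 * is hypothetical:
 */
#include <numaif.h>             /* mbind(), MPOL_INTERLEAVE */
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static void *map_interleaved(size_t len)
{
        int fd = open("/dev/shm/buf", O_RDWR | O_CREAT, 0600);
        unsigned long nodes = 0x3;      /* interleave over nodes 0 and 1 */
        void *p;

        if (fd < 0)
                return NULL;
        if (ftruncate(fd, len) < 0) {
                close(fd);
                return NULL;
        }
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (p == MAP_FAILED)
                return NULL;
        /* Lands in shmem_set_policy(), not just in this process's VMA */
        if (mbind(p, len, MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8, 0))
                return NULL;
        return p;
}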
1370 
1371 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1372 {
1373         struct inode *inode = file_inode(file);
1374         struct shmem_inode_info *info = SHMEM_I(inode);
1375         int retval = -ENOMEM;
1376 
1377         spin_lock(&info->lock);
1378         if (lock && !(info->flags & VM_LOCKED)) {
1379                 if (!user_shm_lock(inode->i_size, user))
1380                         goto out_nomem;
1381                 info->flags |= VM_LOCKED;
1382                 mapping_set_unevictable(file->f_mapping);
1383         }
1384         if (!lock && (info->flags & VM_LOCKED) && user) {
1385                 user_shm_unlock(inode->i_size, user);
1386                 info->flags &= ~VM_LOCKED;
1387                 mapping_clear_unevictable(file->f_mapping);
1388         }
1389         retval = 0;
1390 
1391 out_nomem:
1392         spin_unlock(&info->lock);
1393         return retval;
1394 }
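
/*
 * Not part of shmem.c: shmem_lock() above is reached from the SysV IPC
 * layer when a segment is locked with shmctl(SHM_LOCK); its pages are
 * then kept off the LRU (unevictable) rather than pinned.  A minimal
 * sketch of the calling side, assuming CAP_IPC_LOCK or a sufficient
 * RLIMIT_MEMLOCK:
 */
#include <sys/ipc.h>
#include <sys/shm.h>

static int create_locked_segment(size_t size)
{
        int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);

        if (id < 0)
                return -1;
        /* Ends up in shmem_lock(file, 1, user) on the backing file */
        if (shmctl(id, SHM_LOCK, NULL) < 0)
                return -1;
        return id;
}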
1395 
1396 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1397 {
1398         file_accessed(file);
1399         vma->vm_ops = &shmem_vm_ops;
1400         return 0;
1401 }
1402 
1403 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1404                                      umode_t mode, dev_t dev, unsigned long flags)
1405 {
1406         struct inode *inode;
1407         struct shmem_inode_info *info;
1408         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1409 
1410         if (shmem_reserve_inode(sb))
1411                 return NULL;
1412 
1413         inode = new_inode(sb);
1414         if (inode) {
1415                 inode->i_ino = get_next_ino();
1416                 inode_init_owner(inode, dir, mode);
1417                 inode->i_blocks = 0;
1418                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1419                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1420                 inode->i_generation = get_seconds();
1421                 info = SHMEM_I(inode);
1422                 memset(info, 0, (char *)inode - (char *)info);
1423                 spin_lock_init(&info->lock);
1424                 info->seals = F_SEAL_SEAL;
1425                 info->flags = flags & VM_NORESERVE;
1426                 INIT_LIST_HEAD(&info->swaplist);
1427                 simple_xattrs_init(&info->xattrs);
1428                 cache_no_acl(inode);
1429 
1430                 switch (mode & S_IFMT) {
1431                 default:
1432                         inode->i_op = &shmem_special_inode_operations;
1433                         init_special_inode(inode, mode, dev);
1434                         break;
1435                 case S_IFREG:
1436                         inode->i_mapping->a_ops = &shmem_aops;
1437                         inode->i_op = &shmem_inode_operations;
1438                         inode->i_fop = &shmem_file_operations;
1439                         mpol_shared_policy_init(&info->policy,
1440                                                  shmem_get_sbmpol(sbinfo));
1441                         break;
1442                 case S_IFDIR:
1443                         inc_nlink(inode);
1444                         /* Some things misbehave if size == 0 on a directory */
1445                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1446                         inode->i_op = &shmem_dir_inode_operations;
1447                         inode->i_fop = &simple_dir_operations;
1448                         break;
1449                 case S_IFLNK:
1450                         /*
1451                          * Must not load anything in the rbtree,
1452                          * mpol_free_shared_policy will not be called.
1453                          */
1454                         mpol_shared_policy_init(&info->policy, NULL);
1455                         break;
1456                 }
1457         } else
1458                 shmem_free_inode(sb);
1459         return inode;
1460 }
1461 
1462 bool shmem_mapping(struct address_space *mapping)
1463 {
1464         return mapping->backing_dev_info == &shmem_backing_dev_info;
1465 }
1466 
1467 #ifdef CONFIG_TMPFS
1468 static const struct inode_operations shmem_symlink_inode_operations;
1469 static const struct inode_operations shmem_short_symlink_operations;
1470 
1471 #ifdef CONFIG_TMPFS_XATTR
1472 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
1473 #else
1474 #define shmem_initxattrs NULL
1475 #endif
1476 
1477 static int
1478 shmem_write_begin(struct file *file, struct address_space *mapping,
1479                         loff_t pos, unsigned len, unsigned flags,
1480                         struct page **pagep, void **fsdata)
1481 {
1482         struct inode *inode = mapping->host;
1483         struct shmem_inode_info *info = SHMEM_I(inode);
1484         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1485 
1486         /* i_mutex is held by caller */
1487         if (unlikely(info->seals)) {
1488                 if (info->seals & F_SEAL_WRITE)
1489                         return -EPERM;
1490                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
1491                         return -EPERM;
1492         }
1493 
1494         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1495 }
1496 
1497 static int
1498 shmem_write_end(struct file *file, struct address_space *mapping,
1499                         loff_t pos, unsigned len, unsigned copied,
1500                         struct page *page, void *fsdata)
1501 {
1502         struct inode *inode = mapping->host;
1503 
1504         if (pos + copied > inode->i_size)
1505                 i_size_write(inode, pos + copied);
1506 
1507         if (!PageUptodate(page)) {
1508                 if (copied < PAGE_CACHE_SIZE) {
1509                         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1510                         zero_user_segments(page, 0, from,
1511                                         from + copied, PAGE_CACHE_SIZE);
1512                 }
1513                 SetPageUptodate(page);
1514         }
1515         set_page_dirty(page);
1516         unlock_page(page);
1517         page_cache_release(page);
1518 
1519         return copied;
1520 }
1521 
1522 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1523 {
1524         struct file *file = iocb->ki_filp;
1525         struct inode *inode = file_inode(file);
1526         struct address_space *mapping = inode->i_mapping;
1527         pgoff_t index;
1528         unsigned long offset;
1529         enum sgp_type sgp = SGP_READ;
1530         int error = 0;
1531         ssize_t retval = 0;
1532         loff_t *ppos = &iocb->ki_pos;
1533 
1534         /*
1535          * Might this read be for a stacking filesystem?  Then when reading
1536          * holes of a sparse file, we actually need to allocate those pages,
1537          * and even mark them dirty, so it cannot exceed the max_blocks limit.
1538          */
1539         if (segment_eq(get_fs(), KERNEL_DS))
1540                 sgp = SGP_DIRTY;
1541 
1542         index = *ppos >> PAGE_CACHE_SHIFT;
1543         offset = *ppos & ~PAGE_CACHE_MASK;
1544 
1545         for (;;) {
1546                 struct page *page = NULL;
1547                 pgoff_t end_index;
1548                 unsigned long nr, ret;
1549                 loff_t i_size = i_size_read(inode);
1550 
1551                 end_index = i_size >> PAGE_CACHE_SHIFT;
1552                 if (index > end_index)
1553                         break;
1554                 if (index == end_index) {
1555                         nr = i_size & ~PAGE_CACHE_MASK;
1556                         if (nr <= offset)
1557                                 break;
1558                 }
1559 
1560                 error = shmem_getpage(inode, index, &page, sgp, NULL);
1561                 if (error) {
1562                         if (error == -EINVAL)
1563                                 error = 0;
1564                         break;
1565                 }
1566                 if (page)
1567                         unlock_page(page);
1568 
1569                 /*
1570                  * We must re-check i_size after getting the page, since reads
1571                  * (unlike writes) are called without i_mutex protection against truncate
1572                  */
1573                 nr = PAGE_CACHE_SIZE;
1574                 i_size = i_size_read(inode);
1575                 end_index = i_size >> PAGE_CACHE_SHIFT;
1576                 if (index == end_index) {
1577                         nr = i_size & ~PAGE_CACHE_MASK;
1578                         if (nr <= offset) {
1579                                 if (page)
1580                                         page_cache_release(page);
1581                                 break;
1582                         }
1583                 }
1584                 nr -= offset;
1585 
1586                 if (page) {
1587                         /*
1588                          * If users can be writing to this page using arbitrary
1589                          * virtual addresses, take care about potential aliasing
1590                          * before reading the page on the kernel side.
1591                          */
1592                         if (mapping_writably_mapped(mapping))
1593                                 flush_dcache_page(page);
1594                         /*
1595                          * Mark the page accessed if we read the beginning.
1596                          */
1597                         if (!offset)
1598                                 mark_page_accessed(page);
1599                 } else {
1600                         page = ZERO_PAGE(0);
1601                         page_cache_get(page);
1602                 }
1603 
1604                 /*
1605                  * Ok, we have the page, and it's up-to-date, so
1606                  * now we can copy it to user space...
1607                  */
1608                 ret = copy_page_to_iter(page, offset, nr, to);
1609                 retval += ret;
1610                 offset += ret;
1611                 index += offset >> PAGE_CACHE_SHIFT;
1612                 offset &= ~PAGE_CACHE_MASK;
1613 
1614                 page_cache_release(page);
1615                 if (!iov_iter_count(to))
1616                         break;
1617                 if (ret < nr) {
1618                         error = -EFAULT;
1619                         break;
1620                 }
1621                 cond_resched();
1622         }
1623 
1624         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1625         file_accessed(file);
1626         return retval ? retval : error;
1627 }
1628 
1629 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1630                                 struct pipe_inode_info *pipe, size_t len,
1631                                 unsigned int flags)
1632 {
1633         struct address_space *mapping = in->f_mapping;
1634         struct inode *inode = mapping->host;
1635         unsigned int loff, nr_pages, req_pages;
1636         struct page *pages[PIPE_DEF_BUFFERS];
1637         struct partial_page partial[PIPE_DEF_BUFFERS];
1638         struct page *page;
1639         pgoff_t index, end_index;
1640         loff_t isize, left;
1641         int error, page_nr;
1642         struct splice_pipe_desc spd = {
1643                 .pages = pages,
1644                 .partial = partial,
1645                 .nr_pages_max = PIPE_DEF_BUFFERS,
1646                 .flags = flags,
1647                 .ops = &page_cache_pipe_buf_ops,
1648                 .spd_release = spd_release_page,
1649         };
1650 
1651         isize = i_size_read(inode);
1652         if (unlikely(*ppos >= isize))
1653                 return 0;
1654 
1655         left = isize - *ppos;
1656         if (unlikely(left < len))
1657                 len = left;
1658 
1659         if (splice_grow_spd(pipe, &spd))
1660                 return -ENOMEM;
1661 
1662         index = *ppos >> PAGE_CACHE_SHIFT;
1663         loff = *ppos & ~PAGE_CACHE_MASK;
1664         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1665         nr_pages = min(req_pages, spd.nr_pages_max);
1666 
1667         spd.nr_pages = find_get_pages_contig(mapping, index,
1668                                                 nr_pages, spd.pages);
1669         index += spd.nr_pages;
1670         error = 0;
1671 
1672         while (spd.nr_pages < nr_pages) {
1673                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1674                 if (error)
1675                         break;
1676                 unlock_page(page);
1677                 spd.pages[spd.nr_pages++] = page;
1678                 index++;
1679         }
1680 
1681         index = *ppos >> PAGE_CACHE_SHIFT;
1682         nr_pages = spd.nr_pages;
1683         spd.nr_pages = 0;
1684 
1685         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1686                 unsigned int this_len;
1687 
1688                 if (!len)
1689                         break;
1690 
1691                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1692                 page = spd.pages[page_nr];
1693 
1694                 if (!PageUptodate(page) || page->mapping != mapping) {
1695                         error = shmem_getpage(inode, index, &page,
1696                                                         SGP_CACHE, NULL);
1697                         if (error)
1698                                 break;
1699                         unlock_page(page);
1700                         page_cache_release(spd.pages[page_nr]);
1701                         spd.pages[page_nr] = page;
1702                 }
1703 
1704                 isize = i_size_read(inode);
1705                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1706                 if (unlikely(!isize || index > end_index))
1707                         break;
1708 
1709                 if (end_index == index) {
1710                         unsigned int plen;
1711 
1712                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1713                         if (plen <= loff)
1714                                 break;
1715 
1716                         this_len = min(this_len, plen - loff);
1717                         len = this_len;
1718                 }
1719 
1720                 spd.partial[page_nr].offset = loff;
1721                 spd.partial[page_nr].len = this_len;
1722                 len -= this_len;
1723                 loff = 0;
1724                 spd.nr_pages++;
1725                 index++;
1726         }
1727 
1728         while (page_nr < nr_pages)
1729                 page_cache_release(spd.pages[page_nr++]);
1730 
1731         if (spd.nr_pages)
1732                 error = splice_to_pipe(pipe, &spd);
1733 
1734         splice_shrink_spd(&spd);
1735 
1736         if (error > 0) {
1737                 *ppos += error;
1738                 file_accessed(in);
1739         }
1740         return error;
1741 }
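
/*
 * Not part of shmem.c: the splice path above hands page references, not
 * copies, to the pipe.  A minimal userspace sketch of driving it; the
 * descriptor layout is hypothetical:
 */
#define _GNU_SOURCE
#include <fcntl.h>              /* splice(), SPLICE_F_MOVE */
#include <sys/types.h>

static ssize_t drain_to_pipe(int tmpfs_fd, int pipe_wr, size_t len)
{
        loff_t off = 0;

        /* For a tmpfs fd this calls shmem_file_splice_read() */
        return splice(tmpfs_fd, &off, pipe_wr, NULL, len, SPLICE_F_MOVE);
}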
1742 
1743 /*
1744  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
1745  */
1746 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
1747                                     pgoff_t index, pgoff_t end, int whence)
1748 {
1749         struct page *page;
1750         struct pagevec pvec;
1751         pgoff_t indices[PAGEVEC_SIZE];
1752         bool done = false;
1753         int i;
1754 
1755         pagevec_init(&pvec, 0);
1756         pvec.nr = 1;            /* start small: we may be there already */
1757         while (!done) {
1758                 pvec.nr = find_get_entries(mapping, index,
1759                                         pvec.nr, pvec.pages, indices);
1760                 if (!pvec.nr) {
1761                         if (whence == SEEK_DATA)
1762                                 index = end;
1763                         break;
1764                 }
1765                 for (i = 0; i < pvec.nr; i++, index++) {
1766                         if (index < indices[i]) {
1767                                 if (whence == SEEK_HOLE) {
1768                                         done = true;
1769                                         break;
1770                                 }
1771                                 index = indices[i];
1772                         }
1773                         page = pvec.pages[i];
1774                         if (page && !radix_tree_exceptional_entry(page)) {
1775                                 if (!PageUptodate(page))
1776                                         page = NULL;
1777                         }
1778                         if (index >= end ||
1779                             (page && whence == SEEK_DATA) ||
1780                             (!page && whence == SEEK_HOLE)) {
1781                                 done = true;
1782                                 break;
1783                         }
1784                 }
1785                 pagevec_remove_exceptionals(&pvec);
1786                 pagevec_release(&pvec);
1787                 pvec.nr = PAGEVEC_SIZE;
1788                 cond_resched();
1789         }
1790         return index;
1791 }
1792 
1793 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1794 {
1795         struct address_space *mapping = file->f_mapping;
1796         struct inode *inode = mapping->host;
1797         pgoff_t start, end;
1798         loff_t new_offset;
1799 
1800         if (whence != SEEK_DATA && whence != SEEK_HOLE)
1801                 return generic_file_llseek_size(file, offset, whence,
1802                                         MAX_LFS_FILESIZE, i_size_read(inode));
1803         mutex_lock(&inode->i_mutex);
1804         /* We're holding i_mutex so we can access i_size directly */
1805 
1806         if (offset < 0)
1807                 offset = -EINVAL;
1808         else if (offset >= inode->i_size)
1809                 offset = -ENXIO;
1810         else {
1811                 start = offset >> PAGE_CACHE_SHIFT;
1812                 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1813                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1814                 new_offset <<= PAGE_CACHE_SHIFT;
1815                 if (new_offset > offset) {
1816                         if (new_offset < inode->i_size)
1817                                 offset = new_offset;
1818                         else if (whence == SEEK_DATA)
1819                                 offset = -ENXIO;
1820                         else
1821                                 offset = inode->i_size;
1822                 }
1823         }
1824 
1825         if (offset >= 0)
1826                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
1827         mutex_unlock(&inode->i_mutex);
1828         return offset;
1829 }
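
/*
 * Not part of shmem.c: the SEEK_DATA/SEEK_HOLE support above lets
 * userspace walk the allocated extents of a sparse tmpfs file.  A
 * minimal sketch; a failed lseek() (ENXIO) means no more data:
 */
#define _GNU_SOURCE
#include <unistd.h>             /* lseek() */
#include <fcntl.h>              /* SEEK_DATA, SEEK_HOLE */
#include <stdio.h>
#include <stdint.h>

static void list_extents(int fd)
{
        off_t data = 0, hole;

        for (;;) {
                data = lseek(fd, data, SEEK_DATA);
                if (data < 0)
                        break;
                hole = lseek(fd, data, SEEK_HOLE);
                printf("data: %jd..%jd\n", (intmax_t)data, (intmax_t)hole);
                data = hole;
        }
}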
1830 
1831 /*
1832  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
1833  * so reuse a tag which we firmly believe is never set or cleared on shmem.
1834  */
1835 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
1836 #define LAST_SCAN               4       /* about 150ms max */
1837 
1838 static void shmem_tag_pins(struct address_space *mapping)
1839 {
1840         struct radix_tree_iter iter;
1841         void **slot;
1842         pgoff_t start;
1843         struct page *page;
1844 
1845         lru_add_drain();
1846         start = 0;
1847         rcu_read_lock();
1848 
1849 restart:
1850         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1851                 page = radix_tree_deref_slot(slot);
1852                 if (!page || radix_tree_exception(page)) {
1853                         if (radix_tree_deref_retry(page))
1854                                 goto restart;
1855                 } else if (page_count(page) - page_mapcount(page) > 1) {
1856                         spin_lock_irq(&mapping->tree_lock);
1857                         radix_tree_tag_set(&mapping->page_tree, iter.index,
1858                                            SHMEM_TAG_PINNED);
1859                         spin_unlock_irq(&mapping->tree_lock);
1860                 }
1861 
1862                 if (need_resched()) {
1863                         cond_resched_rcu();
1864                         start = iter.index + 1;
1865                         goto restart;
1866                 }
1867         }
1868         rcu_read_unlock();
1869 }
1870 
1871 /*
1872  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
1873  * via get_user_pages(), drivers might have some pending I/O without any active
1874  * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
1875  * and see whether they have an elevated ref-count. If so, we tag them and wait for
1876  * them to be dropped.
1877  * The caller must guarantee that no new user will acquire writable references
1878  * to those pages to avoid races.
1879  */
1880 static int shmem_wait_for_pins(struct address_space *mapping)
1881 {
1882         struct radix_tree_iter iter;
1883         void **slot;
1884         pgoff_t start;
1885         struct page *page;
1886         int error, scan;
1887 
1888         shmem_tag_pins(mapping);
1889 
1890         error = 0;
1891         for (scan = 0; scan <= LAST_SCAN; scan++) {
1892                 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
1893                         break;
1894 
1895                 if (!scan)
1896                         lru_add_drain_all();
1897                 else if (schedule_timeout_killable((HZ << scan) / 200))
1898                         scan = LAST_SCAN;
1899 
1900                 start = 0;
1901                 rcu_read_lock();
1902 restart:
1903                 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
1904                                            start, SHMEM_TAG_PINNED) {
1905 
1906                         page = radix_tree_deref_slot(slot);
1907                         if (radix_tree_exception(page)) {
1908                                 if (radix_tree_deref_retry(page))
1909                                         goto restart;
1910 
1911                                 page = NULL;
1912                         }
1913 
1914                         if (page &&
1915                             page_count(page) - page_mapcount(page) != 1) {
1916                                 if (scan < LAST_SCAN)
1917                                         goto continue_resched;
1918 
1919                                 /*
1920                                  * On the last scan, we clean up all those tags
1921                                  * we inserted; but make a note that we still
1922                                  * found pages pinned.
1923                                  */
1924                                 error = -EBUSY;
1925                         }
1926 
1927                         spin_lock_irq(&mapping->tree_lock);
1928                         radix_tree_tag_clear(&mapping->page_tree,
1929                                              iter.index, SHMEM_TAG_PINNED);
1930                         spin_unlock_irq(&mapping->tree_lock);
1931 continue_resched:
1932                         if (need_resched()) {
1933                                 cond_resched_rcu();
1934                                 start = iter.index + 1;
1935                                 goto restart;
1936                         }
1937                 }
1938                 rcu_read_unlock();
1939         }
1940 
1941         return error;
1942 }
1943 
1944 #define F_ALL_SEALS (F_SEAL_SEAL | \
1945                      F_SEAL_SHRINK | \
1946                      F_SEAL_GROW | \
1947                      F_SEAL_WRITE)
1948 
1949 int shmem_add_seals(struct file *file, unsigned int seals)
1950 {
1951         struct inode *inode = file_inode(file);
1952         struct shmem_inode_info *info = SHMEM_I(inode);
1953         int error;
1954 
1955         /*
1956          * SEALING
1957          * Sealing allows multiple parties to share a shmem-file but restrict
1958          * access to a specific subset of file operations. Seals can only be
1959          * added, but never removed. This way, mutually untrusted parties can
1960          * share common memory regions with a well-defined policy. A malicious
1961          * peer can thus never perform unwanted operations on a shared object.
1962          *
1963          * Seals are only supported on special shmem-files and always affect
1964          * the whole underlying inode. Once a seal is set, it may prevent some
1965          * kinds of access to the file. Currently, the following seals are
1966          * defined:
1967          *   SEAL_SEAL: Prevent further seals from being set on this file
1968          *   SEAL_SHRINK: Prevent the file from shrinking
1969          *   SEAL_GROW: Prevent the file from growing
1970          *   SEAL_WRITE: Prevent write access to the file
1971          *
1972          * As we don't require any trust relationship between two parties, we
1973          * must prevent seals from being removed. Therefore, sealing a file
1974          * only adds a given set of seals to the file, it never touches
1975          * existing seals. Furthermore, the "setting seals"-operation can be
1976          * sealed itself, which basically prevents any further seal from being
1977          * added.
1978          *
1979          * Semantics of sealing are only defined on volatile files. Only
1980          * anonymous shmem files support sealing. More importantly, seals are
1981          * never written to disk. Therefore, there's no plan to support it on
1982          * other file types.
1983          */
1984 
1985         if (file->f_op != &shmem_file_operations)
1986                 return -EINVAL;
1987         if (!(file->f_mode & FMODE_WRITE))
1988                 return -EPERM;
1989         if (seals & ~(unsigned int)F_ALL_SEALS)
1990                 return -EINVAL;
1991 
1992         mutex_lock(&inode->i_mutex);
1993 
1994         if (info->seals & F_SEAL_SEAL) {
1995                 error = -EPERM;
1996                 goto unlock;
1997         }
1998 
1999         if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
2000                 error = mapping_deny_writable(file->f_mapping);
2001                 if (error)
2002                         goto unlock;
2003 
2004                 error = shmem_wait_for_pins(file->f_mapping);
2005                 if (error) {
2006                         mapping_allow_writable(file->f_mapping);
2007                         goto unlock;
2008                 }
2009         }
2010 
2011         info->seals |= seals;
2012         error = 0;
2013 
2014 unlock:
2015         mutex_unlock(&inode->i_mutex);
2016         return error;
2017 }
2018 EXPORT_SYMBOL_GPL(shmem_add_seals);
2019 
2020 int shmem_get_seals(struct file *file)
2021 {
2022         if (file->f_op != &shmem_file_operations)
2023                 return -EINVAL;
2024 
2025         return SHMEM_I(file_inode(file))->seals;
2026 }
2027 EXPORT_SYMBOL_GPL(shmem_get_seals);
2028 
2029 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2030 {
2031         long error;
2032 
2033         switch (cmd) {
2034         case F_ADD_SEALS:
2035                 /* disallow the upper 32 bits */
2036                 if (arg > UINT_MAX)
2037                         return -EINVAL;
2038 
2039                 error = shmem_add_seals(file, arg);
2040                 break;
2041         case F_GET_SEALS:
2042                 error = shmem_get_seals(file);
2043                 break;
2044         default:
2045                 error = -EINVAL;
2046                 break;
2047         }
2048 
2049         return error;
2050 }
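
/*
 * Not part of shmem.c: the intended consumer of F_ADD_SEALS/F_GET_SEALS
 * is a memfd created with MFD_ALLOW_SEALING.  A minimal sketch of
 * producing a fully sealed buffer; memfd_create() may need a raw
 * syscall wrapper on older C libraries:
 */
#define _GNU_SOURCE
#include <sys/mman.h>           /* memfd_create(), MFD_ALLOW_SEALING */
#include <fcntl.h>              /* F_ADD_SEALS, F_SEAL_* */
#include <unistd.h>

static int make_sealed_buf(const void *data, size_t size)
{
        int fd = memfd_create("buf", MFD_ALLOW_SEALING);

        if (fd < 0 || ftruncate(fd, size) < 0)
                return -1;
        if (pwrite(fd, data, size, 0) != (ssize_t)size)
                return -1;
        /*
         * F_SEAL_WRITE fails with EBUSY while writable mappings or
         * pinned pages exist: shmem_wait_for_pins() above enforces that.
         */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
                                   F_SEAL_WRITE | F_SEAL_SEAL) < 0)
                return -1;
        return fd;      /* now safe to hand to an untrusted peer */
}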
2051 
2052 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2053                                                          loff_t len)
2054 {
2055         struct inode *inode = file_inode(file);
2056         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2057         struct shmem_inode_info *info = SHMEM_I(inode);
2058         struct shmem_falloc shmem_falloc;
2059         pgoff_t start, index, end;
2060         int error;
2061 
2062         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2063                 return -EOPNOTSUPP;
2064 
2065         mutex_lock(&inode->i_mutex);
2066 
2067         if (mode & FALLOC_FL_PUNCH_HOLE) {
2068                 struct address_space *mapping = file->f_mapping;
2069                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2070                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2071                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2072 
2073                 /* protected by i_mutex */
2074                 if (info->seals & F_SEAL_WRITE) {
2075                         error = -EPERM;
2076                         goto out;
2077                 }
2078 
2079                 shmem_falloc.waitq = &shmem_falloc_waitq;
2080                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2081                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2082                 spin_lock(&inode->i_lock);
2083                 inode->i_private = &shmem_falloc;
2084                 spin_unlock(&inode->i_lock);
2085 
2086                 if ((u64)unmap_end > (u64)unmap_start)
2087                         unmap_mapping_range(mapping, unmap_start,
2088                                             1 + unmap_end - unmap_start, 0);
2089                 shmem_truncate_range(inode, offset, offset + len - 1);
2090                 /* No need to unmap again: hole-punching leaves COWed pages */
2091 
2092                 spin_lock(&inode->i_lock);
2093                 inode->i_private = NULL;
2094                 wake_up_all(&shmem_falloc_waitq);
2095                 spin_unlock(&inode->i_lock);
2096                 error = 0;
2097                 goto out;
2098         }
2099 
2100         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2101         error = inode_newsize_ok(inode, offset + len);
2102         if (error)
2103                 goto out;
2104 
2105         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2106                 error = -EPERM;
2107                 goto out;
2108         }
2109 
2110         start = offset >> PAGE_CACHE_SHIFT;
2111         end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2112         /* Try to avoid a swapstorm if len is impossible to satisfy */
2113         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2114                 error = -ENOSPC;
2115                 goto out;
2116         }
2117 
2118         shmem_falloc.waitq = NULL;
2119         shmem_falloc.start = start;
2120         shmem_falloc.next  = start;
2121         shmem_falloc.nr_falloced = 0;
2122         shmem_falloc.nr_unswapped = 0;
2123         spin_lock(&inode->i_lock);
2124         inode->i_private = &shmem_falloc;
2125         spin_unlock(&inode->i_lock);
2126 
2127         for (index = start; index < end; index++) {
2128                 struct page *page;
2129 
2130                 /*
2131                  * Good, the fallocate(2) manpage permits EINTR: we may have
2132                  * been interrupted because we are using up too much memory.
2133                  */
2134                 if (signal_pending(current))
2135                         error = -EINTR;
2136                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2137                         error = -ENOMEM;
2138                 else
2139                         error = shmem_getpage(inode, index, &page, SGP_FALLOC,
2140                                                                         NULL);
2141                 if (error) {
2142                         /* Remove the !PageUptodate pages we added */
2143                         shmem_undo_range(inode,
2144                                 (loff_t)start << PAGE_CACHE_SHIFT,
2145                                 (loff_t)index << PAGE_CACHE_SHIFT, true);
2146                         goto undone;
2147                 }
2148 
2149                 /*
2150                  * Inform shmem_writepage() how far we have reached.
2151                  * No need for lock or barrier: we have the page lock.
2152                  */
2153                 shmem_falloc.next++;
2154                 if (!PageUptodate(page))
2155                         shmem_falloc.nr_falloced++;
2156 
2157                 /*
2158                  * If !PageUptodate, leave it that way so that freeable pages
2159                  * can be recognized if we need to rollback on error later.
2160                  * But set_page_dirty so that memory pressure will swap rather
2161                  * than free the pages we are allocating (and SGP_CACHE pages
2162                  * might still be clean: we now need to mark those dirty too).
2163                  */
2164                 set_page_dirty(page);
2165                 unlock_page(page);
2166                 page_cache_release(page);
2167                 cond_resched();
2168         }
2169 
2170         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2171                 i_size_write(inode, offset + len);
2172         inode->i_ctime = CURRENT_TIME;
2173 undone:
2174         spin_lock(&inode->i_lock);
2175         inode->i_private = NULL;
2176         spin_unlock(&inode->i_lock);
2177 out:
2178         mutex_unlock(&inode->i_mutex);
2179         return error;
2180 }
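
/*
 * Not part of shmem.c: a minimal userspace sketch of the two
 * fallocate(2) modes handled above; note FALLOC_FL_PUNCH_HOLE must be
 * combined with FALLOC_FL_KEEP_SIZE:
 */
#define _GNU_SOURCE
#include <fcntl.h>              /* fallocate(), FALLOC_FL_* */

static int prealloc_then_punch(int fd)
{
        /* Plain preallocation: instantiates pages and charges the quota */
        if (fallocate(fd, 0, 0, 8 << 20) < 0)
                return -1;
        /* Punch the first megabyte back out; i_size stays unchanged */
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         0, 1 << 20);
}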
2181 
2182 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2183 {
2184         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2185 
2186         buf->f_type = TMPFS_MAGIC;
2187         buf->f_bsize = PAGE_CACHE_SIZE;
2188         buf->f_namelen = NAME_MAX;
2189         if (sbinfo->max_blocks) {
2190                 buf->f_blocks = sbinfo->max_blocks;
2191                 buf->f_bavail =
2192                 buf->f_bfree  = sbinfo->max_blocks -
2193                                 percpu_counter_sum(&sbinfo->used_blocks);
2194         }
2195         if (sbinfo->max_inodes) {
2196                 buf->f_files = sbinfo->max_inodes;
2197                 buf->f_ffree = sbinfo->free_inodes;
2198         }
2199         /* else leave those fields 0 like simple_statfs */
2200         return 0;
2201 }
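
/*
 * Not part of shmem.c: the fields filled in above are what df(1) and
 * statvfs(3) report for a tmpfs mount.  A small sketch:
 */
#include <sys/statvfs.h>

static unsigned long long tmpfs_free_bytes(const char *mountpoint)
{
        struct statvfs st;

        if (statvfs(mountpoint, &st) < 0)
                return 0;
        /* On a size-limited mount: max_blocks - used, in f_bsize units */
        return (unsigned long long)st.f_bfree * st.f_bsize;
}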
2202 
2203 /*
2204  * File creation. Allocate an inode, and we're done.
2205  */
2206 static int
2207 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2208 {
2209         struct inode *inode;
2210         int error = -ENOSPC;
2211 
2212         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2213         if (inode) {
2214                 error = simple_acl_create(dir, inode);
2215                 if (error)
2216                         goto out_iput;
2217                 error = security_inode_init_security(inode, dir,
2218                                                      &dentry->d_name,
2219                                                      shmem_initxattrs, NULL);
2220                 if (error && error != -EOPNOTSUPP)
2221                         goto out_iput;
2222 
2223                 error = 0;
2224                 dir->i_size += BOGO_DIRENT_SIZE;
2225                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2226                 d_instantiate(dentry, inode);
2227                 dget(dentry); /* Extra count - pin the dentry in core */
2228         }
2229         return error;
2230 out_iput:
2231         iput(inode);
2232         return error;
2233 }
2234 
2235 static int
2236 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2237 {
2238         struct inode *inode;
2239         int error = -ENOSPC;
2240 
2241         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2242         if (inode) {
2243                 error = security_inode_init_security(inode, dir,
2244                                                      NULL,
2245                                                      shmem_initxattrs, NULL);
2246                 if (error && error != -EOPNOTSUPP)
2247                         goto out_iput;
2248                 error = simple_acl_create(dir, inode);
2249                 if (error)
2250                         goto out_iput;
2251                 d_tmpfile(dentry, inode);
2252         }
2253         return error;
2254 out_iput:
2255         iput(inode);
2256         return error;
2257 }
2258 
2259 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2260 {
2261         int error;
2262 
2263         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2264                 return error;
2265         inc_nlink(dir);
2266         return 0;
2267 }
2268 
2269 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2270                 bool excl)
2271 {
2272         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2273 }
2274 
2275 /*
2276  * Link a file.
2277  */
2278 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2279 {
2280         struct inode *inode = old_dentry->d_inode;
2281         int ret;
2282 
2283         /*
2284          * No ordinary (disk-based) filesystem counts links as inodes;
2285          * but each new link needs a new dentry, pinning lowmem, and
2286          * tmpfs dentries cannot be pruned until they are unlinked.
2287          */
2288         ret = shmem_reserve_inode(inode->i_sb);
2289         if (ret)
2290                 goto out;
2291 
2292         dir->i_size += BOGO_DIRENT_SIZE;
2293         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2294         inc_nlink(inode);
2295         ihold(inode);   /* New dentry reference */
2296         dget(dentry);           /* Extra pinning count for the created dentry */
2297         d_instantiate(dentry, inode);
2298 out:
2299         return ret;
2300 }
2301 
2302 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2303 {
2304         struct inode *inode = dentry->d_inode;
2305 
2306         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2307                 shmem_free_inode(inode->i_sb);
2308 
2309         dir->i_size -= BOGO_DIRENT_SIZE;
2310         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2311         drop_nlink(inode);
2312         dput(dentry);   /* Undo the count from "create" - this does all the work */
2313         return 0;
2314 }
2315 
2316 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2317 {
2318         if (!simple_empty(dentry))
2319                 return -ENOTEMPTY;
2320 
2321         drop_nlink(dentry->d_inode);
2322         drop_nlink(dir);
2323         return shmem_unlink(dir, dentry);
2324 }
2325 
2326 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2327 {
2328         bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
2329         bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
2330 
2331         if (old_dir != new_dir && old_is_dir != new_is_dir) {
2332                 if (old_is_dir) {
2333                         drop_nlink(old_dir);
2334                         inc_nlink(new_dir);
2335                 } else {
2336                         drop_nlink(new_dir);
2337                         inc_nlink(old_dir);
2338                 }
2339         }
2340         old_dir->i_ctime = old_dir->i_mtime =
2341         new_dir->i_ctime = new_dir->i_mtime =
2342         old_dentry->d_inode->i_ctime =
2343         new_dentry->d_inode->i_ctime = CURRENT_TIME;
2344 
2345         return 0;
2346 }
2347 
2348 /*
2349  * The VFS layer already does all the dentry stuff for rename,
2350  * we just have to decrement the usage count for the target if
2351  * it exists so that the VFS layer correctly frees it when it
2352  * gets overwritten.
2353  */
2354 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
2355 {
2356         struct inode *inode = old_dentry->d_inode;
2357         int they_are_dirs = S_ISDIR(inode->i_mode);
2358 
2359         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
2360                 return -EINVAL;
2361 
2362         if (flags & RENAME_EXCHANGE)
2363                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
2364 
2365         if (!simple_empty(new_dentry))
2366                 return -ENOTEMPTY;
2367 
2368         if (new_dentry->d_inode) {
2369                 (void) shmem_unlink(new_dir, new_dentry);
2370                 if (they_are_dirs) {
2371                         drop_nlink(new_dentry->d_inode);
2372                         drop_nlink(old_dir);
2373                 }
2374         } else if (they_are_dirs) {
2375                 drop_nlink(old_dir);
2376                 inc_nlink(new_dir);
2377         }
2378 
2379         old_dir->i_size -= BOGO_DIRENT_SIZE;
2380         new_dir->i_size += BOGO_DIRENT_SIZE;
2381         old_dir->i_ctime = old_dir->i_mtime =
2382         new_dir->i_ctime = new_dir->i_mtime =
2383         inode->i_ctime = CURRENT_TIME;
2384         return 0;
2385 }
2386 
2387 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2388 {
2389         int error;
2390         int len;
2391         struct inode *inode;
2392         struct page *page;
2393         char *kaddr;
2394         struct shmem_inode_info *info;
2395 
2396         len = strlen(symname) + 1;
2397         if (len > PAGE_CACHE_SIZE)
2398                 return -ENAMETOOLONG;
2399 
2400         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2401         if (!inode)
2402                 return -ENOSPC;
2403 
2404         error = security_inode_init_security(inode, dir, &dentry->d_name,
2405                                              shmem_initxattrs, NULL);
2406         if (error) {
2407                 if (error != -EOPNOTSUPP) {
2408                         iput(inode);
2409                         return error;
2410                 }
2411                 error = 0;
2412         }
2413 
2414         info = SHMEM_I(inode);
2415         inode->i_size = len-1;
2416         if (len <= SHORT_SYMLINK_LEN) {
2417                 info->symlink = kmemdup(symname, len, GFP_KERNEL);
2418                 if (!info->symlink) {
2419                         iput(inode);
2420                         return -ENOMEM;
2421                 }
2422                 inode->i_op = &shmem_short_symlink_operations;
2423         } else {
2424                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2425                 if (error) {
2426                         iput(inode);
2427                         return error;
2428                 }
2429                 inode->i_mapping->a_ops = &shmem_aops;
2430                 inode->i_op = &shmem_symlink_inode_operations;
2431                 kaddr = kmap_atomic(page);
2432                 memcpy(kaddr, symname, len);
2433                 kunmap_atomic(kaddr);
2434                 SetPageUptodate(page);
2435                 set_page_dirty(page);
2436                 unlock_page(page);
2437                 page_cache_release(page);
2438         }
2439         dir->i_size += BOGO_DIRENT_SIZE;
2440         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2441         d_instantiate(dentry, inode);
2442         dget(dentry);
2443         return 0;
2444 }
2445 
2446 static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
2447 {
2448         nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
2449         return NULL;
2450 }
2451 
2452 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
2453 {
2454         struct page *page = NULL;
2455         int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2456         nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
2457         if (page)
2458                 unlock_page(page);
2459         return page;
2460 }
2461 
2462 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
2463 {
2464         if (!IS_ERR(nd_get_link(nd))) {
2465                 struct page *page = cookie;
2466                 kunmap(page);
2467                 mark_page_accessed(page);
2468                 page_cache_release(page);
2469         }
2470 }
2471 
2472 #ifdef CONFIG_TMPFS_XATTR
2473 /*
2474  * Superblocks without xattr inode operations may get some security.* xattr
2475  * support from the LSM "for free". As soon as we have any other xattrs
2476  * like ACLs, we also need to implement the security.* handlers at
2477  * filesystem level, though.
2478  */
2479 
2480 /*
2481  * Callback for security_inode_init_security() for acquiring xattrs.
2482  */
2483 static int shmem_initxattrs(struct inode *inode,
2484                             const struct xattr *xattr_array,
2485                             void *fs_info)
2486 {
2487         struct shmem_inode_info *info = SHMEM_I(inode);
2488         const struct xattr *xattr;
2489         struct simple_xattr *new_xattr;
2490         size_t len;
2491 
2492         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
2493                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
2494                 if (!new_xattr)
2495                         return -ENOMEM;
2496 
2497                 len = strlen(xattr->name) + 1;
2498                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
2499                                           GFP_KERNEL);
2500                 if (!new_xattr->name) {
2501                         kfree(new_xattr);
2502                         return -ENOMEM;
2503                 }
2504 
2505                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
2506                        XATTR_SECURITY_PREFIX_LEN);
2507                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
2508                        xattr->name, len);
2509 
2510                 simple_xattr_list_add(&info->xattrs, new_xattr);
2511         }
2512 
2513         return 0;
2514 }
2515 
2516 static const struct xattr_handler *shmem_xattr_handlers[] = {
2517 #ifdef CONFIG_TMPFS_POSIX_ACL
2518         &posix_acl_access_xattr_handler,
2519         &posix_acl_default_xattr_handler,
2520 #endif
2521         NULL
2522 };
2523 
2524 static int shmem_xattr_validate(const char *name)
2525 {
2526         struct { const char *prefix; size_t len; } arr[] = {
2527                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2528                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2529         };
2530         int i;
2531 
2532         for (i = 0; i < ARRAY_SIZE(arr); i++) {
2533                 size_t preflen = arr[i].len;
2534                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2535                         if (!name[preflen])
2536                                 return -EINVAL;
2537                         return 0;
2538                 }
2539         }
2540         return -EOPNOTSUPP;
2541 }
2542 
2543 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2544                               void *buffer, size_t size)
2545 {
2546         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2547         int err;
2548 
2549         /*
2550          * If this is a request for a synthetic attribute in the system.*
2551          * namespace use the generic infrastructure to resolve a handler
2552          * for it via sb->s_xattr.
2553          */
2554         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2555                 return generic_getxattr(dentry, name, buffer, size);
2556 
2557         err = shmem_xattr_validate(name);
2558         if (err)
2559                 return err;
2560 
2561         return simple_xattr_get(&info->xattrs, name, buffer, size);
2562 }
2563 
2564 static int shmem_setxattr(struct dentry *dentry, const char *name,
2565                           const void *value, size_t size, int flags)
2566 {
2567         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2568         int err;
2569 
2570         /*
2571          * If this is a request for a synthetic attribute in the system.*
2572          * namespace use the generic infrastructure to resolve a handler
2573          * for it via sb->s_xattr.
2574          */
2575         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2576                 return generic_setxattr(dentry, name, value, size, flags);
2577 
2578         err = shmem_xattr_validate(name);
2579         if (err)
2580                 return err;
2581 
2582         return simple_xattr_set(&info->xattrs, name, value, size, flags);
2583 }
2584 
2585 static int shmem_removexattr(struct dentry *dentry, const char *name)
2586 {
2587         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2588         int err;
2589 
2590         /*
2591          * If this is a request for a synthetic attribute in the system.*
2592          * namespace use the generic infrastructure to resolve a handler
2593          * for it via sb->s_xattr.
2594          */
2595         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2596                 return generic_removexattr(dentry, name);
2597 
2598         err = shmem_xattr_validate(name);
2599         if (err)
2600                 return err;
2601 
2602         return simple_xattr_remove(&info->xattrs, name);
2603 }
2604 
2605 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2606 {
2607         struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
2608         return simple_xattr_list(&info->xattrs, buffer, size);
2609 }
2610 #endif /* CONFIG_TMPFS_XATTR */
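
/*
 * Not part of shmem.c: per shmem_xattr_validate() above, only the
 * security.* and trusted.* namespaces (plus system.* ACLs) are accepted;
 * user.* xattrs fail on tmpfs at this kernel version.  A minimal sketch
 * (trusted.* requires CAP_SYS_ADMIN); the attribute name is made up:
 */
#include <sys/xattr.h>

static int tag_tmpfs_file(const char *path)
{
        return setxattr(path, "trusted.example.tag", "v1", 2, XATTR_CREATE);
}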
2611 
2612 static const struct inode_operations shmem_short_symlink_operations = {
2613         .readlink       = generic_readlink,
2614         .follow_link    = shmem_follow_short_symlink,
2615 #ifdef CONFIG_TMPFS_XATTR
2616         .setxattr       = shmem_setxattr,
2617         .getxattr       = shmem_getxattr,
2618         .listxattr      = shmem_listxattr,
2619         .removexattr    = shmem_removexattr,
2620 #endif
2621 };
2622 
2623 static const struct inode_operations shmem_symlink_inode_operations = {
2624         .readlink       = generic_readlink,
2625         .follow_link    = shmem_follow_link,
2626         .put_link       = shmem_put_link,
2627 #ifdef CONFIG_TMPFS_XATTR
2628         .setxattr       = shmem_setxattr,
2629         .getxattr       = shmem_getxattr,
2630         .listxattr      = shmem_listxattr,
2631         .removexattr    = shmem_removexattr,
2632 #endif
2633 };
2634 
2635 static struct dentry *shmem_get_parent(struct dentry *child)
2636 {
2637         return ERR_PTR(-ESTALE);
2638 }
2639 
2640 static int shmem_match(struct inode *ino, void *vfh)
2641 {
2642         __u32 *fh = vfh;
2643         __u64 inum = fh[2];
2644         inum = (inum << 32) | fh[1];
2645         return ino->i_ino == inum && fh[0] == ino->i_generation;
2646 }
2647 
2648 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2649                 struct fid *fid, int fh_len, int fh_type)
2650 {
2651         struct inode *inode;
2652         struct dentry *dentry = NULL;
2653         u64 inum;
2654 
2655         if (fh_len < 3)
2656                 return NULL;
2657 
2658         inum = fid->raw[2];
2659         inum = (inum << 32) | fid->raw[1];
2660 
2661         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2662                         shmem_match, fid->raw);
2663         if (inode) {
2664                 dentry = d_find_alias(inode);
2665                 iput(inode);
2666         }
2667 
2668         return dentry;
2669 }
2670 
2671 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2672                                 struct inode *parent)
2673 {
2674         if (*len < 3) {
2675                 *len = 3;
2676                 return FILEID_INVALID;
2677         }
2678 
2679         if (inode_unhashed(inode)) {
2680                 /* Unfortunately insert_inode_hash is not idempotent,
2681                  * so as we hash inodes here rather than at creation
2682                  * time, we need a lock to ensure we only try
2683                  * to do it once.
2684                  */
2685                 static DEFINE_SPINLOCK(lock);
2686                 spin_lock(&lock);
2687                 if (inode_unhashed(inode))
2688                         __insert_inode_hash(inode,
2689                                             inode->i_ino + inode->i_generation);
2690                 spin_unlock(&lock);
2691         }
2692 
2693         fh[0] = inode->i_generation;
2694         fh[1] = inode->i_ino;
2695         fh[2] = ((__u64)inode->i_ino) >> 32;
2696 
2697         *len = 3;
2698         return 1;
2699 }
2700 
2701 static const struct export_operations shmem_export_ops = {
2702         .get_parent     = shmem_get_parent,
2703         .encode_fh      = shmem_encode_fh,
2704         .fh_to_dentry   = shmem_fh_to_dentry,
2705 };
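
/*
 * Not part of shmem.c: these export_operations let a tmpfs file be
 * re-opened from the 3-word handle built by shmem_encode_fh() above,
 * e.g. by an NFS server or the file-handle syscalls.  A minimal sketch;
 * open_by_handle_at() requires CAP_DAC_READ_SEARCH and the path is
 * hypothetical:
 */
#define _GNU_SOURCE
#include <fcntl.h>      /* name_to_handle_at(), open_by_handle_at() */
#include <stdlib.h>

static int reopen_via_handle(int mount_fd, const char *path)
{
        struct file_handle *fh;
        int mount_id, fd = -1;

        fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
        if (!fh)
                return -1;
        fh->handle_bytes = MAX_HANDLE_SZ;
        if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == 0)
                fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
        free(fh);
        return fd;
}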
2706 
2707 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2708                                bool remount)
2709 {
2710         char *this_char, *value, *rest;
2711         struct mempolicy *mpol = NULL;
2712         uid_t uid;
2713         gid_t gid;
2714 
2715         while (options != NULL) {
2716                 this_char = options;
2717                 for (;;) {
2718                         /*
2719                          * NUL-terminate this option: unfortunately,
2720                          * mount options form a comma-separated list,
2721                          * but mpol's nodelist may also contain commas.
2722                          */
2723                         options = strchr(options, ',');
2724                         if (options == NULL)
2725                                 break;
2726                         options++;
2727                         if (!isdigit(*options)) {
2728                                 options[-1] = '\0';
2729                                 break;
2730                         }
2731                 }
2732                 if (!*this_char)
2733                         continue;
2734                 if ((value = strchr(this_char,'=')) != NULL) {
2735                         *value++ = 0;
2736                 } else {
2737                         printk(KERN_ERR
2738                             "tmpfs: No value for mount option '%s'\n",
2739                             this_char);
2740                         goto error;
2741                 }
2742 
2743                 if (!strcmp(this_char,"size")) {
2744                         unsigned long long size;
2745                         size = memparse(value,&rest);
2746                         if (*rest == '%') {
2747                                 size <<= PAGE_SHIFT;
2748                                 size *= totalram_pages;
2749                                 do_div(size, 100);
2750                                 rest++;
2751                         }
2752                         if (*rest)
2753                                 goto bad_val;
2754                         sbinfo->max_blocks =
2755                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2756                 } else if (!strcmp(this_char,"nr_blocks")) {
2757                         sbinfo->max_blocks = memparse(value, &rest);
2758                         if (*rest)
2759                                 goto bad_val;
2760                 } else if (!strcmp(this_char,"nr_inodes")) {
2761                         sbinfo->max_inodes = memparse(value, &rest);
2762                         if (*rest)
2763                                 goto bad_val;
2764                 } else if (!strcmp(this_char,"mode")) {
2765                         if (remount)
2766                                 continue;
2767                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2768                         if (*rest)
2769                                 goto bad_val;
2770                 } else if (!strcmp(this_char,"uid")) {
2771                         if (remount)
2772                                 continue;
2773                         uid = simple_strtoul(value, &rest, 0);
2774                         if (*rest)
2775                                 goto bad_val;
2776                         sbinfo->uid = make_kuid(current_user_ns(), uid);
2777                         if (!uid_valid(sbinfo->uid))
2778                                 goto bad_val;
2779                 } else if (!strcmp(this_char,"gid")) {
2780                         if (remount)
2781                                 continue;
2782                         gid = simple_strtoul(value, &rest, 0);
2783                         if (*rest)
2784                                 goto bad_val;
2785                         sbinfo->gid = make_kgid(current_user_ns(), gid);
2786                         if (!gid_valid(sbinfo->gid))
2787                                 goto bad_val;
2788                 } else if (!strcmp(this_char,"mpol")) {
2789                         mpol_put(mpol);
2790                         mpol = NULL;
2791                         if (mpol_parse_str(value, &mpol))
2792                                 goto bad_val;
2793                 } else {
2794                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2795                                this_char);
2796                         goto error;
2797                 }
2798         }
2799         sbinfo->mpol = mpol;
2800         return 0;
2801 
2802 bad_val:
2803         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2804                value, this_char);
2805 error:
2806         mpol_put(mpol);
2807         return 1;
2808 
2809 }
2810 
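/*
 * Illustrative userspace counterpart (an editor's sketch, not part of
 * this file): the options string below exercises the loop above.  The
 * nodelist commas after "mpol=interleave:" are not treated as option
 * separators, because the character following each of them is a digit.
 * The mount may still fail in mpol_parse_str() if the named NUMA nodes
 * are not online.
 */
#include <sys/mount.h>

static int mount_tmpfs_example(void)
{
        return mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
                     "size=50%,mpol=interleave:0-2,5,mode=1777");
}
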
2811 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2812 {
2813         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2814         struct shmem_sb_info config = *sbinfo;
2815         unsigned long inodes;
2816         int error = -EINVAL;
2817 
2818         config.mpol = NULL;
2819         if (shmem_parse_options(data, &config, true))
2820                 return error;
2821 
2822         spin_lock(&sbinfo->stat_lock);
2823         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2824         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2825                 goto out;
2826         if (config.max_inodes < inodes)
2827                 goto out;
2828         /*
2829          * Those tests disallow limited->unlimited while any are in use;
2830          * but we must separately disallow unlimited->limited, because
2831          * in that case we have no record of how much is already in use.
2832          */
2833         if (config.max_blocks && !sbinfo->max_blocks)
2834                 goto out;
2835         if (config.max_inodes && !sbinfo->max_inodes)
2836                 goto out;
2837 
2838         error = 0;
2839         sbinfo->max_blocks  = config.max_blocks;
2840         sbinfo->max_inodes  = config.max_inodes;
2841         sbinfo->free_inodes = config.max_inodes - inodes;
2842 
2843         /*
2844          * Preserve previous mempolicy unless mpol remount option was specified.
2845          */
2846         if (config.mpol) {
2847                 mpol_put(sbinfo->mpol);
2848                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
2849         }
2850 out:
2851         spin_unlock(&sbinfo->stat_lock);
2852         return error;
2853 }
2854 
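/*
 * Remount sketch (illustrative, editor's addition): per the tests
 * above, shrinking "size" below current usage fails, as does giving
 * limits to a previously unlimited mount (there is no usage record)
 * or lifting limits while blocks or inodes remain in use.
 */
#include <sys/mount.h>

static int remount_tmpfs_example(void)
{
        /* fails with EINVAL if more than 2g of blocks are in use */
        return mount("tmpfs", "/mnt/tmp", "tmpfs", MS_REMOUNT, "size=2g");
}
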
2855 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2856 {
2857         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2858 
2859         if (sbinfo->max_blocks != shmem_default_max_blocks())
2860                 seq_printf(seq, ",size=%luk",
2861                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2862         if (sbinfo->max_inodes != shmem_default_max_inodes())
2863                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2864         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2865                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
2866         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
2867                 seq_printf(seq, ",uid=%u",
2868                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
2869         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
2870                 seq_printf(seq, ",gid=%u",
2871                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
2872         shmem_show_mpol(seq, sbinfo->mpol);
2873         return 0;
2874 }
2875 
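/*
 * Example output (illustrative): with 4KiB pages, a mount made with
 * "size=1g,mode=700" would show up in /proc/mounts roughly as
 *
 *      tmpfs /mnt/tmp tmpfs rw,size=1048576k,mode=700 0 0
 *
 * since only options differing from the defaults are printed above.
 */
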
2876 #define MFD_NAME_PREFIX "memfd:"
2877 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
2878 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
2879 
2880 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
2881 
2882 SYSCALL_DEFINE2(memfd_create,
2883                 const char __user *, uname,
2884                 unsigned int, flags)
2885 {
2886         struct shmem_inode_info *info;
2887         struct file *file;
2888         int fd, error;
2889         char *name;
2890         long len;
2891 
2892         if (flags & ~(unsigned int)MFD_ALL_FLAGS)
2893                 return -EINVAL;
2894 
2895         /* length includes terminating zero */
2896         len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
2897         if (len <= 0)
2898                 return -EFAULT;
2899         if (len > MFD_NAME_MAX_LEN + 1)
2900                 return -EINVAL;
2901 
2902         name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
2903         if (!name)
2904                 return -ENOMEM;
2905 
2906         strcpy(name, MFD_NAME_PREFIX);
2907         if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
2908                 error = -EFAULT;
2909                 goto err_name;
2910         }
2911 
2912         /* terminating-zero may have changed after strnlen_user() returned */
2913         if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
2914                 error = -EFAULT;
2915                 goto err_name;
2916         }
2917 
2918         fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
2919         if (fd < 0) {
2920                 error = fd;
2921                 goto err_name;
2922         }
2923 
2924         file = shmem_file_setup(name, 0, VM_NORESERVE);
2925         if (IS_ERR(file)) {
2926                 error = PTR_ERR(file);
2927                 goto err_fd;
2928         }
2929         info = SHMEM_I(file_inode(file));
2930         file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
2931         file->f_flags |= O_RDWR | O_LARGEFILE;
2932         if (flags & MFD_ALLOW_SEALING)
2933                 info->seals &= ~F_SEAL_SEAL;
2934 
2935         fd_install(fd, file);
2936         kfree(name);
2937         return fd;
2938 
2939 err_fd:
2940         put_unused_fd(fd);
2941 err_name:
2942         kfree(name);
2943         return error;
2944 }
2945 
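/*
 * Typical userspace use (an illustrative sketch): create a sealable
 * memfd, size it, then forbid resizing.  F_ADD_SEALS succeeds here
 * only because MFD_ALLOW_SEALING cleared F_SEAL_SEAL above.  On a
 * libc without the memfd_create() wrapper, use
 * syscall(__NR_memfd_create, ...) instead.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int memfd_example(void)
{
        int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);

        if (fd < 0)
                return -1;
        if (ftruncate(fd, 4096) < 0 ||
            fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* named "memfd:demo" in /proc/<pid>/fd and maps */
}
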
2946 #endif /* CONFIG_TMPFS */
2947 
2948 static void shmem_put_super(struct super_block *sb)
2949 {
2950         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2951 
2952         percpu_counter_destroy(&sbinfo->used_blocks);
2953         mpol_put(sbinfo->mpol);
2954         kfree(sbinfo);
2955         sb->s_fs_info = NULL;
2956 }
2957 
2958 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2959 {
2960         struct inode *inode;
2961         struct shmem_sb_info *sbinfo;
2962         int err = -ENOMEM;
2963 
2964         /* Round up to L1_CACHE_BYTES to resist false sharing */
2965         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2966                                 L1_CACHE_BYTES), GFP_KERNEL);
2967         if (!sbinfo)
2968                 return -ENOMEM;
2969 
2970         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2971         sbinfo->uid = current_fsuid();
2972         sbinfo->gid = current_fsgid();
2973         sb->s_fs_info = sbinfo;
2974 
2975 #ifdef CONFIG_TMPFS
2976         /*
2977          * By default we allow only half of the physical RAM per
2978          * tmpfs instance, limiting inodes to one per page of lowmem;
2979          * but the internal instance is left unlimited.
2980          */
2981         if (!(sb->s_flags & MS_KERNMOUNT)) {
2982                 sbinfo->max_blocks = shmem_default_max_blocks();
2983                 sbinfo->max_inodes = shmem_default_max_inodes();
2984                 if (shmem_parse_options(data, sbinfo, false)) {
2985                         err = -EINVAL;
2986                         goto failed;
2987                 }
2988         } else {
2989                 sb->s_flags |= MS_NOUSER;
2990         }
2991         sb->s_export_op = &shmem_export_ops;
2992         sb->s_flags |= MS_NOSEC;
2993 #else
2994         sb->s_flags |= MS_NOUSER;
2995 #endif
2996 
2997         spin_lock_init(&sbinfo->stat_lock);
2998         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2999                 goto failed;
3000         sbinfo->free_inodes = sbinfo->max_inodes;
3001 
3002         sb->s_maxbytes = MAX_LFS_FILESIZE;
3003         sb->s_blocksize = PAGE_CACHE_SIZE;
3004         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
3005         sb->s_magic = TMPFS_MAGIC;
3006         sb->s_op = &shmem_ops;
3007         sb->s_time_gran = 1;
3008 #ifdef CONFIG_TMPFS_XATTR
3009         sb->s_xattr = shmem_xattr_handlers;
3010 #endif
3011 #ifdef CONFIG_TMPFS_POSIX_ACL
3012         sb->s_flags |= MS_POSIXACL;
3013 #endif
3014 
3015         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3016         if (!inode)
3017                 goto failed;
3018         inode->i_uid = sbinfo->uid;
3019         inode->i_gid = sbinfo->gid;
3020         sb->s_root = d_make_root(inode);
3021         if (!sb->s_root)
3022                 goto failed;
3023         return 0;
3024 
3025 failed:
3026         shmem_put_super(sb);
3027         return err;
3028 }
3029 
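/*
 * Worked example (illustrative): on a machine with 4GiB of RAM and
 * 4KiB pages, "half of the physical RAM" above means a default of
 * totalram_pages / 2 = 524288 blocks, i.e. size=2GiB, for any
 * user-visible mount; nr_inodes is bounded by the pages of lowmem.
 */
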
3030 static struct kmem_cache *shmem_inode_cachep;
3031 
3032 static struct inode *shmem_alloc_inode(struct super_block *sb)
3033 {
3034         struct shmem_inode_info *info;
3035         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3036         if (!info)
3037                 return NULL;
3038         return &info->vfs_inode;
3039 }
3040 
3041 static void shmem_destroy_callback(struct rcu_head *head)
3042 {
3043         struct inode *inode = container_of(head, struct inode, i_rcu);
3044         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3045 }
3046 
3047 static void shmem_destroy_inode(struct inode *inode)
3048 {
3049         if (S_ISREG(inode->i_mode))
3050                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3051         call_rcu(&inode->i_rcu, shmem_destroy_callback);
3052 }
3053 
3054 static void shmem_init_inode(void *foo)
3055 {
3056         struct shmem_inode_info *info = foo;
3057         inode_init_once(&info->vfs_inode);
3058 }
3059 
3060 static int shmem_init_inodecache(void)
3061 {
3062         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3063                                 sizeof(struct shmem_inode_info),
3064                                 0, SLAB_PANIC, shmem_init_inode);
3065         return 0;
3066 }
3067 
3068 static void shmem_destroy_inodecache(void)
3069 {
3070         kmem_cache_destroy(shmem_inode_cachep);
3071 }
3072 
3073 static const struct address_space_operations shmem_aops = {
3074         .writepage      = shmem_writepage,
3075         .set_page_dirty = __set_page_dirty_no_writeback,
3076 #ifdef CONFIG_TMPFS
3077         .write_begin    = shmem_write_begin,
3078         .write_end      = shmem_write_end,
3079 #endif
3080         .migratepage    = migrate_page,
3081         .error_remove_page = generic_error_remove_page,
3082 };
3083 
3084 static const struct file_operations shmem_file_operations = {
3085         .mmap           = shmem_mmap,
3086 #ifdef CONFIG_TMPFS
3087         .llseek         = shmem_file_llseek,
3088         .read           = new_sync_read,
3089         .write          = new_sync_write,
3090         .read_iter      = shmem_file_read_iter,
3091         .write_iter     = generic_file_write_iter,
3092         .fsync          = noop_fsync,
3093         .splice_read    = shmem_file_splice_read,
3094         .splice_write   = iter_file_splice_write,
3095         .fallocate      = shmem_fallocate,
3096 #endif
3097 };
3098 
3099 static const struct inode_operations shmem_inode_operations = {
3100         .setattr        = shmem_setattr,
3101 #ifdef CONFIG_TMPFS_XATTR
3102         .setxattr       = shmem_setxattr,
3103         .getxattr       = shmem_getxattr,
3104         .listxattr      = shmem_listxattr,
3105         .removexattr    = shmem_removexattr,
3106         .set_acl        = simple_set_acl,
3107 #endif
3108 };
3109 
3110 static const struct inode_operations shmem_dir_inode_operations = {
3111 #ifdef CONFIG_TMPFS
3112         .create         = shmem_create,
3113         .lookup         = simple_lookup,
3114         .link           = shmem_link,
3115         .unlink         = shmem_unlink,
3116         .symlink        = shmem_symlink,
3117         .mkdir          = shmem_mkdir,
3118         .rmdir          = shmem_rmdir,
3119         .mknod          = shmem_mknod,
3120         .rename2        = shmem_rename2,
3121         .tmpfile        = shmem_tmpfile,
3122 #endif
3123 #ifdef CONFIG_TMPFS_XATTR
3124         .setxattr       = shmem_setxattr,
3125         .getxattr       = shmem_getxattr,
3126         .listxattr      = shmem_listxattr,
3127         .removexattr    = shmem_removexattr,
3128 #endif
3129 #ifdef CONFIG_TMPFS_POSIX_ACL
3130         .setattr        = shmem_setattr,
3131         .set_acl        = simple_set_acl,
3132 #endif
3133 };
3134 
3135 static const struct inode_operations shmem_special_inode_operations = {
3136 #ifdef CONFIG_TMPFS_XATTR
3137         .setxattr       = shmem_setxattr,
3138         .getxattr       = shmem_getxattr,
3139         .listxattr      = shmem_listxattr,
3140         .removexattr    = shmem_removexattr,
3141 #endif
3142 #ifdef CONFIG_TMPFS_POSIX_ACL
3143         .setattr        = shmem_setattr,
3144         .set_acl        = simple_set_acl,
3145 #endif
3146 };
3147 
3148 static const struct super_operations shmem_ops = {
3149         .alloc_inode    = shmem_alloc_inode,
3150         .destroy_inode  = shmem_destroy_inode,
3151 #ifdef CONFIG_TMPFS
3152         .statfs         = shmem_statfs,
3153         .remount_fs     = shmem_remount_fs,
3154         .show_options   = shmem_show_options,
3155 #endif
3156         .evict_inode    = shmem_evict_inode,
3157         .drop_inode     = generic_delete_inode,
3158         .put_super      = shmem_put_super,
3159 };
3160 
3161 static const struct vm_operations_struct shmem_vm_ops = {
3162         .fault          = shmem_fault,
3163         .map_pages      = filemap_map_pages,
3164 #ifdef CONFIG_NUMA
3165         .set_policy     = shmem_set_policy,
3166         .get_policy     = shmem_get_policy,
3167 #endif
3168         .remap_pages    = generic_file_remap_pages,
3169 };
3170 
3171 static struct dentry *shmem_mount(struct file_system_type *fs_type,
3172         int flags, const char *dev_name, void *data)
3173 {
3174         return mount_nodev(fs_type, flags, data, shmem_fill_super);
3175 }
3176 
3177 static struct file_system_type shmem_fs_type = {
3178         .owner          = THIS_MODULE,
3179         .name           = "tmpfs",
3180         .mount          = shmem_mount,
3181         .kill_sb        = kill_litter_super,
3182         .fs_flags       = FS_USERNS_MOUNT,
3183 };
3184 
3185 int __init shmem_init(void)
3186 {
3187         int error;
3188 
3189         /* If rootfs called this, don't re-init */
3190         if (shmem_inode_cachep)
3191                 return 0;
3192 
3193         error = bdi_init(&shmem_backing_dev_info);
3194         if (error)
3195                 goto out4;
3196 
3197         error = shmem_init_inodecache();
3198         if (error)
3199                 goto out3;
3200 
3201         error = register_filesystem(&shmem_fs_type);
3202         if (error) {
3203                 printk(KERN_ERR "Could not register tmpfs\n");
3204                 goto out2;
3205         }
3206 
3207         shm_mnt = kern_mount(&shmem_fs_type);
3208         if (IS_ERR(shm_mnt)) {
3209                 error = PTR_ERR(shm_mnt);
3210                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
3211                 goto out1;
3212         }
3213         return 0;
3214 
3215 out1:
3216         unregister_filesystem(&shmem_fs_type);
3217 out2:
3218         shmem_destroy_inodecache();
3219 out3:
3220         bdi_destroy(&shmem_backing_dev_info);
3221 out4:
3222         shm_mnt = ERR_PTR(error);
3223         return error;
3224 }
3225 
3226 #else /* !CONFIG_SHMEM */
3227 
3228 /*
3229  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3230  *
3231  * This is intended for small systems where the benefits of the full
3232  * shmem code (swap-backed and resource-limited) are outweighed by
3233  * its complexity. On systems without swap this code should be
3234  * effectively equivalent, but much lighter weight.
3235  */
3236 
3237 static struct file_system_type shmem_fs_type = {
3238         .name           = "tmpfs",
3239         .mount          = ramfs_mount,
3240         .kill_sb        = kill_litter_super,
3241         .fs_flags       = FS_USERNS_MOUNT,
3242 };
3243 
3244 int __init shmem_init(void)
3245 {
3246         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3247 
3248         shm_mnt = kern_mount(&shmem_fs_type);
3249         BUG_ON(IS_ERR(shm_mnt));
3250 
3251         return 0;
3252 }
3253 
3254 int shmem_unuse(swp_entry_t swap, struct page *page)
3255 {
3256         return 0;
3257 }
3258 
3259 int shmem_lock(struct file *file, int lock, struct user_struct *user)
3260 {
3261         return 0;
3262 }
3263 
3264 void shmem_unlock_mapping(struct address_space *mapping)
3265 {
3266 }
3267 
3268 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
3269 {
3270         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
3271 }
3272 EXPORT_SYMBOL_GPL(shmem_truncate_range);
3273 
3274 #define shmem_vm_ops                            generic_file_vm_ops
3275 #define shmem_file_operations                   ramfs_file_operations
3276 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3277 #define shmem_acct_size(flags, size)            0
3278 #define shmem_unacct_size(flags, size)          do {} while (0)
3279 
3280 #endif /* CONFIG_SHMEM */
3281 
3282 /* common code */
3283 
3284 static struct dentry_operations anon_ops = {
3285         .d_dname = simple_dname
3286 };
3287 
3288 static struct file *__shmem_file_setup(const char *name, loff_t size,
3289                                        unsigned long flags, unsigned int i_flags)
3290 {
3291         struct file *res;
3292         struct inode *inode;
3293         struct path path;
3294         struct super_block *sb;
3295         struct qstr this;
3296 
3297         if (IS_ERR(shm_mnt))
3298                 return ERR_CAST(shm_mnt);
3299 
3300         if (size < 0 || size > MAX_LFS_FILESIZE)
3301                 return ERR_PTR(-EINVAL);
3302 
3303         if (shmem_acct_size(flags, size))
3304                 return ERR_PTR(-ENOMEM);
3305 
3306         res = ERR_PTR(-ENOMEM);
3307         this.name = name;
3308         this.len = strlen(name);
3309         this.hash = 0; /* will go */
3310         sb = shm_mnt->mnt_sb;
3311         path.mnt = mntget(shm_mnt);
3312         path.dentry = d_alloc_pseudo(sb, &this);
3313         if (!path.dentry)
3314                 goto put_memory;
3315         d_set_d_op(path.dentry, &anon_ops);
3316 
3317         res = ERR_PTR(-ENOSPC);
3318         inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3319         if (!inode)
3320                 goto put_memory;
3321 
3322         inode->i_flags |= i_flags;
3323         d_instantiate(path.dentry, inode);
3324         inode->i_size = size;
3325         clear_nlink(inode);     /* It is unlinked */
3326         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
3327         if (IS_ERR(res))
3328                 goto put_path;
3329 
3330         res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3331                   &shmem_file_operations);
3332         if (IS_ERR(res))
3333                 goto put_path;
3334 
3335         return res;
3336 
3337 put_memory:
3338         shmem_unacct_size(flags, size);
3339 put_path:
3340         path_put(&path);
3341         return res;
3342 }
3343 
3344 /**
3345  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3346  *      kernel internal.  There will be NO LSM permission checks against the
3347  *      underlying inode.  So users of this interface must do LSM checks at a
3348  *      higher layer.  The one user is the big_key implementation.  LSM checks
3349  *      are provided at the key level rather than the inode level.
3350  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3351  * @size: size to be set for the file
3352  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3353  */
3354 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
3355 {
3356         return __shmem_file_setup(name, size, flags, S_PRIVATE);
3357 }
3358 
3359 /**
3360  * shmem_file_setup - get an unlinked file living in tmpfs
3361  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3362  * @size: size to be set for the file
3363  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3364  */
3365 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3366 {
3367         return __shmem_file_setup(name, size, flags, 0);
3368 }
3369 EXPORT_SYMBOL_GPL(shmem_file_setup);
3370 
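/*
 * In-kernel usage sketch (illustrative, not a caller in this file):
 * obtain an unlinked, swap-backed file for driver-private storage.
 */
static struct file *example_grab_buffer(loff_t size)
{
        struct file *filp = shmem_file_setup("example-buf", size,
                                             VM_NORESERVE);

        if (IS_ERR(filp))
                return filp;
        /* pages live in filp->f_mapping; release with fput() when done */
        return filp;
}
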
3371 /**
3372  * shmem_zero_setup - setup a shared anonymous mapping
3373  * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
3374  */
3375 int shmem_zero_setup(struct vm_area_struct *vma)
3376 {
3377         struct file *file;
3378         loff_t size = vma->vm_end - vma->vm_start;
3379 
3380         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
3381         if (IS_ERR(file))
3382                 return PTR_ERR(file);
3383 
3384         if (vma->vm_file)
3385                 fput(vma->vm_file);
3386         vma->vm_file = file;
3387         vma->vm_ops = &shmem_vm_ops;
3388         return 0;
3389 }
3390 
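/*
 * Userspace view (illustrative): a shared anonymous mapping such as
 * the one below is what arrives here through do_mmap_pgoff().
 */
#include <sys/mman.h>

static void *shared_anon_example(size_t len)
{
        return mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
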
3391 /**
3392  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3393  * @mapping:    the page's address_space
3394  * @index:      the page index
3395  * @gfp:        the page allocator flags to use if allocating
3396  *
3397  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3398  * with any new page allocations done using the specified allocation flags.
3399  * But read_cache_page_gfp() uses the ->readpage() method: which does not
3400  * suit tmpfs, since it may have pages in swapcache, and needs to find those
3401  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
3402  *
3403  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3404  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
3405  */
3406 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3407                                          pgoff_t index, gfp_t gfp)
3408 {
3409 #ifdef CONFIG_SHMEM
3410         struct inode *inode = mapping->host;
3411         struct page *page;
3412         int error;
3413 
3414         BUG_ON(mapping->a_ops != &shmem_aops);
3415         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
3416         if (error)
3417                 page = ERR_PTR(error);
3418         else
3419                 unlock_page(page);
3420         return page;
3421 #else
3422         /*
3423          * The tiny !SHMEM case uses ramfs without swap
3424          */
3425         return read_cache_page_gfp(mapping, index, gfp);
3426 #endif
3427 }
3428 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
3429 
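/*
 * Driver-side sketch (illustrative, in the spirit of the i915 usage
 * described above, not its actual code): read page @index, relaxing
 * the allocation flags so that failure is reported to the caller
 * rather than invoking the OOM killer.
 */
static struct page *example_read_page(struct address_space *mapping,
                                      pgoff_t index)
{
        gfp_t gfp = mapping_gfp_mask(mapping) |
                    __GFP_NORETRY | __GFP_NOWARN;

        /* on success, caller releases with page_cache_release() */
        return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
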
