Version:  2.0.40 2.2.26 2.4.37 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7

Linux/mm/shmem.c

  1 /*
  2  * Resizable virtual memory filesystem for Linux.
  3  *
  4  * Copyright (C) 2000 Linus Torvalds.
  5  *               2000 Transmeta Corp.
  6  *               2000-2001 Christoph Rohland
  7  *               2000-2001 SAP AG
  8  *               2002 Red Hat Inc.
  9  * Copyright (C) 2002-2011 Hugh Dickins.
 10  * Copyright (C) 2011 Google Inc.
 11  * Copyright (C) 2002-2005 VERITAS Software Corporation.
 12  * Copyright (C) 2004 Andi Kleen, SuSE Labs
 13  *
 14  * Extended attribute support for tmpfs:
 15  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 16  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 17  *
 18  * tiny-shmem:
 19  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 20  *
 21  * This file is released under the GPL.
 22  */
 23 
 24 #include <linux/fs.h>
 25 #include <linux/init.h>
 26 #include <linux/vfs.h>
 27 #include <linux/mount.h>
 28 #include <linux/ramfs.h>
 29 #include <linux/pagemap.h>
 30 #include <linux/file.h>
 31 #include <linux/mm.h>
 32 #include <linux/export.h>
 33 #include <linux/swap.h>
 34 #include <linux/uio.h>
 35 
 36 static struct vfsmount *shm_mnt;
 37 
 38 #ifdef CONFIG_SHMEM
 39 /*
 40  * This virtual memory filesystem is heavily based on the ramfs. It
 41  * extends ramfs by the ability to use swap and honor resource limits
 42  * which makes it a completely usable filesystem.
 43  */
 44 
 45 #include <linux/xattr.h>
 46 #include <linux/exportfs.h>
 47 #include <linux/posix_acl.h>
 48 #include <linux/posix_acl_xattr.h>
 49 #include <linux/mman.h>
 50 #include <linux/string.h>
 51 #include <linux/slab.h>
 52 #include <linux/backing-dev.h>
 53 #include <linux/shmem_fs.h>
 54 #include <linux/writeback.h>
 55 #include <linux/blkdev.h>
 56 #include <linux/pagevec.h>
 57 #include <linux/percpu_counter.h>
 58 #include <linux/falloc.h>
 59 #include <linux/splice.h>
 60 #include <linux/security.h>
 61 #include <linux/swapops.h>
 62 #include <linux/mempolicy.h>
 63 #include <linux/namei.h>
 64 #include <linux/ctype.h>
 65 #include <linux/migrate.h>
 66 #include <linux/highmem.h>
 67 #include <linux/seq_file.h>
 68 #include <linux/magic.h>
 69 #include <linux/syscalls.h>
 70 #include <linux/fcntl.h>
 71 #include <uapi/linux/memfd.h>
 72 
 73 #include <asm/uaccess.h>
 74 #include <asm/pgtable.h>
 75 
 76 #include "internal.h"
 77 
 78 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
 79 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
 80 
 81 /* Pretend that each entry is of this size in directory's i_size */
 82 #define BOGO_DIRENT_SIZE 20
 83 
 84 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
 85 #define SHORT_SYMLINK_LEN 128
 86 
 87 /*
 88  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 89  * inode->i_private (with i_mutex making sure that it has only one user at
 90  * a time): we would prefer not to enlarge the shmem inode just for that.
 91  */
 92 struct shmem_falloc {
 93         wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
 94         pgoff_t start;          /* start of range currently being fallocated */
 95         pgoff_t next;           /* the next page offset to be fallocated */
 96         pgoff_t nr_falloced;    /* how many new pages have been fallocated */
 97         pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
 98 };
 99 
100 /* Flag allocation requirements to shmem_getpage */
101 enum sgp_type {
102         SGP_READ,       /* don't exceed i_size, don't allocate page */
103         SGP_CACHE,      /* don't exceed i_size, may allocate page */
104         SGP_WRITE,      /* may exceed i_size, may allocate !Uptodate page */
105         SGP_FALLOC,     /* like SGP_WRITE, but make existing page Uptodate */
106 };
107 
108 #ifdef CONFIG_TMPFS
109 static unsigned long shmem_default_max_blocks(void)
110 {
111         return totalram_pages / 2;
112 }
113 
114 static unsigned long shmem_default_max_inodes(void)
115 {
116         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
117 }
118 #endif
119 
120 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
121 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
122                                 struct shmem_inode_info *info, pgoff_t index);
123 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
124                 struct page **pagep, enum sgp_type sgp,
125                 gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);
126 
127 static inline int shmem_getpage(struct inode *inode, pgoff_t index,
128                 struct page **pagep, enum sgp_type sgp)
129 {
130         return shmem_getpage_gfp(inode, index, pagep, sgp,
131                 mapping_gfp_mask(inode->i_mapping), NULL, NULL);
132 }
133 
134 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
135 {
136         return sb->s_fs_info;
137 }
138 
139 /*
140  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
141  * for shared memory and for shared anonymous (/dev/zero) mappings
142  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
143  * consistent with the pre-accounting of private mappings ...
144  */
145 static inline int shmem_acct_size(unsigned long flags, loff_t size)
146 {
147         return (flags & VM_NORESERVE) ?
148                 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
149 }
150 
151 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
152 {
153         if (!(flags & VM_NORESERVE))
154                 vm_unacct_memory(VM_ACCT(size));
155 }
156 
157 static inline int shmem_reacct_size(unsigned long flags,
158                 loff_t oldsize, loff_t newsize)
159 {
160         if (!(flags & VM_NORESERVE)) {
161                 if (VM_ACCT(newsize) > VM_ACCT(oldsize))
162                         return security_vm_enough_memory_mm(current->mm,
163                                         VM_ACCT(newsize) - VM_ACCT(oldsize));
164                 else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
165                         vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
166         }
167         return 0;
168 }
169 
170 /*
171  * ... whereas tmpfs objects are accounted incrementally as
172  * pages are allocated, in order to allow large sparse files.
173  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
174  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
175  */
176 static inline int shmem_acct_block(unsigned long flags)
177 {
178         return (flags & VM_NORESERVE) ?
179                 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
180 }
181 
182 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
183 {
184         if (flags & VM_NORESERVE)
185                 vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
186 }
187 
188 static const struct super_operations shmem_ops;
189 static const struct address_space_operations shmem_aops;
190 static const struct file_operations shmem_file_operations;
191 static const struct inode_operations shmem_inode_operations;
192 static const struct inode_operations shmem_dir_inode_operations;
193 static const struct inode_operations shmem_special_inode_operations;
194 static const struct vm_operations_struct shmem_vm_ops;
195 
196 static LIST_HEAD(shmem_swaplist);
197 static DEFINE_MUTEX(shmem_swaplist_mutex);
198 
199 static int shmem_reserve_inode(struct super_block *sb)
200 {
201         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
202         if (sbinfo->max_inodes) {
203                 spin_lock(&sbinfo->stat_lock);
204                 if (!sbinfo->free_inodes) {
205                         spin_unlock(&sbinfo->stat_lock);
206                         return -ENOSPC;
207                 }
208                 sbinfo->free_inodes--;
209                 spin_unlock(&sbinfo->stat_lock);
210         }
211         return 0;
212 }
213 
214 static void shmem_free_inode(struct super_block *sb)
215 {
216         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
217         if (sbinfo->max_inodes) {
218                 spin_lock(&sbinfo->stat_lock);
219                 sbinfo->free_inodes++;
220                 spin_unlock(&sbinfo->stat_lock);
221         }
222 }
223 
224 /**
225  * shmem_recalc_inode - recalculate the block usage of an inode
226  * @inode: inode to recalc
227  *
228  * We have to calculate the free blocks since the mm can drop
229  * undirtied hole pages behind our back.
230  *
231  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
232  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
233  *
234  * It has to be called with the spinlock held.
235  */
236 static void shmem_recalc_inode(struct inode *inode)
237 {
238         struct shmem_inode_info *info = SHMEM_I(inode);
239         long freed;
240 
241         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
242         if (freed > 0) {
243                 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
244                 if (sbinfo->max_blocks)
245                         percpu_counter_add(&sbinfo->used_blocks, -freed);
246                 info->alloced -= freed;
247                 inode->i_blocks -= freed * BLOCKS_PER_PAGE;
248                 shmem_unacct_blocks(info->flags, freed);
249         }
250 }
251 
252 /*
253  * Replace item expected in radix tree by a new item, while holding tree lock.
254  */
255 static int shmem_radix_tree_replace(struct address_space *mapping,
256                         pgoff_t index, void *expected, void *replacement)
257 {
258         void **pslot;
259         void *item;
260 
261         VM_BUG_ON(!expected);
262         VM_BUG_ON(!replacement);
263         pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
264         if (!pslot)
265                 return -ENOENT;
266         item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
267         if (item != expected)
268                 return -ENOENT;
269         radix_tree_replace_slot(pslot, replacement);
270         return 0;
271 }
272 
273 /*
274  * Sometimes, before we decide whether to proceed or to fail, we must check
275  * that an entry was not already brought back from swap by a racing thread.
276  *
277  * Checking page is not enough: by the time a SwapCache page is locked, it
278  * might be reused, and again be SwapCache, using the same swap as before.
279  */
280 static bool shmem_confirm_swap(struct address_space *mapping,
281                                pgoff_t index, swp_entry_t swap)
282 {
283         void *item;
284 
285         rcu_read_lock();
286         item = radix_tree_lookup(&mapping->page_tree, index);
287         rcu_read_unlock();
288         return item == swp_to_radix_entry(swap);
289 }
290 
291 /*
292  * Like add_to_page_cache_locked, but error if expected item has gone.
293  */
294 static int shmem_add_to_page_cache(struct page *page,
295                                    struct address_space *mapping,
296                                    pgoff_t index, void *expected)
297 {
298         int error;
299 
300         VM_BUG_ON_PAGE(!PageLocked(page), page);
301         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
302 
303         get_page(page);
304         page->mapping = mapping;
305         page->index = index;
306 
307         spin_lock_irq(&mapping->tree_lock);
308         if (!expected)
309                 error = radix_tree_insert(&mapping->page_tree, index, page);
310         else
311                 error = shmem_radix_tree_replace(mapping, index, expected,
312                                                                  page);
313         if (!error) {
314                 mapping->nrpages++;
315                 __inc_zone_page_state(page, NR_FILE_PAGES);
316                 __inc_zone_page_state(page, NR_SHMEM);
317                 spin_unlock_irq(&mapping->tree_lock);
318         } else {
319                 page->mapping = NULL;
320                 spin_unlock_irq(&mapping->tree_lock);
321                 put_page(page);
322         }
323         return error;
324 }
325 
326 /*
327  * Like delete_from_page_cache, but substitutes swap for page.
328  */
329 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
330 {
331         struct address_space *mapping = page->mapping;
332         int error;
333 
334         spin_lock_irq(&mapping->tree_lock);
335         error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
336         page->mapping = NULL;
337         mapping->nrpages--;
338         __dec_zone_page_state(page, NR_FILE_PAGES);
339         __dec_zone_page_state(page, NR_SHMEM);
340         spin_unlock_irq(&mapping->tree_lock);
341         put_page(page);
342         BUG_ON(error);
343 }
344 
345 /*
346  * Remove swap entry from radix tree, free the swap and its page cache.
347  */
348 static int shmem_free_swap(struct address_space *mapping,
349                            pgoff_t index, void *radswap)
350 {
351         void *old;
352 
353         spin_lock_irq(&mapping->tree_lock);
354         old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
355         spin_unlock_irq(&mapping->tree_lock);
356         if (old != radswap)
357                 return -ENOENT;
358         free_swap_and_cache(radix_to_swp_entry(radswap));
359         return 0;
360 }
361 
362 /*
363  * Determine (in bytes) how many of the shmem object's pages mapped by the
364  * given offsets are swapped out.
365  *
366  * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
367  * as long as the inode doesn't go away and racy results are not a problem.
368  */
369 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
370                                                 pgoff_t start, pgoff_t end)
371 {
372         struct radix_tree_iter iter;
373         void **slot;
374         struct page *page;
375         unsigned long swapped = 0;
376 
377         rcu_read_lock();
378 
379         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
380                 if (iter.index >= end)
381                         break;
382 
383                 page = radix_tree_deref_slot(slot);
384 
385                 if (radix_tree_deref_retry(page)) {
386                         slot = radix_tree_iter_retry(&iter);
387                         continue;
388                 }
389 
390                 if (radix_tree_exceptional_entry(page))
391                         swapped++;
392 
393                 if (need_resched()) {
394                         cond_resched_rcu();
395                         slot = radix_tree_iter_next(&iter);
396                 }
397         }
398 
399         rcu_read_unlock();
400 
401         return swapped << PAGE_SHIFT;
402 }
403 
404 /*
405  * Determine (in bytes) how many of the shmem object's pages mapped by the
406  * given vma is swapped out.
407  *
408  * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
409  * as long as the inode doesn't go away and racy results are not a problem.
410  */
411 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
412 {
413         struct inode *inode = file_inode(vma->vm_file);
414         struct shmem_inode_info *info = SHMEM_I(inode);
415         struct address_space *mapping = inode->i_mapping;
416         unsigned long swapped;
417 
418         /* Be careful as we don't hold info->lock */
419         swapped = READ_ONCE(info->swapped);
420 
421         /*
422          * The easier cases are when the shmem object has nothing in swap, or
423          * the vma maps it whole. Then we can simply use the stats that we
424          * already track.
425          */
426         if (!swapped)
427                 return 0;
428 
429         if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
430                 return swapped << PAGE_SHIFT;
431 
432         /* Here comes the more involved part */
433         return shmem_partial_swap_usage(mapping,
434                         linear_page_index(vma, vma->vm_start),
435                         linear_page_index(vma, vma->vm_end));
436 }
437 
438 /*
439  * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
440  */
441 void shmem_unlock_mapping(struct address_space *mapping)
442 {
443         struct pagevec pvec;
444         pgoff_t indices[PAGEVEC_SIZE];
445         pgoff_t index = 0;
446 
447         pagevec_init(&pvec, 0);
448         /*
449          * Minor point, but we might as well stop if someone else SHM_LOCKs it.
450          */
451         while (!mapping_unevictable(mapping)) {
452                 /*
453                  * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
454                  * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
455                  */
456                 pvec.nr = find_get_entries(mapping, index,
457                                            PAGEVEC_SIZE, pvec.pages, indices);
458                 if (!pvec.nr)
459                         break;
460                 index = indices[pvec.nr - 1] + 1;
461                 pagevec_remove_exceptionals(&pvec);
462                 check_move_unevictable_pages(pvec.pages, pvec.nr);
463                 pagevec_release(&pvec);
464                 cond_resched();
465         }
466 }
467 
468 /*
469  * Remove range of pages and swap entries from radix tree, and free them.
470  * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
471  */
472 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
473                                                                  bool unfalloc)
474 {
475         struct address_space *mapping = inode->i_mapping;
476         struct shmem_inode_info *info = SHMEM_I(inode);
477         pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
478         pgoff_t end = (lend + 1) >> PAGE_SHIFT;
479         unsigned int partial_start = lstart & (PAGE_SIZE - 1);
480         unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
481         struct pagevec pvec;
482         pgoff_t indices[PAGEVEC_SIZE];
483         long nr_swaps_freed = 0;
484         pgoff_t index;
485         int i;
486 
487         if (lend == -1)
488                 end = -1;       /* unsigned, so actually very big */
489 
490         pagevec_init(&pvec, 0);
491         index = start;
492         while (index < end) {
493                 pvec.nr = find_get_entries(mapping, index,
494                         min(end - index, (pgoff_t)PAGEVEC_SIZE),
495                         pvec.pages, indices);
496                 if (!pvec.nr)
497                         break;
498                 for (i = 0; i < pagevec_count(&pvec); i++) {
499                         struct page *page = pvec.pages[i];
500 
501                         index = indices[i];
502                         if (index >= end)
503                                 break;
504 
505                         if (radix_tree_exceptional_entry(page)) {
506                                 if (unfalloc)
507                                         continue;
508                                 nr_swaps_freed += !shmem_free_swap(mapping,
509                                                                 index, page);
510                                 continue;
511                         }
512 
513                         if (!trylock_page(page))
514                                 continue;
515                         if (!unfalloc || !PageUptodate(page)) {
516                                 if (page->mapping == mapping) {
517                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
518                                         truncate_inode_page(mapping, page);
519                                 }
520                         }
521                         unlock_page(page);
522                 }
523                 pagevec_remove_exceptionals(&pvec);
524                 pagevec_release(&pvec);
525                 cond_resched();
526                 index++;
527         }
528 
529         if (partial_start) {
530                 struct page *page = NULL;
531                 shmem_getpage(inode, start - 1, &page, SGP_READ);
532                 if (page) {
533                         unsigned int top = PAGE_SIZE;
534                         if (start > end) {
535                                 top = partial_end;
536                                 partial_end = 0;
537                         }
538                         zero_user_segment(page, partial_start, top);
539                         set_page_dirty(page);
540                         unlock_page(page);
541                         put_page(page);
542                 }
543         }
544         if (partial_end) {
545                 struct page *page = NULL;
546                 shmem_getpage(inode, end, &page, SGP_READ);
547                 if (page) {
548                         zero_user_segment(page, 0, partial_end);
549                         set_page_dirty(page);
550                         unlock_page(page);
551                         put_page(page);
552                 }
553         }
554         if (start >= end)
555                 return;
556 
557         index = start;
558         while (index < end) {
559                 cond_resched();
560 
561                 pvec.nr = find_get_entries(mapping, index,
562                                 min(end - index, (pgoff_t)PAGEVEC_SIZE),
563                                 pvec.pages, indices);
564                 if (!pvec.nr) {
565                         /* If all gone or hole-punch or unfalloc, we're done */
566                         if (index == start || end != -1)
567                                 break;
568                         /* But if truncating, restart to make sure all gone */
569                         index = start;
570                         continue;
571                 }
572                 for (i = 0; i < pagevec_count(&pvec); i++) {
573                         struct page *page = pvec.pages[i];
574 
575                         index = indices[i];
576                         if (index >= end)
577                                 break;
578 
579                         if (radix_tree_exceptional_entry(page)) {
580                                 if (unfalloc)
581                                         continue;
582                                 if (shmem_free_swap(mapping, index, page)) {
583                                         /* Swap was replaced by page: retry */
584                                         index--;
585                                         break;
586                                 }
587                                 nr_swaps_freed++;
588                                 continue;
589                         }
590 
591                         lock_page(page);
592                         if (!unfalloc || !PageUptodate(page)) {
593                                 if (page->mapping == mapping) {
594                                         VM_BUG_ON_PAGE(PageWriteback(page), page);
595                                         truncate_inode_page(mapping, page);
596                                 } else {
597                                         /* Page was replaced by swap: retry */
598                                         unlock_page(page);
599                                         index--;
600                                         break;
601                                 }
602                         }
603                         unlock_page(page);
604                 }
605                 pagevec_remove_exceptionals(&pvec);
606                 pagevec_release(&pvec);
607                 index++;
608         }
609 
610         spin_lock(&info->lock);
611         info->swapped -= nr_swaps_freed;
612         shmem_recalc_inode(inode);
613         spin_unlock(&info->lock);
614 }
615 
616 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
617 {
618         shmem_undo_range(inode, lstart, lend, false);
619         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
620 }
621 EXPORT_SYMBOL_GPL(shmem_truncate_range);
622 
623 static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
624                          struct kstat *stat)
625 {
626         struct inode *inode = dentry->d_inode;
627         struct shmem_inode_info *info = SHMEM_I(inode);
628 
629         if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
630                 spin_lock(&info->lock);
631                 shmem_recalc_inode(inode);
632                 spin_unlock(&info->lock);
633         }
634         generic_fillattr(inode, stat);
635         return 0;
636 }
637 
638 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
639 {
640         struct inode *inode = d_inode(dentry);
641         struct shmem_inode_info *info = SHMEM_I(inode);
642         int error;
643 
644         error = inode_change_ok(inode, attr);
645         if (error)
646                 return error;
647 
648         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
649                 loff_t oldsize = inode->i_size;
650                 loff_t newsize = attr->ia_size;
651 
652                 /* protected by i_mutex */
653                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
654                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
655                         return -EPERM;
656 
657                 if (newsize != oldsize) {
658                         error = shmem_reacct_size(SHMEM_I(inode)->flags,
659                                         oldsize, newsize);
660                         if (error)
661                                 return error;
662                         i_size_write(inode, newsize);
663                         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
664                 }
665                 if (newsize <= oldsize) {
666                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
667                         if (oldsize > holebegin)
668                                 unmap_mapping_range(inode->i_mapping,
669                                                         holebegin, 0, 1);
670                         if (info->alloced)
671                                 shmem_truncate_range(inode,
672                                                         newsize, (loff_t)-1);
673                         /* unmap again to remove racily COWed private pages */
674                         if (oldsize > holebegin)
675                                 unmap_mapping_range(inode->i_mapping,
676                                                         holebegin, 0, 1);
677                 }
678         }
679 
680         setattr_copy(inode, attr);
681         if (attr->ia_valid & ATTR_MODE)
682                 error = posix_acl_chmod(inode, inode->i_mode);
683         return error;
684 }
685 
686 static void shmem_evict_inode(struct inode *inode)
687 {
688         struct shmem_inode_info *info = SHMEM_I(inode);
689 
690         if (inode->i_mapping->a_ops == &shmem_aops) {
691                 shmem_unacct_size(info->flags, inode->i_size);
692                 inode->i_size = 0;
693                 shmem_truncate_range(inode, 0, (loff_t)-1);
694                 if (!list_empty(&info->swaplist)) {
695                         mutex_lock(&shmem_swaplist_mutex);
696                         list_del_init(&info->swaplist);
697                         mutex_unlock(&shmem_swaplist_mutex);
698                 }
699         }
700 
701         simple_xattrs_free(&info->xattrs);
702         WARN_ON(inode->i_blocks);
703         shmem_free_inode(inode->i_sb);
704         clear_inode(inode);
705 }
706 
707 /*
708  * If swap found in inode, free it and move page from swapcache to filecache.
709  */
710 static int shmem_unuse_inode(struct shmem_inode_info *info,
711                              swp_entry_t swap, struct page **pagep)
712 {
713         struct address_space *mapping = info->vfs_inode.i_mapping;
714         void *radswap;
715         pgoff_t index;
716         gfp_t gfp;
717         int error = 0;
718 
719         radswap = swp_to_radix_entry(swap);
720         index = radix_tree_locate_item(&mapping->page_tree, radswap);
721         if (index == -1)
722                 return -EAGAIN; /* tell shmem_unuse we found nothing */
723 
724         /*
725          * Move _head_ to start search for next from here.
726          * But be careful: shmem_evict_inode checks list_empty without taking
727          * mutex, and there's an instant in list_move_tail when info->swaplist
728          * would appear empty, if it were the only one on shmem_swaplist.
729          */
730         if (shmem_swaplist.next != &info->swaplist)
731                 list_move_tail(&shmem_swaplist, &info->swaplist);
732 
733         gfp = mapping_gfp_mask(mapping);
734         if (shmem_should_replace_page(*pagep, gfp)) {
735                 mutex_unlock(&shmem_swaplist_mutex);
736                 error = shmem_replace_page(pagep, gfp, info, index);
737                 mutex_lock(&shmem_swaplist_mutex);
738                 /*
739                  * We needed to drop mutex to make that restrictive page
740                  * allocation, but the inode might have been freed while we
741                  * dropped it: although a racing shmem_evict_inode() cannot
742                  * complete without emptying the radix_tree, our page lock
743                  * on this swapcache page is not enough to prevent that -
744                  * free_swap_and_cache() of our swap entry will only
745                  * trylock_page(), removing swap from radix_tree whatever.
746                  *
747                  * We must not proceed to shmem_add_to_page_cache() if the
748                  * inode has been freed, but of course we cannot rely on
749                  * inode or mapping or info to check that.  However, we can
750                  * safely check if our swap entry is still in use (and here
751                  * it can't have got reused for another page): if it's still
752                  * in use, then the inode cannot have been freed yet, and we
753                  * can safely proceed (if it's no longer in use, that tells
754                  * nothing about the inode, but we don't need to unuse swap).
755                  */
756                 if (!page_swapcount(*pagep))
757                         error = -ENOENT;
758         }
759 
760         /*
761          * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
762          * but also to hold up shmem_evict_inode(): so inode cannot be freed
763          * beneath us (pagelock doesn't help until the page is in pagecache).
764          */
765         if (!error)
766                 error = shmem_add_to_page_cache(*pagep, mapping, index,
767                                                 radswap);
768         if (error != -ENOMEM) {
769                 /*
770                  * Truncation and eviction use free_swap_and_cache(), which
771                  * only does trylock page: if we raced, best clean up here.
772                  */
773                 delete_from_swap_cache(*pagep);
774                 set_page_dirty(*pagep);
775                 if (!error) {
776                         spin_lock(&info->lock);
777                         info->swapped--;
778                         spin_unlock(&info->lock);
779                         swap_free(swap);
780                 }
781         }
782         return error;
783 }
784 
785 /*
786  * Search through swapped inodes to find and replace swap by page.
787  */
788 int shmem_unuse(swp_entry_t swap, struct page *page)
789 {
790         struct list_head *this, *next;
791         struct shmem_inode_info *info;
792         struct mem_cgroup *memcg;
793         int error = 0;
794 
795         /*
796          * There's a faint possibility that swap page was replaced before
797          * caller locked it: caller will come back later with the right page.
798          */
799         if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
800                 goto out;
801 
802         /*
803          * Charge page using GFP_KERNEL while we can wait, before taking
804          * the shmem_swaplist_mutex which might hold up shmem_writepage().
805          * Charged back to the user (not to caller) when swap account is used.
806          */
807         error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
808                         false);
809         if (error)
810                 goto out;
811         /* No radix_tree_preload: swap entry keeps a place for page in tree */
812         error = -EAGAIN;
813 
814         mutex_lock(&shmem_swaplist_mutex);
815         list_for_each_safe(this, next, &shmem_swaplist) {
816                 info = list_entry(this, struct shmem_inode_info, swaplist);
817                 if (info->swapped)
818                         error = shmem_unuse_inode(info, swap, &page);
819                 else
820                         list_del_init(&info->swaplist);
821                 cond_resched();
822                 if (error != -EAGAIN)
823                         break;
824                 /* found nothing in this: move on to search the next */
825         }
826         mutex_unlock(&shmem_swaplist_mutex);
827 
828         if (error) {
829                 if (error != -ENOMEM)
830                         error = 0;
831                 mem_cgroup_cancel_charge(page, memcg, false);
832         } else
833                 mem_cgroup_commit_charge(page, memcg, true, false);
834 out:
835         unlock_page(page);
836         put_page(page);
837         return error;
838 }
839 
840 /*
841  * Move the page from the page cache to the swap cache.
842  */
843 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
844 {
845         struct shmem_inode_info *info;
846         struct address_space *mapping;
847         struct inode *inode;
848         swp_entry_t swap;
849         pgoff_t index;
850 
851         BUG_ON(!PageLocked(page));
852         mapping = page->mapping;
853         index = page->index;
854         inode = mapping->host;
855         info = SHMEM_I(inode);
856         if (info->flags & VM_LOCKED)
857                 goto redirty;
858         if (!total_swap_pages)
859                 goto redirty;
860 
861         /*
862          * Our capabilities prevent regular writeback or sync from ever calling
863          * shmem_writepage; but a stacking filesystem might use ->writepage of
864          * its underlying filesystem, in which case tmpfs should write out to
865          * swap only in response to memory pressure, and not for the writeback
866          * threads or sync.
867          */
868         if (!wbc->for_reclaim) {
869                 WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
870                 goto redirty;
871         }
872 
873         /*
874          * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
875          * value into swapfile.c, the only way we can correctly account for a
876          * fallocated page arriving here is now to initialize it and write it.
877          *
878          * That's okay for a page already fallocated earlier, but if we have
879          * not yet completed the fallocation, then (a) we want to keep track
880          * of this page in case we have to undo it, and (b) it may not be a
881          * good idea to continue anyway, once we're pushing into swap.  So
882          * reactivate the page, and let shmem_fallocate() quit when too many.
883          */
884         if (!PageUptodate(page)) {
885                 if (inode->i_private) {
886                         struct shmem_falloc *shmem_falloc;
887                         spin_lock(&inode->i_lock);
888                         shmem_falloc = inode->i_private;
889                         if (shmem_falloc &&
890                             !shmem_falloc->waitq &&
891                             index >= shmem_falloc->start &&
892                             index < shmem_falloc->next)
893                                 shmem_falloc->nr_unswapped++;
894                         else
895                                 shmem_falloc = NULL;
896                         spin_unlock(&inode->i_lock);
897                         if (shmem_falloc)
898                                 goto redirty;
899                 }
900                 clear_highpage(page);
901                 flush_dcache_page(page);
902                 SetPageUptodate(page);
903         }
904 
905         swap = get_swap_page();
906         if (!swap.val)
907                 goto redirty;
908 
909         if (mem_cgroup_try_charge_swap(page, swap))
910                 goto free_swap;
911 
912         /*
913          * Add inode to shmem_unuse()'s list of swapped-out inodes,
914          * if it's not already there.  Do it now before the page is
915          * moved to swap cache, when its pagelock no longer protects
916          * the inode from eviction.  But don't unlock the mutex until
917          * we've incremented swapped, because shmem_unuse_inode() will
918          * prune a !swapped inode from the swaplist under this mutex.
919          */
920         mutex_lock(&shmem_swaplist_mutex);
921         if (list_empty(&info->swaplist))
922                 list_add_tail(&info->swaplist, &shmem_swaplist);
923 
924         if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
925                 spin_lock(&info->lock);
926                 shmem_recalc_inode(inode);
927                 info->swapped++;
928                 spin_unlock(&info->lock);
929 
930                 swap_shmem_alloc(swap);
931                 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
932 
933                 mutex_unlock(&shmem_swaplist_mutex);
934                 BUG_ON(page_mapped(page));
935                 swap_writepage(page, wbc);
936                 return 0;
937         }
938 
939         mutex_unlock(&shmem_swaplist_mutex);
940 free_swap:
941         swapcache_free(swap);
942 redirty:
943         set_page_dirty(page);
944         if (wbc->for_reclaim)
945                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
946         unlock_page(page);
947         return 0;
948 }
949 
950 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
951 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
952 {
953         char buffer[64];
954 
955         if (!mpol || mpol->mode == MPOL_DEFAULT)
956                 return;         /* show nothing */
957 
958         mpol_to_str(buffer, sizeof(buffer), mpol);
959 
960         seq_printf(seq, ",mpol=%s", buffer);
961 }
962 
963 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
964 {
965         struct mempolicy *mpol = NULL;
966         if (sbinfo->mpol) {
967                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
968                 mpol = sbinfo->mpol;
969                 mpol_get(mpol);
970                 spin_unlock(&sbinfo->stat_lock);
971         }
972         return mpol;
973 }
974 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
975 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
976 {
977 }
978 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
979 {
980         return NULL;
981 }
982 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
983 #ifndef CONFIG_NUMA
984 #define vm_policy vm_private_data
985 #endif
986 
987 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
988                         struct shmem_inode_info *info, pgoff_t index)
989 {
990         struct vm_area_struct pvma;
991         struct page *page;
992 
993         /* Create a pseudo vma that just contains the policy */
994         pvma.vm_start = 0;
995         /* Bias interleave by inode number to distribute better across nodes */
996         pvma.vm_pgoff = index + info->vfs_inode.i_ino;
997         pvma.vm_ops = NULL;
998         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
999 
1000         page = swapin_readahead(swap, gfp, &pvma, 0);
1001 
1002         /* Drop reference taken by mpol_shared_policy_lookup() */
1003         mpol_cond_put(pvma.vm_policy);
1004 
1005         return page;
1006 }
1007 
1008 static struct page *shmem_alloc_page(gfp_t gfp,
1009                         struct shmem_inode_info *info, pgoff_t index)
1010 {
1011         struct vm_area_struct pvma;
1012         struct page *page;
1013 
1014         /* Create a pseudo vma that just contains the policy */
1015         pvma.vm_start = 0;
1016         /* Bias interleave by inode number to distribute better across nodes */
1017         pvma.vm_pgoff = index + info->vfs_inode.i_ino;
1018         pvma.vm_ops = NULL;
1019         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1020 
1021         page = alloc_pages_vma(gfp, 0, &pvma, 0, numa_node_id(), false);
1022         if (page) {
1023                 __SetPageLocked(page);
1024                 __SetPageSwapBacked(page);
1025         }
1026 
1027         /* Drop reference taken by mpol_shared_policy_lookup() */
1028         mpol_cond_put(pvma.vm_policy);
1029 
1030         return page;
1031 }
1032 
1033 /*
1034  * When a page is moved from swapcache to shmem filecache (either by the
1035  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1036  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1037  * ignorance of the mapping it belongs to.  If that mapping has special
1038  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1039  * we may need to copy to a suitable page before moving to filecache.
1040  *
1041  * In a future release, this may well be extended to respect cpuset and
1042  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1043  * but for now it is a simple matter of zone.
1044  */
1045 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1046 {
1047         return page_zonenum(page) > gfp_zone(gfp);
1048 }
1049 
1050 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1051                                 struct shmem_inode_info *info, pgoff_t index)
1052 {
1053         struct page *oldpage, *newpage;
1054         struct address_space *swap_mapping;
1055         pgoff_t swap_index;
1056         int error;
1057 
1058         oldpage = *pagep;
1059         swap_index = page_private(oldpage);
1060         swap_mapping = page_mapping(oldpage);
1061 
1062         /*
1063          * We have arrived here because our zones are constrained, so don't
1064          * limit chance of success by further cpuset and node constraints.
1065          */
1066         gfp &= ~GFP_CONSTRAINT_MASK;
1067         newpage = shmem_alloc_page(gfp, info, index);
1068         if (!newpage)
1069                 return -ENOMEM;
1070 
1071         get_page(newpage);
1072         copy_highpage(newpage, oldpage);
1073         flush_dcache_page(newpage);
1074 
1075         SetPageUptodate(newpage);
1076         set_page_private(newpage, swap_index);
1077         SetPageSwapCache(newpage);
1078 
1079         /*
1080          * Our caller will very soon move newpage out of swapcache, but it's
1081          * a nice clean interface for us to replace oldpage by newpage there.
1082          */
1083         spin_lock_irq(&swap_mapping->tree_lock);
1084         error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1085                                                                    newpage);
1086         if (!error) {
1087                 __inc_zone_page_state(newpage, NR_FILE_PAGES);
1088                 __dec_zone_page_state(oldpage, NR_FILE_PAGES);
1089         }
1090         spin_unlock_irq(&swap_mapping->tree_lock);
1091 
1092         if (unlikely(error)) {
1093                 /*
1094                  * Is this possible?  I think not, now that our callers check
1095                  * both PageSwapCache and page_private after getting page lock;
1096                  * but be defensive.  Reverse old to newpage for clear and free.
1097                  */
1098                 oldpage = newpage;
1099         } else {
1100                 mem_cgroup_migrate(oldpage, newpage);
1101                 lru_cache_add_anon(newpage);
1102                 *pagep = newpage;
1103         }
1104 
1105         ClearPageSwapCache(oldpage);
1106         set_page_private(oldpage, 0);
1107 
1108         unlock_page(oldpage);
1109         put_page(oldpage);
1110         put_page(oldpage);
1111         return error;
1112 }
1113 
1114 /*
1115  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1116  *
1117  * If we allocate a new one we do not mark it dirty. That's up to the
1118  * vm. If we swap it in we mark it dirty since we also free the swap
1119  * entry since a page cannot live in both the swap and page cache.
1120  *
1121  * fault_mm and fault_type are only supplied by shmem_fault:
1122  * otherwise they are NULL.
1123  */
1124 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1125         struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1126         struct mm_struct *fault_mm, int *fault_type)
1127 {
1128         struct address_space *mapping = inode->i_mapping;
1129         struct shmem_inode_info *info;
1130         struct shmem_sb_info *sbinfo;
1131         struct mm_struct *charge_mm;
1132         struct mem_cgroup *memcg;
1133         struct page *page;
1134         swp_entry_t swap;
1135         int error;
1136         int once = 0;
1137         int alloced = 0;
1138 
1139         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1140                 return -EFBIG;
1141 repeat:
1142         swap.val = 0;
1143         page = find_lock_entry(mapping, index);
1144         if (radix_tree_exceptional_entry(page)) {
1145                 swap = radix_to_swp_entry(page);
1146                 page = NULL;
1147         }
1148 
1149         if (sgp <= SGP_CACHE &&
1150             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1151                 error = -EINVAL;
1152                 goto unlock;
1153         }
1154 
1155         if (page && sgp == SGP_WRITE)
1156                 mark_page_accessed(page);
1157 
1158         /* fallocated page? */
1159         if (page && !PageUptodate(page)) {
1160                 if (sgp != SGP_READ)
1161                         goto clear;
1162                 unlock_page(page);
1163                 put_page(page);
1164                 page = NULL;
1165         }
1166         if (page || (sgp == SGP_READ && !swap.val)) {
1167                 *pagep = page;
1168                 return 0;
1169         }
1170 
1171         /*
1172          * Fast cache lookup did not find it:
1173          * bring it back from swap or allocate.
1174          */
1175         info = SHMEM_I(inode);
1176         sbinfo = SHMEM_SB(inode->i_sb);
1177         charge_mm = fault_mm ? : current->mm;
1178 
1179         if (swap.val) {
1180                 /* Look it up and read it in.. */
1181                 page = lookup_swap_cache(swap);
1182                 if (!page) {
1183                         /* Or update major stats only when swapin succeeds?? */
1184                         if (fault_type) {
1185                                 *fault_type |= VM_FAULT_MAJOR;
1186                                 count_vm_event(PGMAJFAULT);
1187                                 mem_cgroup_count_vm_event(fault_mm, PGMAJFAULT);
1188                         }
1189                         /* Here we actually start the io */
1190                         page = shmem_swapin(swap, gfp, info, index);
1191                         if (!page) {
1192                                 error = -ENOMEM;
1193                                 goto failed;
1194                         }
1195                 }
1196 
1197                 /* We have to do this with page locked to prevent races */
1198                 lock_page(page);
1199                 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1200                     !shmem_confirm_swap(mapping, index, swap)) {
1201                         error = -EEXIST;        /* try again */
1202                         goto unlock;
1203                 }
1204                 if (!PageUptodate(page)) {
1205                         error = -EIO;
1206                         goto failed;
1207                 }
1208                 wait_on_page_writeback(page);
1209 
1210                 if (shmem_should_replace_page(page, gfp)) {
1211                         error = shmem_replace_page(&page, gfp, info, index);
1212                         if (error)
1213                                 goto failed;
1214                 }
1215 
1216                 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1217                                 false);
1218                 if (!error) {
1219                         error = shmem_add_to_page_cache(page, mapping, index,
1220                                                 swp_to_radix_entry(swap));
1221                         /*
1222                          * We already confirmed swap under page lock, and make
1223                          * no memory allocation here, so usually no possibility
1224                          * of error; but free_swap_and_cache() only trylocks a
1225                          * page, so it is just possible that the entry has been
1226                          * truncated or holepunched since swap was confirmed.
1227                          * shmem_undo_range() will have done some of the
1228                          * unaccounting, now delete_from_swap_cache() will do
1229                          * the rest.
1230                          * Reset swap.val? No, leave it so "failed" goes back to
1231                          * "repeat": reading a hole and writing should succeed.
1232                          */
1233                         if (error) {
1234                                 mem_cgroup_cancel_charge(page, memcg, false);
1235                                 delete_from_swap_cache(page);
1236                         }
1237                 }
1238                 if (error)
1239                         goto failed;
1240 
1241                 mem_cgroup_commit_charge(page, memcg, true, false);
1242 
1243                 spin_lock(&info->lock);
1244                 info->swapped--;
1245                 shmem_recalc_inode(inode);
1246                 spin_unlock(&info->lock);
1247 
1248                 if (sgp == SGP_WRITE)
1249                         mark_page_accessed(page);
1250 
1251                 delete_from_swap_cache(page);
1252                 set_page_dirty(page);
1253                 swap_free(swap);
1254 
1255         } else {
1256                 if (shmem_acct_block(info->flags)) {
1257                         error = -ENOSPC;
1258                         goto failed;
1259                 }
1260                 if (sbinfo->max_blocks) {
1261                         if (percpu_counter_compare(&sbinfo->used_blocks,
1262                                                 sbinfo->max_blocks) >= 0) {
1263                                 error = -ENOSPC;
1264                                 goto unacct;
1265                         }
1266                         percpu_counter_inc(&sbinfo->used_blocks);
1267                 }
1268 
1269                 page = shmem_alloc_page(gfp, info, index);
1270                 if (!page) {
1271                         error = -ENOMEM;
1272                         goto decused;
1273                 }
1274                 if (sgp == SGP_WRITE)
1275                         __SetPageReferenced(page);
1276 
1277                 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1278                                 false);
1279                 if (error)
1280                         goto decused;
1281                 error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
1282                 if (!error) {
1283                         error = shmem_add_to_page_cache(page, mapping, index,
1284                                                         NULL);
1285                         radix_tree_preload_end();
1286                 }
1287                 if (error) {
1288                         mem_cgroup_cancel_charge(page, memcg, false);
1289                         goto decused;
1290                 }
1291                 mem_cgroup_commit_charge(page, memcg, false, false);
1292                 lru_cache_add_anon(page);
1293 
1294                 spin_lock(&info->lock);
1295                 info->alloced++;
1296                 inode->i_blocks += BLOCKS_PER_PAGE;
1297                 shmem_recalc_inode(inode);
1298                 spin_unlock(&info->lock);
1299                 alloced = true;
1300 
1301                 /*
1302                  * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1303                  */
1304                 if (sgp == SGP_FALLOC)
1305                         sgp = SGP_WRITE;
1306 clear:
1307                 /*
1308                  * Let SGP_WRITE caller clear ends if write does not fill page;
1309                  * but SGP_FALLOC on a page fallocated earlier must initialize
1310                  * it now, lest undo on failure cancel our earlier guarantee.
1311                  */
1312                 if (sgp != SGP_WRITE) {
1313                         clear_highpage(page);
1314                         flush_dcache_page(page);
1315                         SetPageUptodate(page);
1316                 }
1317         }
1318 
1319         /* Perhaps the file has been truncated since we checked */
1320         if (sgp <= SGP_CACHE &&
1321             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1322                 if (alloced) {
1323                         ClearPageDirty(page);
1324                         delete_from_page_cache(page);
1325                         spin_lock(&info->lock);
1326                         shmem_recalc_inode(inode);
1327                         spin_unlock(&info->lock);
1328                 }
1329                 error = -EINVAL;
1330                 goto unlock;
1331         }
1332         *pagep = page;
1333         return 0;
1334 
1335         /*
1336          * Error recovery.
1337          */
1338 decused:
1339         if (sbinfo->max_blocks)
1340                 percpu_counter_add(&sbinfo->used_blocks, -1);
1341 unacct:
1342         shmem_unacct_blocks(info->flags, 1);
1343 failed:
1344         if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1345                 error = -EEXIST;
1346 unlock:
1347         if (page) {
1348                 unlock_page(page);
1349                 put_page(page);
1350         }
1351         if (error == -ENOSPC && !once++) {
1352                 info = SHMEM_I(inode);
1353                 spin_lock(&info->lock);
1354                 shmem_recalc_inode(inode);
1355                 spin_unlock(&info->lock);
1356                 goto repeat;
1357         }
1358         if (error == -EEXIST)   /* from above or from radix_tree_insert */
1359                 goto repeat;
1360         return error;
1361 }
1362 
1363 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1364 {
1365         struct inode *inode = file_inode(vma->vm_file);
1366         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1367         int error;
1368         int ret = VM_FAULT_LOCKED;
1369 
1370         /*
1371          * Trinity finds that probing a hole which tmpfs is punching can
1372          * prevent the hole-punch from ever completing: which in turn
1373          * locks writers out with its hold on i_mutex.  So refrain from
1374          * faulting pages into the hole while it's being punched.  Although
1375          * shmem_undo_range() does remove the additions, it may be unable to
1376          * keep up, as each new page needs its own unmap_mapping_range() call,
1377          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1378          *
1379          * It does not matter if we sometimes reach this check just before the
1380          * hole-punch begins, so that one fault then races with the punch:
1381          * we just need to make racing faults a rare case.
1382          *
1383          * The implementation below would be much simpler if we just used a
1384          * standard mutex or completion: but we cannot take i_mutex in fault,
1385          * and bloating every shmem inode for this unlikely case would be sad.
1386          */
1387         if (unlikely(inode->i_private)) {
1388                 struct shmem_falloc *shmem_falloc;
1389 
1390                 spin_lock(&inode->i_lock);
1391                 shmem_falloc = inode->i_private;
1392                 if (shmem_falloc &&
1393                     shmem_falloc->waitq &&
1394                     vmf->pgoff >= shmem_falloc->start &&
1395                     vmf->pgoff < shmem_falloc->next) {
1396                         wait_queue_head_t *shmem_falloc_waitq;
1397                         DEFINE_WAIT(shmem_fault_wait);
1398 
1399                         ret = VM_FAULT_NOPAGE;
1400                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1401                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1402                                 /* It's polite to up mmap_sem if we can */
1403                                 up_read(&vma->vm_mm->mmap_sem);
1404                                 ret = VM_FAULT_RETRY;
1405                         }
1406 
1407                         shmem_falloc_waitq = shmem_falloc->waitq;
1408                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1409                                         TASK_UNINTERRUPTIBLE);
1410                         spin_unlock(&inode->i_lock);
1411                         schedule();
1412 
1413                         /*
1414                          * shmem_falloc_waitq points into the shmem_fallocate()
1415                          * stack of the hole-punching task: shmem_falloc_waitq
1416                          * is usually invalid by the time we reach here, but
1417                          * finish_wait() does not dereference it in that case;
1418                          * though i_lock needed lest racing with wake_up_all().
1419                          */
1420                         spin_lock(&inode->i_lock);
1421                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1422                         spin_unlock(&inode->i_lock);
1423                         return ret;
1424                 }
1425                 spin_unlock(&inode->i_lock);
1426         }
1427 
1428         error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
1429                                   gfp, vma->vm_mm, &ret);
1430         if (error)
1431                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1432         return ret;
1433 }
1434 
1435 #ifdef CONFIG_NUMA
1436 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
1437 {
1438         struct inode *inode = file_inode(vma->vm_file);
1439         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
1440 }
1441 
1442 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1443                                           unsigned long addr)
1444 {
1445         struct inode *inode = file_inode(vma->vm_file);
1446         pgoff_t index;
1447 
1448         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1449         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
1450 }
1451 #endif
1452 
1453 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1454 {
1455         struct inode *inode = file_inode(file);
1456         struct shmem_inode_info *info = SHMEM_I(inode);
1457         int retval = -ENOMEM;
1458 
1459         spin_lock(&info->lock);
1460         if (lock && !(info->flags & VM_LOCKED)) {
1461                 if (!user_shm_lock(inode->i_size, user))
1462                         goto out_nomem;
1463                 info->flags |= VM_LOCKED;
1464                 mapping_set_unevictable(file->f_mapping);
1465         }
1466         if (!lock && (info->flags & VM_LOCKED) && user) {
1467                 user_shm_unlock(inode->i_size, user);
1468                 info->flags &= ~VM_LOCKED;
1469                 mapping_clear_unevictable(file->f_mapping);
1470         }
1471         retval = 0;
1472 
1473 out_nomem:
1474         spin_unlock(&info->lock);
1475         return retval;
1476 }
1477 
1478 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1479 {
1480         file_accessed(file);
1481         vma->vm_ops = &shmem_vm_ops;
1482         return 0;
1483 }
1484 
1485 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1486                                      umode_t mode, dev_t dev, unsigned long flags)
1487 {
1488         struct inode *inode;
1489         struct shmem_inode_info *info;
1490         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1491 
1492         if (shmem_reserve_inode(sb))
1493                 return NULL;
1494 
1495         inode = new_inode(sb);
1496         if (inode) {
1497                 inode->i_ino = get_next_ino();
1498                 inode_init_owner(inode, dir, mode);
1499                 inode->i_blocks = 0;
1500                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1501                 inode->i_generation = get_seconds();
1502                 info = SHMEM_I(inode);
1503                 memset(info, 0, (char *)inode - (char *)info);
1504                 spin_lock_init(&info->lock);
1505                 info->seals = F_SEAL_SEAL;
1506                 info->flags = flags & VM_NORESERVE;
1507                 INIT_LIST_HEAD(&info->swaplist);
1508                 simple_xattrs_init(&info->xattrs);
1509                 cache_no_acl(inode);
1510 
1511                 switch (mode & S_IFMT) {
1512                 default:
1513                         inode->i_op = &shmem_special_inode_operations;
1514                         init_special_inode(inode, mode, dev);
1515                         break;
1516                 case S_IFREG:
1517                         inode->i_mapping->a_ops = &shmem_aops;
1518                         inode->i_op = &shmem_inode_operations;
1519                         inode->i_fop = &shmem_file_operations;
1520                         mpol_shared_policy_init(&info->policy,
1521                                                  shmem_get_sbmpol(sbinfo));
1522                         break;
1523                 case S_IFDIR:
1524                         inc_nlink(inode);
1525                         /* Some things misbehave if size == 0 on a directory */
1526                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1527                         inode->i_op = &shmem_dir_inode_operations;
1528                         inode->i_fop = &simple_dir_operations;
1529                         break;
1530                 case S_IFLNK:
1531                         /*
1532                          * Must not load anything in the rbtree,
1533                          * mpol_free_shared_policy will not be called.
1534                          */
1535                         mpol_shared_policy_init(&info->policy, NULL);
1536                         break;
1537                 }
1538         } else
1539                 shmem_free_inode(sb);
1540         return inode;
1541 }
1542 
1543 bool shmem_mapping(struct address_space *mapping)
1544 {
1545         if (!mapping->host)
1546                 return false;
1547 
1548         return mapping->host->i_sb->s_op == &shmem_ops;
1549 }
1550 
1551 #ifdef CONFIG_TMPFS
1552 static const struct inode_operations shmem_symlink_inode_operations;
1553 static const struct inode_operations shmem_short_symlink_operations;
1554 
1555 #ifdef CONFIG_TMPFS_XATTR
1556 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
1557 #else
1558 #define shmem_initxattrs NULL
1559 #endif
1560 
1561 static int
1562 shmem_write_begin(struct file *file, struct address_space *mapping,
1563                         loff_t pos, unsigned len, unsigned flags,
1564                         struct page **pagep, void **fsdata)
1565 {
1566         struct inode *inode = mapping->host;
1567         struct shmem_inode_info *info = SHMEM_I(inode);
1568         pgoff_t index = pos >> PAGE_SHIFT;
1569 
1570         /* i_mutex is held by caller */
1571         if (unlikely(info->seals)) {
1572                 if (info->seals & F_SEAL_WRITE)
1573                         return -EPERM;
1574                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
1575                         return -EPERM;
1576         }
1577 
1578         return shmem_getpage(inode, index, pagep, SGP_WRITE);
1579 }
1580 
1581 static int
1582 shmem_write_end(struct file *file, struct address_space *mapping,
1583                         loff_t pos, unsigned len, unsigned copied,
1584                         struct page *page, void *fsdata)
1585 {
1586         struct inode *inode = mapping->host;
1587 
1588         if (pos + copied > inode->i_size)
1589                 i_size_write(inode, pos + copied);
1590 
1591         if (!PageUptodate(page)) {
1592                 if (copied < PAGE_SIZE) {
1593                         unsigned from = pos & (PAGE_SIZE - 1);
1594                         zero_user_segments(page, 0, from,
1595                                         from + copied, PAGE_SIZE);
1596                 }
1597                 SetPageUptodate(page);
1598         }
1599         set_page_dirty(page);
1600         unlock_page(page);
1601         put_page(page);
1602 
1603         return copied;
1604 }
1605 
1606 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1607 {
1608         struct file *file = iocb->ki_filp;
1609         struct inode *inode = file_inode(file);
1610         struct address_space *mapping = inode->i_mapping;
1611         pgoff_t index;
1612         unsigned long offset;
1613         enum sgp_type sgp = SGP_READ;
1614         int error = 0;
1615         ssize_t retval = 0;
1616         loff_t *ppos = &iocb->ki_pos;
1617 
1618         /*
1619          * Might this read be for a stacking filesystem?  Then when reading
1620          * holes of a sparse file, we actually need to allocate those pages,
1621          * and even mark them dirty, so it cannot exceed the max_blocks limit.
1622          */
1623         if (!iter_is_iovec(to))
1624                 sgp = SGP_CACHE;
1625 
1626         index = *ppos >> PAGE_SHIFT;
1627         offset = *ppos & ~PAGE_MASK;
1628 
1629         for (;;) {
1630                 struct page *page = NULL;
1631                 pgoff_t end_index;
1632                 unsigned long nr, ret;
1633                 loff_t i_size = i_size_read(inode);
1634 
1635                 end_index = i_size >> PAGE_SHIFT;
1636                 if (index > end_index)
1637                         break;
1638                 if (index == end_index) {
1639                         nr = i_size & ~PAGE_MASK;
1640                         if (nr <= offset)
1641                                 break;
1642                 }
1643 
1644                 error = shmem_getpage(inode, index, &page, sgp);
1645                 if (error) {
1646                         if (error == -EINVAL)
1647                                 error = 0;
1648                         break;
1649                 }
1650                 if (page) {
1651                         if (sgp == SGP_CACHE)
1652                                 set_page_dirty(page);
1653                         unlock_page(page);
1654                 }
1655 
1656                 /*
1657                  * We must evaluate after, since reads (unlike writes)
1658                  * are called without i_mutex protection against truncate
1659                  */
1660                 nr = PAGE_SIZE;
1661                 i_size = i_size_read(inode);
1662                 end_index = i_size >> PAGE_SHIFT;
1663                 if (index == end_index) {
1664                         nr = i_size & ~PAGE_MASK;
1665                         if (nr <= offset) {
1666                                 if (page)
1667                                         put_page(page);
1668                                 break;
1669                         }
1670                 }
1671                 nr -= offset;
1672 
1673                 if (page) {
1674                         /*
1675                          * If users can be writing to this page using arbitrary
1676                          * virtual addresses, take care about potential aliasing
1677                          * before reading the page on the kernel side.
1678                          */
1679                         if (mapping_writably_mapped(mapping))
1680                                 flush_dcache_page(page);
1681                         /*
1682                          * Mark the page accessed if we read the beginning.
1683                          */
1684                         if (!offset)
1685                                 mark_page_accessed(page);
1686                 } else {
1687                         page = ZERO_PAGE(0);
1688                         get_page(page);
1689                 }
1690 
1691                 /*
1692                  * Ok, we have the page, and it's up-to-date, so
1693                  * now we can copy it to user space...
1694                  */
1695                 ret = copy_page_to_iter(page, offset, nr, to);
1696                 retval += ret;
1697                 offset += ret;
1698                 index += offset >> PAGE_SHIFT;
1699                 offset &= ~PAGE_MASK;
1700 
1701                 put_page(page);
1702                 if (!iov_iter_count(to))
1703                         break;
1704                 if (ret < nr) {
1705                         error = -EFAULT;
1706                         break;
1707                 }
1708                 cond_resched();
1709         }
1710 
1711         *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
1712         file_accessed(file);
1713         return retval ? retval : error;
1714 }
1715 
1716 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1717                                 struct pipe_inode_info *pipe, size_t len,
1718                                 unsigned int flags)
1719 {
1720         struct address_space *mapping = in->f_mapping;
1721         struct inode *inode = mapping->host;
1722         unsigned int loff, nr_pages, req_pages;
1723         struct page *pages[PIPE_DEF_BUFFERS];
1724         struct partial_page partial[PIPE_DEF_BUFFERS];
1725         struct page *page;
1726         pgoff_t index, end_index;
1727         loff_t isize, left;
1728         int error, page_nr;
1729         struct splice_pipe_desc spd = {
1730                 .pages = pages,
1731                 .partial = partial,
1732                 .nr_pages_max = PIPE_DEF_BUFFERS,
1733                 .flags = flags,
1734                 .ops = &page_cache_pipe_buf_ops,
1735                 .spd_release = spd_release_page,
1736         };
1737 
1738         isize = i_size_read(inode);
1739         if (unlikely(*ppos >= isize))
1740                 return 0;
1741 
1742         left = isize - *ppos;
1743         if (unlikely(left < len))
1744                 len = left;
1745 
1746         if (splice_grow_spd(pipe, &spd))
1747                 return -ENOMEM;
1748 
1749         index = *ppos >> PAGE_SHIFT;
1750         loff = *ppos & ~PAGE_MASK;
1751         req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
1752         nr_pages = min(req_pages, spd.nr_pages_max);
1753 
1754         spd.nr_pages = find_get_pages_contig(mapping, index,
1755                                                 nr_pages, spd.pages);
1756         index += spd.nr_pages;
1757         error = 0;
1758 
1759         while (spd.nr_pages < nr_pages) {
1760                 error = shmem_getpage(inode, index, &page, SGP_CACHE);
1761                 if (error)
1762                         break;
1763                 unlock_page(page);
1764                 spd.pages[spd.nr_pages++] = page;
1765                 index++;
1766         }
1767 
1768         index = *ppos >> PAGE_SHIFT;
1769         nr_pages = spd.nr_pages;
1770         spd.nr_pages = 0;
1771 
1772         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1773                 unsigned int this_len;
1774 
1775                 if (!len)
1776                         break;
1777 
1778                 this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
1779                 page = spd.pages[page_nr];
1780 
1781                 if (!PageUptodate(page) || page->mapping != mapping) {
1782                         error = shmem_getpage(inode, index, &page, SGP_CACHE);
1783                         if (error)
1784                                 break;
1785                         unlock_page(page);
1786                         put_page(spd.pages[page_nr]);
1787                         spd.pages[page_nr] = page;
1788                 }
1789 
1790                 isize = i_size_read(inode);
1791                 end_index = (isize - 1) >> PAGE_SHIFT;
1792                 if (unlikely(!isize || index > end_index))
1793                         break;
1794 
1795                 if (end_index == index) {
1796                         unsigned int plen;
1797 
1798                         plen = ((isize - 1) & ~PAGE_MASK) + 1;
1799                         if (plen <= loff)
1800                                 break;
1801 
1802                         this_len = min(this_len, plen - loff);
1803                         len = this_len;
1804                 }
1805 
1806                 spd.partial[page_nr].offset = loff;
1807                 spd.partial[page_nr].len = this_len;
1808                 len -= this_len;
1809                 loff = 0;
1810                 spd.nr_pages++;
1811                 index++;
1812         }
1813 
1814         while (page_nr < nr_pages)
1815                 put_page(spd.pages[page_nr++]);
1816 
1817         if (spd.nr_pages)
1818                 error = splice_to_pipe(pipe, &spd);
1819 
1820         splice_shrink_spd(&spd);
1821 
1822         if (error > 0) {
1823                 *ppos += error;
1824                 file_accessed(in);
1825         }
1826         return error;
1827 }
1828 
1829 /*
1830  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
1831  */
1832 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
1833                                     pgoff_t index, pgoff_t end, int whence)
1834 {
1835         struct page *page;
1836         struct pagevec pvec;
1837         pgoff_t indices[PAGEVEC_SIZE];
1838         bool done = false;
1839         int i;
1840 
1841         pagevec_init(&pvec, 0);
1842         pvec.nr = 1;            /* start small: we may be there already */
1843         while (!done) {
1844                 pvec.nr = find_get_entries(mapping, index,
1845                                         pvec.nr, pvec.pages, indices);
1846                 if (!pvec.nr) {
1847                         if (whence == SEEK_DATA)
1848                                 index = end;
1849                         break;
1850                 }
1851                 for (i = 0; i < pvec.nr; i++, index++) {
1852                         if (index < indices[i]) {
1853                                 if (whence == SEEK_HOLE) {
1854                                         done = true;
1855                                         break;
1856                                 }
1857                                 index = indices[i];
1858                         }
1859                         page = pvec.pages[i];
1860                         if (page && !radix_tree_exceptional_entry(page)) {
1861                                 if (!PageUptodate(page))
1862                                         page = NULL;
1863                         }
1864                         if (index >= end ||
1865                             (page && whence == SEEK_DATA) ||
1866                             (!page && whence == SEEK_HOLE)) {
1867                                 done = true;
1868                                 break;
1869                         }
1870                 }
1871                 pagevec_remove_exceptionals(&pvec);
1872                 pagevec_release(&pvec);
1873                 pvec.nr = PAGEVEC_SIZE;
1874                 cond_resched();
1875         }
1876         return index;
1877 }
1878 
1879 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
1880 {
1881         struct address_space *mapping = file->f_mapping;
1882         struct inode *inode = mapping->host;
1883         pgoff_t start, end;
1884         loff_t new_offset;
1885 
1886         if (whence != SEEK_DATA && whence != SEEK_HOLE)
1887                 return generic_file_llseek_size(file, offset, whence,
1888                                         MAX_LFS_FILESIZE, i_size_read(inode));
1889         inode_lock(inode);
1890         /* We're holding i_mutex so we can access i_size directly */
1891 
1892         if (offset < 0)
1893                 offset = -EINVAL;
1894         else if (offset >= inode->i_size)
1895                 offset = -ENXIO;
1896         else {
1897                 start = offset >> PAGE_SHIFT;
1898                 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1899                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
1900                 new_offset <<= PAGE_SHIFT;
1901                 if (new_offset > offset) {
1902                         if (new_offset < inode->i_size)
1903                                 offset = new_offset;
1904                         else if (whence == SEEK_DATA)
1905                                 offset = -ENXIO;
1906                         else
1907                                 offset = inode->i_size;
1908                 }
1909         }
1910 
1911         if (offset >= 0)
1912                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
1913         inode_unlock(inode);
1914         return offset;
1915 }
1916 
1917 /*
1918  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
1919  * so reuse a tag which we firmly believe is never set or cleared on shmem.
1920  */
1921 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
1922 #define LAST_SCAN               4       /* about 150ms max */
1923 
1924 static void shmem_tag_pins(struct address_space *mapping)
1925 {
1926         struct radix_tree_iter iter;
1927         void **slot;
1928         pgoff_t start;
1929         struct page *page;
1930 
1931         lru_add_drain();
1932         start = 0;
1933         rcu_read_lock();
1934 
1935         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1936                 page = radix_tree_deref_slot(slot);
1937                 if (!page || radix_tree_exception(page)) {
1938                         if (radix_tree_deref_retry(page)) {
1939                                 slot = radix_tree_iter_retry(&iter);
1940                                 continue;
1941                         }
1942                 } else if (page_count(page) - page_mapcount(page) > 1) {
1943                         spin_lock_irq(&mapping->tree_lock);
1944                         radix_tree_tag_set(&mapping->page_tree, iter.index,
1945                                            SHMEM_TAG_PINNED);
1946                         spin_unlock_irq(&mapping->tree_lock);
1947                 }
1948 
1949                 if (need_resched()) {
1950                         cond_resched_rcu();
1951                         slot = radix_tree_iter_next(&iter);
1952                 }
1953         }
1954         rcu_read_unlock();
1955 }
1956 
1957 /*
1958  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
1959  * via get_user_pages(), drivers might have some pending I/O without any active
1960  * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages
1961  * and see whether it has an elevated ref-count. If so, we tag them and wait for
1962  * them to be dropped.
1963  * The caller must guarantee that no new user will acquire writable references
1964  * to those pages to avoid races.
1965  */
1966 static int shmem_wait_for_pins(struct address_space *mapping)
1967 {
1968         struct radix_tree_iter iter;
1969         void **slot;
1970         pgoff_t start;
1971         struct page *page;
1972         int error, scan;
1973 
1974         shmem_tag_pins(mapping);
1975 
1976         error = 0;
1977         for (scan = 0; scan <= LAST_SCAN; scan++) {
1978                 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
1979                         break;
1980 
1981                 if (!scan)
1982                         lru_add_drain_all();
1983                 else if (schedule_timeout_killable((HZ << scan) / 200))
1984                         scan = LAST_SCAN;
1985 
1986                 start = 0;
1987                 rcu_read_lock();
1988                 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
1989                                            start, SHMEM_TAG_PINNED) {
1990 
1991                         page = radix_tree_deref_slot(slot);
1992                         if (radix_tree_exception(page)) {
1993                                 if (radix_tree_deref_retry(page)) {
1994                                         slot = radix_tree_iter_retry(&iter);
1995                                         continue;
1996                                 }
1997 
1998                                 page = NULL;
1999                         }
2000 
2001                         if (page &&
2002                             page_count(page) - page_mapcount(page) != 1) {
2003                                 if (scan < LAST_SCAN)
2004                                         goto continue_resched;
2005 
2006                                 /*
2007                                  * On the last scan, we clean up all those tags
2008                                  * we inserted; but make a note that we still
2009                                  * found pages pinned.
2010                                  */
2011                                 error = -EBUSY;
2012                         }
2013 
2014                         spin_lock_irq(&mapping->tree_lock);
2015                         radix_tree_tag_clear(&mapping->page_tree,
2016                                              iter.index, SHMEM_TAG_PINNED);
2017                         spin_unlock_irq(&mapping->tree_lock);
2018 continue_resched:
2019                         if (need_resched()) {
2020                                 cond_resched_rcu();
2021                                 slot = radix_tree_iter_next(&iter);
2022                         }
2023                 }
2024                 rcu_read_unlock();
2025         }
2026 
2027         return error;
2028 }
2029 
2030 #define F_ALL_SEALS (F_SEAL_SEAL | \
2031                      F_SEAL_SHRINK | \
2032                      F_SEAL_GROW | \
2033                      F_SEAL_WRITE)
2034 
2035 int shmem_add_seals(struct file *file, unsigned int seals)
2036 {
2037         struct inode *inode = file_inode(file);
2038         struct shmem_inode_info *info = SHMEM_I(inode);
2039         int error;
2040 
2041         /*
2042          * SEALING
2043          * Sealing allows multiple parties to share a shmem-file but restrict
2044          * access to a specific subset of file operations. Seals can only be
2045          * added, but never removed. This way, mutually untrusted parties can
2046          * share common memory regions with a well-defined policy. A malicious
2047          * peer can thus never perform unwanted operations on a shared object.
2048          *
2049          * Seals are only supported on special shmem-files and always affect
2050          * the whole underlying inode. Once a seal is set, it may prevent some
2051          * kinds of access to the file. Currently, the following seals are
2052          * defined:
2053          *   SEAL_SEAL: Prevent further seals from being set on this file
2054          *   SEAL_SHRINK: Prevent the file from shrinking
2055          *   SEAL_GROW: Prevent the file from growing
2056          *   SEAL_WRITE: Prevent write access to the file
2057          *
2058          * As we don't require any trust relationship between two parties, we
2059          * must prevent seals from being removed. Therefore, sealing a file
2060          * only adds a given set of seals to the file, it never touches
2061          * existing seals. Furthermore, the "setting seals"-operation can be
2062          * sealed itself, which basically prevents any further seal from being
2063          * added.
2064          *
2065          * Semantics of sealing are only defined on volatile files. Only
2066          * anonymous shmem files support sealing. More importantly, seals are
2067          * never written to disk. Therefore, there's no plan to support it on
2068          * other file types.
2069          */
2070 
2071         if (file->f_op != &shmem_file_operations)
2072                 return -EINVAL;
2073         if (!(file->f_mode & FMODE_WRITE))
2074                 return -EPERM;
2075         if (seals & ~(unsigned int)F_ALL_SEALS)
2076                 return -EINVAL;
2077 
2078         inode_lock(inode);
2079 
2080         if (info->seals & F_SEAL_SEAL) {
2081                 error = -EPERM;
2082                 goto unlock;
2083         }
2084 
2085         if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
2086                 error = mapping_deny_writable(file->f_mapping);
2087                 if (error)
2088                         goto unlock;
2089 
2090                 error = shmem_wait_for_pins(file->f_mapping);
2091                 if (error) {
2092                         mapping_allow_writable(file->f_mapping);
2093                         goto unlock;
2094                 }
2095         }
2096 
2097         info->seals |= seals;
2098         error = 0;
2099 
2100 unlock:
2101         inode_unlock(inode);
2102         return error;
2103 }
2104 EXPORT_SYMBOL_GPL(shmem_add_seals);
2105 
2106 int shmem_get_seals(struct file *file)
2107 {
2108         if (file->f_op != &shmem_file_operations)
2109                 return -EINVAL;
2110 
2111         return SHMEM_I(file_inode(file))->seals;
2112 }
2113 EXPORT_SYMBOL_GPL(shmem_get_seals);
2114 
2115 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2116 {
2117         long error;
2118 
2119         switch (cmd) {
2120         case F_ADD_SEALS:
2121                 /* disallow upper 32bit */
2122                 if (arg > UINT_MAX)
2123                         return -EINVAL;
2124 
2125                 error = shmem_add_seals(file, arg);
2126                 break;
2127         case F_GET_SEALS:
2128                 error = shmem_get_seals(file);
2129                 break;
2130         default:
2131                 error = -EINVAL;
2132                 break;
2133         }
2134 
2135         return error;
2136 }
2137 
2138 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2139                                                          loff_t len)
2140 {
2141         struct inode *inode = file_inode(file);
2142         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2143         struct shmem_inode_info *info = SHMEM_I(inode);
2144         struct shmem_falloc shmem_falloc;
2145         pgoff_t start, index, end;
2146         int error;
2147 
2148         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2149                 return -EOPNOTSUPP;
2150 
2151         inode_lock(inode);
2152 
2153         if (mode & FALLOC_FL_PUNCH_HOLE) {
2154                 struct address_space *mapping = file->f_mapping;
2155                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2156                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2157                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2158 
2159                 /* protected by i_mutex */
2160                 if (info->seals & F_SEAL_WRITE) {
2161                         error = -EPERM;
2162                         goto out;
2163                 }
2164 
2165                 shmem_falloc.waitq = &shmem_falloc_waitq;
2166                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2167                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2168                 spin_lock(&inode->i_lock);
2169                 inode->i_private = &shmem_falloc;
2170                 spin_unlock(&inode->i_lock);
2171 
2172                 if ((u64)unmap_end > (u64)unmap_start)
2173                         unmap_mapping_range(mapping, unmap_start,
2174                                             1 + unmap_end - unmap_start, 0);
2175                 shmem_truncate_range(inode, offset, offset + len - 1);
2176                 /* No need to unmap again: hole-punching leaves COWed pages */
2177 
2178                 spin_lock(&inode->i_lock);
2179                 inode->i_private = NULL;
2180                 wake_up_all(&shmem_falloc_waitq);
2181                 spin_unlock(&inode->i_lock);
2182                 error = 0;
2183                 goto out;
2184         }
2185 
2186         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2187         error = inode_newsize_ok(inode, offset + len);
2188         if (error)
2189                 goto out;
2190 
2191         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2192                 error = -EPERM;
2193                 goto out;
2194         }
2195 
2196         start = offset >> PAGE_SHIFT;
2197         end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2198         /* Try to avoid a swapstorm if len is impossible to satisfy */
2199         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2200                 error = -ENOSPC;
2201                 goto out;
2202         }
2203 
2204         shmem_falloc.waitq = NULL;
2205         shmem_falloc.start = start;
2206         shmem_falloc.next  = start;
2207         shmem_falloc.nr_falloced = 0;
2208         shmem_falloc.nr_unswapped = 0;
2209         spin_lock(&inode->i_lock);
2210         inode->i_private = &shmem_falloc;
2211         spin_unlock(&inode->i_lock);
2212 
2213         for (index = start; index < end; index++) {
2214                 struct page *page;
2215 
2216                 /*
2217                  * Good, the fallocate(2) manpage permits EINTR: we may have
2218                  * been interrupted because we are using up too much memory.
2219                  */
2220                 if (signal_pending(current))
2221                         error = -EINTR;
2222                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2223                         error = -ENOMEM;
2224                 else
2225                         error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2226                 if (error) {
2227                         /* Remove the !PageUptodate pages we added */
2228                         if (index > start) {
2229                                 shmem_undo_range(inode,
2230                                     (loff_t)start << PAGE_SHIFT,
2231                                     ((loff_t)index << PAGE_SHIFT) - 1, true);
2232                         }
2233                         goto undone;
2234                 }
2235 
2236                 /*
2237                  * Inform shmem_writepage() how far we have reached.
2238                  * No need for lock or barrier: we have the page lock.
2239                  */
2240                 shmem_falloc.next++;
2241                 if (!PageUptodate(page))
2242                         shmem_falloc.nr_falloced++;
2243 
2244                 /*
2245                  * If !PageUptodate, leave it that way so that freeable pages
2246                  * can be recognized if we need to rollback on error later.
2247                  * But set_page_dirty so that memory pressure will swap rather
2248                  * than free the pages we are allocating (and SGP_CACHE pages
2249                  * might still be clean: we now need to mark those dirty too).
2250                  */
2251                 set_page_dirty(page);
2252                 unlock_page(page);
2253                 put_page(page);
2254                 cond_resched();
2255         }
2256 
2257         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2258                 i_size_write(inode, offset + len);
2259         inode->i_ctime = CURRENT_TIME;
2260 undone:
2261         spin_lock(&inode->i_lock);
2262         inode->i_private = NULL;
2263         spin_unlock(&inode->i_lock);
2264 out:
2265         inode_unlock(inode);
2266         return error;
2267 }
2268 
2269 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2270 {
2271         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2272 
2273         buf->f_type = TMPFS_MAGIC;
2274         buf->f_bsize = PAGE_SIZE;
2275         buf->f_namelen = NAME_MAX;
2276         if (sbinfo->max_blocks) {
2277                 buf->f_blocks = sbinfo->max_blocks;
2278                 buf->f_bavail =
2279                 buf->f_bfree  = sbinfo->max_blocks -
2280                                 percpu_counter_sum(&sbinfo->used_blocks);
2281         }
2282         if (sbinfo->max_inodes) {
2283                 buf->f_files = sbinfo->max_inodes;
2284                 buf->f_ffree = sbinfo->free_inodes;
2285         }
2286         /* else leave those fields 0 like simple_statfs */
2287         return 0;
2288 }
2289 
2290 /*
2291  * File creation. Allocate an inode, and we're done..
2292  */
2293 static int
2294 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2295 {
2296         struct inode *inode;
2297         int error = -ENOSPC;
2298 
2299         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2300         if (inode) {
2301                 error = simple_acl_create(dir, inode);
2302                 if (error)
2303                         goto out_iput;
2304                 error = security_inode_init_security(inode, dir,
2305                                                      &dentry->d_name,
2306                                                      shmem_initxattrs, NULL);
2307                 if (error && error != -EOPNOTSUPP)
2308                         goto out_iput;
2309 
2310                 error = 0;
2311                 dir->i_size += BOGO_DIRENT_SIZE;
2312                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2313                 d_instantiate(dentry, inode);
2314                 dget(dentry); /* Extra count - pin the dentry in core */
2315         }
2316         return error;
2317 out_iput:
2318         iput(inode);
2319         return error;
2320 }
2321 
2322 static int
2323 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2324 {
2325         struct inode *inode;
2326         int error = -ENOSPC;
2327 
2328         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2329         if (inode) {
2330                 error = security_inode_init_security(inode, dir,
2331                                                      NULL,
2332                                                      shmem_initxattrs, NULL);
2333                 if (error && error != -EOPNOTSUPP)
2334                         goto out_iput;
2335                 error = simple_acl_create(dir, inode);
2336                 if (error)
2337                         goto out_iput;
2338                 d_tmpfile(dentry, inode);
2339         }
2340         return error;
2341 out_iput:
2342         iput(inode);
2343         return error;
2344 }
2345 
2346 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2347 {
2348         int error;
2349 
2350         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2351                 return error;
2352         inc_nlink(dir);
2353         return 0;
2354 }
2355 
2356 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2357                 bool excl)
2358 {
2359         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2360 }
2361 
2362 /*
2363  * Link a file..
2364  */
2365 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2366 {
2367         struct inode *inode = d_inode(old_dentry);
2368         int ret;
2369 
2370         /*
2371          * No ordinary (disk based) filesystem counts links as inodes;
2372          * but each new link needs a new dentry, pinning lowmem, and
2373          * tmpfs dentries cannot be pruned until they are unlinked.
2374          */
2375         ret = shmem_reserve_inode(inode->i_sb);
2376         if (ret)
2377                 goto out;
2378 
2379         dir->i_size += BOGO_DIRENT_SIZE;
2380         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2381         inc_nlink(inode);
2382         ihold(inode);   /* New dentry reference */
2383         dget(dentry);           /* Extra pinning count for the created dentry */
2384         d_instantiate(dentry, inode);
2385 out:
2386         return ret;
2387 }
2388 
2389 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2390 {
2391         struct inode *inode = d_inode(dentry);
2392 
2393         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2394                 shmem_free_inode(inode->i_sb);
2395 
2396         dir->i_size -= BOGO_DIRENT_SIZE;
2397         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2398         drop_nlink(inode);
2399         dput(dentry);   /* Undo the count from "create" - this does all the work */
2400         return 0;
2401 }
2402 
2403 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2404 {
2405         if (!simple_empty(dentry))
2406                 return -ENOTEMPTY;
2407 
2408         drop_nlink(d_inode(dentry));
2409         drop_nlink(dir);
2410         return shmem_unlink(dir, dentry);
2411 }
2412 
2413 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2414 {
2415         bool old_is_dir = d_is_dir(old_dentry);
2416         bool new_is_dir = d_is_dir(new_dentry);
2417 
2418         if (old_dir != new_dir && old_is_dir != new_is_dir) {
2419                 if (old_is_dir) {
2420                         drop_nlink(old_dir);
2421                         inc_nlink(new_dir);
2422                 } else {
2423                         drop_nlink(new_dir);
2424                         inc_nlink(old_dir);
2425                 }
2426         }
2427         old_dir->i_ctime = old_dir->i_mtime =
2428         new_dir->i_ctime = new_dir->i_mtime =
2429         d_inode(old_dentry)->i_ctime =
2430         d_inode(new_dentry)->i_ctime = CURRENT_TIME;
2431 
2432         return 0;
2433 }
2434 
2435 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
2436 {
2437         struct dentry *whiteout;
2438         int error;
2439 
2440         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2441         if (!whiteout)
2442                 return -ENOMEM;
2443 
2444         error = shmem_mknod(old_dir, whiteout,
2445                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2446         dput(whiteout);
2447         if (error)
2448                 return error;
2449 
2450         /*
2451          * Cheat and hash the whiteout while the old dentry is still in
2452          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2453          *
2454          * d_lookup() will consistently find one of them at this point,
2455          * not sure which one, but that isn't even important.
2456          */
2457         d_rehash(whiteout);
2458         return 0;
2459 }
2460 
2461 /*
2462  * The VFS layer already does all the dentry stuff for rename,
2463  * we just have to decrement the usage count for the target if
2464  * it exists so that the VFS layer correctly free's it when it
2465  * gets overwritten.
2466  */
2467 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
2468 {
2469         struct inode *inode = d_inode(old_dentry);
2470         int they_are_dirs = S_ISDIR(inode->i_mode);
2471 
2472         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
2473                 return -EINVAL;
2474 
2475         if (flags & RENAME_EXCHANGE)
2476                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
2477 
2478         if (!simple_empty(new_dentry))
2479                 return -ENOTEMPTY;
2480 
2481         if (flags & RENAME_WHITEOUT) {
2482                 int error;
2483 
2484                 error = shmem_whiteout(old_dir, old_dentry);
2485                 if (error)
2486                         return error;
2487         }
2488 
2489         if (d_really_is_positive(new_dentry)) {
2490                 (void) shmem_unlink(new_dir, new_dentry);
2491                 if (they_are_dirs) {
2492                         drop_nlink(d_inode(new_dentry));
2493                         drop_nlink(old_dir);
2494                 }
2495         } else if (they_are_dirs) {
2496                 drop_nlink(old_dir);
2497                 inc_nlink(new_dir);
2498         }
2499 
2500         old_dir->i_size -= BOGO_DIRENT_SIZE;
2501         new_dir->i_size += BOGO_DIRENT_SIZE;
2502         old_dir->i_ctime = old_dir->i_mtime =
2503         new_dir->i_ctime = new_dir->i_mtime =
2504         inode->i_ctime = CURRENT_TIME;
2505         return 0;
2506 }
2507 
2508 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2509 {
2510         int error;
2511         int len;
2512         struct inode *inode;
2513         struct page *page;
2514         struct shmem_inode_info *info;
2515 
2516         len = strlen(symname) + 1;
2517         if (len > PAGE_SIZE)
2518                 return -ENAMETOOLONG;
2519 
2520         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2521         if (!inode)
2522                 return -ENOSPC;
2523 
2524         error = security_inode_init_security(inode, dir, &dentry->d_name,
2525                                              shmem_initxattrs, NULL);
2526         if (error) {
2527                 if (error != -EOPNOTSUPP) {
2528                         iput(inode);
2529                         return error;
2530                 }
2531                 error = 0;
2532         }
2533 
2534         info = SHMEM_I(inode);
2535         inode->i_size = len-1;
2536         if (len <= SHORT_SYMLINK_LEN) {
2537                 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
2538                 if (!inode->i_link) {
2539                         iput(inode);
2540                         return -ENOMEM;
2541                 }
2542                 inode->i_op = &shmem_short_symlink_operations;
2543         } else {
2544                 inode_nohighmem(inode);
2545                 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
2546                 if (error) {
2547                         iput(inode);
2548                         return error;
2549                 }
2550                 inode->i_mapping->a_ops = &shmem_aops;
2551                 inode->i_op = &shmem_symlink_inode_operations;
2552                 memcpy(page_address(page), symname, len);
2553                 SetPageUptodate(page);
2554                 set_page_dirty(page);
2555                 unlock_page(page);
2556                 put_page(page);
2557         }
2558         dir->i_size += BOGO_DIRENT_SIZE;
2559         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2560         d_instantiate(dentry, inode);
2561         dget(dentry);
2562         return 0;
2563 }
2564 
2565 static void shmem_put_link(void *arg)
2566 {
2567         mark_page_accessed(arg);
2568         put_page(arg);
2569 }
2570 
2571 static const char *shmem_get_link(struct dentry *dentry,
2572                                   struct inode *inode,
2573                                   struct delayed_call *done)
2574 {
2575         struct page *page = NULL;
2576         int error;
2577         if (!dentry) {
2578                 page = find_get_page(inode->i_mapping, 0);
2579                 if (!page)
2580                         return ERR_PTR(-ECHILD);
2581                 if (!PageUptodate(page)) {
2582                         put_page(page);
2583                         return ERR_PTR(-ECHILD);
2584                 }
2585         } else {
2586                 error = shmem_getpage(inode, 0, &page, SGP_READ);
2587                 if (error)
2588                         return ERR_PTR(error);
2589                 unlock_page(page);
2590         }
2591         set_delayed_call(done, shmem_put_link, page);
2592         return page_address(page);
2593 }
2594 
2595 #ifdef CONFIG_TMPFS_XATTR
2596 /*
2597  * Superblocks without xattr inode operations may get some security.* xattr
2598  * support from the LSM "for free". As soon as we have any other xattrs
2599  * like ACLs, we also need to implement the security.* handlers at
2600  * filesystem level, though.
2601  */
2602 
2603 /*
2604  * Callback for security_inode_init_security() for acquiring xattrs.
2605  */
2606 static int shmem_initxattrs(struct inode *inode,
2607                             const struct xattr *xattr_array,
2608                             void *fs_info)
2609 {
2610         struct shmem_inode_info *info = SHMEM_I(inode);
2611         const struct xattr *xattr;
2612         struct simple_xattr *new_xattr;
2613         size_t len;
2614 
2615         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
2616                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
2617                 if (!new_xattr)
2618                         return -ENOMEM;
2619 
2620                 len = strlen(xattr->name) + 1;
2621                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
2622                                           GFP_KERNEL);
2623                 if (!new_xattr->name) {
2624                         kfree(new_xattr);
2625                         return -ENOMEM;
2626                 }
2627 
2628                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
2629                        XATTR_SECURITY_PREFIX_LEN);
2630                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
2631                        xattr->name, len);
2632 
2633                 simple_xattr_list_add(&info->xattrs, new_xattr);
2634         }
2635 
2636         return 0;
2637 }
2638 
2639 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
2640                                    struct dentry *unused, struct inode *inode,
2641                                    const char *name, void *buffer, size_t size)
2642 {
2643         struct shmem_inode_info *info = SHMEM_I(inode);
2644 
2645         name = xattr_full_name(handler, name);
2646         return simple_xattr_get(&info->xattrs, name, buffer, size);
2647 }
2648 
2649 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
2650                                    struct dentry *unused, struct inode *inode,
2651                                    const char *name, const void *value,
2652                                    size_t size, int flags)
2653 {
2654         struct shmem_inode_info *info = SHMEM_I(inode);
2655 
2656         name = xattr_full_name(handler, name);
2657         return simple_xattr_set(&info->xattrs, name, value, size, flags);
2658 }
2659 
2660 static const struct xattr_handler shmem_security_xattr_handler = {
2661         .prefix = XATTR_SECURITY_PREFIX,
2662         .get = shmem_xattr_handler_get,
2663         .set = shmem_xattr_handler_set,
2664 };
2665 
2666 static const struct xattr_handler shmem_trusted_xattr_handler = {
2667         .prefix = XATTR_TRUSTED_PREFIX,
2668         .get = shmem_xattr_handler_get,
2669         .set = shmem_xattr_handler_set,
2670 };
2671 
2672 static const struct xattr_handler *shmem_xattr_handlers[] = {
2673 #ifdef CONFIG_TMPFS_POSIX_ACL
2674         &posix_acl_access_xattr_handler,
2675         &posix_acl_default_xattr_handler,
2676 #endif
2677         &shmem_security_xattr_handler,
2678         &shmem_trusted_xattr_handler,
2679         NULL
2680 };
2681 
2682 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2683 {
2684         struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
2685         return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
2686 }
2687 #endif /* CONFIG_TMPFS_XATTR */
2688 
2689 static const struct inode_operations shmem_short_symlink_operations = {
2690         .readlink       = generic_readlink,
2691         .get_link       = simple_get_link,
2692 #ifdef CONFIG_TMPFS_XATTR
2693         .setxattr       = generic_setxattr,
2694         .getxattr       = generic_getxattr,
2695         .listxattr      = shmem_listxattr,
2696         .removexattr    = generic_removexattr,
2697 #endif
2698 };
2699 
2700 static const struct inode_operations shmem_symlink_inode_operations = {
2701         .readlink       = generic_readlink,
2702         .get_link       = shmem_get_link,
2703 #ifdef CONFIG_TMPFS_XATTR
2704         .setxattr       = generic_setxattr,
2705         .getxattr       = generic_getxattr,
2706         .listxattr      = shmem_listxattr,
2707         .removexattr    = generic_removexattr,
2708 #endif
2709 };
2710 
2711 static struct dentry *shmem_get_parent(struct dentry *child)
2712 {
2713         return ERR_PTR(-ESTALE);
2714 }
2715 
2716 static int shmem_match(struct inode *ino, void *vfh)
2717 {
2718         __u32 *fh = vfh;
2719         __u64 inum = fh[2];
2720         inum = (inum << 32) | fh[1];
2721         return ino->i_ino == inum && fh[0] == ino->i_generation;
2722 }
2723 
2724 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2725                 struct fid *fid, int fh_len, int fh_type)
2726 {
2727         struct inode *inode;
2728         struct dentry *dentry = NULL;
2729         u64 inum;
2730 
2731         if (fh_len < 3)
2732                 return NULL;
2733 
2734         inum = fid->raw[2];
2735         inum = (inum << 32) | fid->raw[1];
2736 
2737         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2738                         shmem_match, fid->raw);
2739         if (inode) {
2740                 dentry = d_find_alias(inode);
2741                 iput(inode);
2742         }
2743 
2744         return dentry;
2745 }
2746 
2747 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2748                                 struct inode *parent)
2749 {
2750         if (*len < 3) {
2751                 *len = 3;
2752                 return FILEID_INVALID;
2753         }
2754 
2755         if (inode_unhashed(inode)) {
2756                 /* Unfortunately insert_inode_hash is not idempotent,
2757                  * so as we hash inodes here rather than at creation
2758                  * time, we need a lock to ensure we only try
2759                  * to do it once
2760                  */
2761                 static DEFINE_SPINLOCK(lock);
2762                 spin_lock(&lock);
2763                 if (inode_unhashed(inode))
2764                         __insert_inode_hash(inode,
2765                                             inode->i_ino + inode->i_generation);
2766                 spin_unlock(&lock);
2767         }
2768 
2769         fh[0] = inode->i_generation;
2770         fh[1] = inode->i_ino;
2771         fh[2] = ((__u64)inode->i_ino) >> 32;
2772 
2773         *len = 3;
2774         return 1;
2775 }
2776 
2777 static const struct export_operations shmem_export_ops = {
2778         .get_parent     = shmem_get_parent,
2779         .encode_fh      = shmem_encode_fh,
2780         .fh_to_dentry   = shmem_fh_to_dentry,
2781 };
2782 
2783 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2784                                bool remount)
2785 {
2786         char *this_char, *value, *rest;
2787         struct mempolicy *mpol = NULL;
2788         uid_t uid;
2789         gid_t gid;
2790 
2791         while (options != NULL) {
2792                 this_char = options;
2793                 for (;;) {
2794                         /*
2795                          * NUL-terminate this option: unfortunately,
2796                          * mount options form a comma-separated list,
2797                          * but mpol's nodelist may also contain commas.
2798                          */
2799                         options = strchr(options, ',');
2800                         if (options == NULL)
2801                                 break;
2802                         options++;
2803                         if (!isdigit(*options)) {
2804                                 options[-1] = '\0';
2805                                 break;
2806                         }
2807                 }
2808                 if (!*this_char)
2809                         continue;
2810                 if ((value = strchr(this_char,'=')) != NULL) {
2811                         *value++ = 0;
2812                 } else {
2813                         pr_err("tmpfs: No value for mount option '%s'\n",
2814                                this_char);
2815                         goto error;
2816                 }
2817 
2818                 if (!strcmp(this_char,"size")) {
2819                         unsigned long long size;
2820                         size = memparse(value,&rest);
2821                         if (*rest == '%') {
2822                                 size <<= PAGE_SHIFT;
2823                                 size *= totalram_pages;
2824                                 do_div(size, 100);
2825                                 rest++;
2826                         }
2827                         if (*rest)
2828                                 goto bad_val;
2829                         sbinfo->max_blocks =
2830                                 DIV_ROUND_UP(size, PAGE_SIZE);
2831                 } else if (!strcmp(this_char,"nr_blocks")) {
2832                         sbinfo->max_blocks = memparse(value, &rest);
2833                         if (*rest)
2834                                 goto bad_val;
2835                 } else if (!strcmp(this_char,"nr_inodes")) {
2836                         sbinfo->max_inodes = memparse(value, &rest);
2837                         if (*rest)
2838                                 goto bad_val;
2839                 } else if (!strcmp(this_char,"mode")) {
2840                         if (remount)
2841                                 continue;
2842                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2843                         if (*rest)
2844                                 goto bad_val;
2845                 } else if (!strcmp(this_char,"uid")) {
2846                         if (remount)
2847                                 continue;
2848                         uid = simple_strtoul(value, &rest, 0);
2849                         if (*rest)
2850                                 goto bad_val;
2851                         sbinfo->uid = make_kuid(current_user_ns(), uid);
2852                         if (!uid_valid(sbinfo->uid))
2853                                 goto bad_val;
2854                 } else if (!strcmp(this_char,"gid")) {
2855                         if (remount)
2856                                 continue;
2857                         gid = simple_strtoul(value, &rest, 0);
2858                         if (*rest)
2859                                 goto bad_val;
2860                         sbinfo->gid = make_kgid(current_user_ns(), gid);
2861                         if (!gid_valid(sbinfo->gid))
2862                                 goto bad_val;
2863                 } else if (!strcmp(this_char,"mpol")) {
2864                         mpol_put(mpol);
2865                         mpol = NULL;
2866                         if (mpol_parse_str(value, &mpol))
2867                                 goto bad_val;
2868                 } else {
2869                         pr_err("tmpfs: Bad mount option %s\n", this_char);
2870                         goto error;
2871                 }
2872         }
2873         sbinfo->mpol = mpol;
2874         return 0;
2875 
2876 bad_val:
2877         pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
2878                value, this_char);
2879 error:
2880         mpol_put(mpol);
2881         return 1;
2882 
2883 }
2884 
2885 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2886 {
2887         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2888         struct shmem_sb_info config = *sbinfo;
2889         unsigned long inodes;
2890         int error = -EINVAL;
2891 
2892         config.mpol = NULL;
2893         if (shmem_parse_options(data, &config, true))
2894                 return error;
2895 
2896         spin_lock(&sbinfo->stat_lock);
2897         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2898         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2899                 goto out;
2900         if (config.max_inodes < inodes)
2901                 goto out;
2902         /*
2903          * Those tests disallow limited->unlimited while any are in use;
2904          * but we must separately disallow unlimited->limited, because
2905          * in that case we have no record of how much is already in use.
2906          */
2907         if (config.max_blocks && !sbinfo->max_blocks)
2908                 goto out;
2909         if (config.max_inodes && !sbinfo->max_inodes)
2910                 goto out;
2911 
2912         error = 0;
2913         sbinfo->max_blocks  = config.max_blocks;
2914         sbinfo->max_inodes  = config.max_inodes;
2915         sbinfo->free_inodes = config.max_inodes - inodes;
2916 
2917         /*
2918          * Preserve previous mempolicy unless mpol remount option was specified.
2919          */
2920         if (config.mpol) {
2921                 mpol_put(sbinfo->mpol);
2922                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
2923         }
2924 out:
2925         spin_unlock(&sbinfo->stat_lock);
2926         return error;
2927 }
2928 
2929 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
2930 {
2931         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
2932 
2933         if (sbinfo->max_blocks != shmem_default_max_blocks())
2934                 seq_printf(seq, ",size=%luk",
2935                         sbinfo->max_blocks << (PAGE_SHIFT - 10));
2936         if (sbinfo->max_inodes != shmem_default_max_inodes())
2937                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2938         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2939                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
2940         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
2941                 seq_printf(seq, ",uid=%u",
2942                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
2943         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
2944                 seq_printf(seq, ",gid=%u",
2945                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
2946         shmem_show_mpol(seq, sbinfo->mpol);
2947         return 0;
2948 }
2949 
2950 #define MFD_NAME_PREFIX "memfd:"
2951 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
2952 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
2953 
2954 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
2955 
2956 SYSCALL_DEFINE2(memfd_create,
2957                 const char __user *, uname,
2958                 unsigned int, flags)
2959 {
2960         struct shmem_inode_info *info;
2961         struct file *file;
2962         int fd, error;
2963         char *name;
2964         long len;
2965 
2966         if (flags & ~(unsigned int)MFD_ALL_FLAGS)
2967                 return -EINVAL;
2968 
2969         /* length includes terminating zero */
2970         len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
2971         if (len <= 0)
2972                 return -EFAULT;
2973         if (len > MFD_NAME_MAX_LEN + 1)
2974                 return -EINVAL;
2975 
2976         name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
2977         if (!name)
2978                 return -ENOMEM;
2979 
2980         strcpy(name, MFD_NAME_PREFIX);
2981         if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
2982                 error = -EFAULT;
2983                 goto err_name;
2984         }
2985 
2986         /* terminating-zero may have changed after strnlen_user() returned */
2987         if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
2988                 error = -EFAULT;
2989                 goto err_name;
2990         }
2991 
2992         fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
2993         if (fd < 0) {
2994                 error = fd;
2995                 goto err_name;
2996         }
2997 
2998         file = shmem_file_setup(name, 0, VM_NORESERVE);
2999         if (IS_ERR(file)) {
3000                 error = PTR_ERR(file);
3001                 goto err_fd;
3002         }
3003         info = SHMEM_I(file_inode(file));
3004         file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
3005         file->f_flags |= O_RDWR | O_LARGEFILE;
3006         if (flags & MFD_ALLOW_SEALING)
3007                 info->seals &= ~F_SEAL_SEAL;
3008 
3009         fd_install(fd, file);
3010         kfree(name);
3011         return fd;
3012 
3013 err_fd:
3014         put_unused_fd(fd);
3015 err_name:
3016         kfree(name);
3017         return error;
3018 }
3019 
3020 #endif /* CONFIG_TMPFS */
3021 
3022 static void shmem_put_super(struct super_block *sb)
3023 {
3024         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3025 
3026         percpu_counter_destroy(&sbinfo->used_blocks);
3027         mpol_put(sbinfo->mpol);
3028         kfree(sbinfo);
3029         sb->s_fs_info = NULL;
3030 }
3031 
3032 int shmem_fill_super(struct super_block *sb, void *data, int silent)
3033 {
3034         struct inode *inode;
3035         struct shmem_sb_info *sbinfo;
3036         int err = -ENOMEM;
3037 
3038         /* Round up to L1_CACHE_BYTES to resist false sharing */
3039         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3040                                 L1_CACHE_BYTES), GFP_KERNEL);
3041         if (!sbinfo)
3042                 return -ENOMEM;
3043 
3044         sbinfo->mode = S_IRWXUGO | S_ISVTX;
3045         sbinfo->uid = current_fsuid();
3046         sbinfo->gid = current_fsgid();
3047         sb->s_fs_info = sbinfo;
3048 
3049 #ifdef CONFIG_TMPFS
3050         /*
3051          * Per default we only allow half of the physical ram per
3052          * tmpfs instance, limiting inodes to one per page of lowmem;
3053          * but the internal instance is left unlimited.
3054          */
3055         if (!(sb->s_flags & MS_KERNMOUNT)) {
3056                 sbinfo->max_blocks = shmem_default_max_blocks();
3057                 sbinfo->max_inodes = shmem_default_max_inodes();
3058                 if (shmem_parse_options(data, sbinfo, false)) {
3059                         err = -EINVAL;
3060                         goto failed;
3061                 }
3062         } else {
3063                 sb->s_flags |= MS_NOUSER;
3064         }
3065         sb->s_export_op = &shmem_export_ops;
3066         sb->s_flags |= MS_NOSEC;
3067 #else
3068         sb->s_flags |= MS_NOUSER;
3069 #endif
3070 
3071         spin_lock_init(&sbinfo->stat_lock);
3072         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3073                 goto failed;
3074         sbinfo->free_inodes = sbinfo->max_inodes;
3075 
3076         sb->s_maxbytes = MAX_LFS_FILESIZE;
3077         sb->s_blocksize = PAGE_SIZE;
3078         sb->s_blocksize_bits = PAGE_SHIFT;
3079         sb->s_magic = TMPFS_MAGIC;
3080         sb->s_op = &shmem_ops;
3081         sb->s_time_gran = 1;
3082 #ifdef CONFIG_TMPFS_XATTR
3083         sb->s_xattr = shmem_xattr_handlers;
3084 #endif
3085 #ifdef CONFIG_TMPFS_POSIX_ACL
3086         sb->s_flags |= MS_POSIXACL;
3087 #endif
3088 
3089         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3090         if (!inode)
3091                 goto failed;
3092         inode->i_uid = sbinfo->uid;
3093         inode->i_gid = sbinfo->gid;
3094         sb->s_root = d_make_root(inode);
3095         if (!sb->s_root)
3096                 goto failed;
3097         return 0;
3098 
3099 failed:
3100         shmem_put_super(sb);
3101         return err;
3102 }
3103 
3104 static struct kmem_cache *shmem_inode_cachep;
3105 
3106 static struct inode *shmem_alloc_inode(struct super_block *sb)
3107 {
3108         struct shmem_inode_info *info;
3109         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3110         if (!info)
3111                 return NULL;
3112         return &info->vfs_inode;
3113 }
3114 
3115 static void shmem_destroy_callback(struct rcu_head *head)
3116 {
3117         struct inode *inode = container_of(head, struct inode, i_rcu);
3118         if (S_ISLNK(inode->i_mode))
3119                 kfree(inode->i_link);
3120         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3121 }
3122 
3123 static void shmem_destroy_inode(struct inode *inode)
3124 {
3125         if (S_ISREG(inode->i_mode))
3126                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3127         call_rcu(&inode->i_rcu, shmem_destroy_callback);
3128 }
3129 
3130 static void shmem_init_inode(void *foo)
3131 {
3132         struct shmem_inode_info *info = foo;
3133         inode_init_once(&info->vfs_inode);
3134 }
3135 
3136 static int shmem_init_inodecache(void)
3137 {
3138         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3139                                 sizeof(struct shmem_inode_info),
3140                                 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3141         return 0;
3142 }
3143 
3144 static void shmem_destroy_inodecache(void)
3145 {
3146         kmem_cache_destroy(shmem_inode_cachep);
3147 }
3148 
3149 static const struct address_space_operations shmem_aops = {
3150         .writepage      = shmem_writepage,
3151         .set_page_dirty = __set_page_dirty_no_writeback,
3152 #ifdef CONFIG_TMPFS
3153         .write_begin    = shmem_write_begin,
3154         .write_end      = shmem_write_end,
3155 #endif
3156 #ifdef CONFIG_MIGRATION
3157         .migratepage    = migrate_page,
3158 #endif
3159         .error_remove_page = generic_error_remove_page,
3160 };
3161 
3162 static const struct file_operations shmem_file_operations = {
3163         .mmap           = shmem_mmap,
3164 #ifdef CONFIG_TMPFS
3165         .llseek         = shmem_file_llseek,
3166         .read_iter      = shmem_file_read_iter,
3167         .write_iter     = generic_file_write_iter,
3168         .fsync          = noop_fsync,
3169         .splice_read    = shmem_file_splice_read,
3170         .splice_write   = iter_file_splice_write,
3171         .fallocate      = shmem_fallocate,
3172 #endif
3173 };
3174 
3175 static const struct inode_operations shmem_inode_operations = {
3176         .getattr        = shmem_getattr,
3177         .setattr        = shmem_setattr,
3178 #ifdef CONFIG_TMPFS_XATTR
3179         .setxattr       = generic_setxattr,
3180         .getxattr       = generic_getxattr,
3181         .listxattr      = shmem_listxattr,
3182         .removexattr    = generic_removexattr,
3183         .set_acl        = simple_set_acl,
3184 #endif
3185 };
3186 
3187 static const struct inode_operations shmem_dir_inode_operations = {
3188 #ifdef CONFIG_TMPFS
3189         .create         = shmem_create,
3190         .lookup         = simple_lookup,
3191         .link           = shmem_link,
3192         .unlink         = shmem_unlink,
3193         .symlink        = shmem_symlink,
3194         .mkdir          = shmem_mkdir,
3195         .rmdir          = shmem_rmdir,
3196         .mknod          = shmem_mknod,
3197         .rename2        = shmem_rename2,
3198         .tmpfile        = shmem_tmpfile,
3199 #endif
3200 #ifdef CONFIG_TMPFS_XATTR
3201         .setxattr       = generic_setxattr,
3202         .getxattr       = generic_getxattr,
3203         .listxattr      = shmem_listxattr,
3204         .removexattr    = generic_removexattr,
3205 #endif
3206 #ifdef CONFIG_TMPFS_POSIX_ACL
3207         .setattr        = shmem_setattr,
3208         .set_acl        = simple_set_acl,
3209 #endif
3210 };
3211 
3212 static const struct inode_operations shmem_special_inode_operations = {
3213 #ifdef CONFIG_TMPFS_XATTR
3214         .setxattr       = generic_setxattr,
3215         .getxattr       = generic_getxattr,
3216         .listxattr      = shmem_listxattr,
3217         .removexattr    = generic_removexattr,
3218 #endif
3219 #ifdef CONFIG_TMPFS_POSIX_ACL
3220         .setattr        = shmem_setattr,
3221         .set_acl        = simple_set_acl,
3222 #endif
3223 };
3224 
3225 static const struct super_operations shmem_ops = {
3226         .alloc_inode    = shmem_alloc_inode,
3227         .destroy_inode  = shmem_destroy_inode,
3228 #ifdef CONFIG_TMPFS
3229         .statfs         = shmem_statfs,
3230         .remount_fs     = shmem_remount_fs,
3231         .show_options   = shmem_show_options,
3232 #endif
3233         .evict_inode    = shmem_evict_inode,
3234         .drop_inode     = generic_delete_inode,
3235         .put_super      = shmem_put_super,
3236 };
3237 
3238 static const struct vm_operations_struct shmem_vm_ops = {
3239         .fault          = shmem_fault,
3240         .map_pages      = filemap_map_pages,
3241 #ifdef CONFIG_NUMA
3242         .set_policy     = shmem_set_policy,
3243         .get_policy     = shmem_get_policy,
3244 #endif
3245 };
3246 
3247 static struct dentry *shmem_mount(struct file_system_type *fs_type,
3248         int flags, const char *dev_name, void *data)
3249 {
3250         return mount_nodev(fs_type, flags, data, shmem_fill_super);
3251 }
3252 
3253 static struct file_system_type shmem_fs_type = {
3254         .owner          = THIS_MODULE,
3255         .name           = "tmpfs",
3256         .mount          = shmem_mount,
3257         .kill_sb        = kill_litter_super,
3258         .fs_flags       = FS_USERNS_MOUNT,
3259 };
3260 
3261 int __init shmem_init(void)
3262 {
3263         int error;
3264 
3265         /* If rootfs called this, don't re-init */
3266         if (shmem_inode_cachep)
3267                 return 0;
3268 
3269         error = shmem_init_inodecache();
3270         if (error)
3271                 goto out3;
3272 
3273         error = register_filesystem(&shmem_fs_type);
3274         if (error) {
3275                 pr_err("Could not register tmpfs\n");
3276                 goto out2;
3277         }
3278 
3279         shm_mnt = kern_mount(&shmem_fs_type);
3280         if (IS_ERR(shm_mnt)) {
3281                 error = PTR_ERR(shm_mnt);
3282                 pr_err("Could not kern_mount tmpfs\n");
3283                 goto out1;
3284         }
3285         return 0;
3286 
3287 out1:
3288         unregister_filesystem(&shmem_fs_type);
3289 out2:
3290         shmem_destroy_inodecache();
3291 out3:
3292         shm_mnt = ERR_PTR(error);
3293         return error;
3294 }
3295 
3296 #else /* !CONFIG_SHMEM */
3297 
3298 /*
3299  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3300  *
3301  * This is intended for small system where the benefits of the full
3302  * shmem code (swap-backed and resource-limited) are outweighed by
3303  * their complexity. On systems without swap this code should be
3304  * effectively equivalent, but much lighter weight.
3305  */
3306 
3307 static struct file_system_type shmem_fs_type = {
3308         .name           = "tmpfs",
3309         .mount          = ramfs_mount,
3310         .kill_sb        = kill_litter_super,
3311         .fs_flags       = FS_USERNS_MOUNT,
3312 };
3313 
3314 int __init shmem_init(void)
3315 {
3316         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3317 
3318         shm_mnt = kern_mount(&shmem_fs_type);
3319         BUG_ON(IS_ERR(shm_mnt));
3320 
3321         return 0;
3322 }
3323 
3324 int shmem_unuse(swp_entry_t swap, struct page *page)
3325 {
3326         return 0;
3327 }
3328 
3329 int shmem_lock(struct file *file, int lock, struct user_struct *user)
3330 {
3331         return 0;
3332 }
3333 
3334 void shmem_unlock_mapping(struct address_space *mapping)
3335 {
3336 }
3337 
3338 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
3339 {
3340         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
3341 }
3342 EXPORT_SYMBOL_GPL(shmem_truncate_range);
3343 
3344 #define shmem_vm_ops                            generic_file_vm_ops
3345 #define shmem_file_operations                   ramfs_file_operations
3346 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3347 #define shmem_acct_size(flags, size)            0
3348 #define shmem_unacct_size(flags, size)          do {} while (0)
3349 
3350 #endif /* CONFIG_SHMEM */
3351 
3352 /* common code */
3353 
3354 static struct dentry_operations anon_ops = {
3355         .d_dname = simple_dname
3356 };
3357 
3358 static struct file *__shmem_file_setup(const char *name, loff_t size,
3359                                        unsigned long flags, unsigned int i_flags)
3360 {
3361         struct file *res;
3362         struct inode *inode;
3363         struct path path;
3364         struct super_block *sb;
3365         struct qstr this;
3366 
3367         if (IS_ERR(shm_mnt))
3368                 return ERR_CAST(shm_mnt);
3369 
3370         if (size < 0 || size > MAX_LFS_FILESIZE)
3371                 return ERR_PTR(-EINVAL);
3372 
3373         if (shmem_acct_size(flags, size))
3374                 return ERR_PTR(-ENOMEM);
3375 
3376         res = ERR_PTR(-ENOMEM);
3377         this.name = name;
3378         this.len = strlen(name);
3379         this.hash = 0; /* will go */
3380         sb = shm_mnt->mnt_sb;
3381         path.mnt = mntget(shm_mnt);
3382         path.dentry = d_alloc_pseudo(sb, &this);
3383         if (!path.dentry)
3384                 goto put_memory;
3385         d_set_d_op(path.dentry, &anon_ops);
3386 
3387         res = ERR_PTR(-ENOSPC);
3388         inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3389         if (!inode)
3390                 goto put_memory;
3391 
3392         inode->i_flags |= i_flags;
3393         d_instantiate(path.dentry, inode);
3394         inode->i_size = size;
3395         clear_nlink(inode);     /* It is unlinked */
3396         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
3397         if (IS_ERR(res))
3398                 goto put_path;
3399 
3400         res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3401                   &shmem_file_operations);
3402         if (IS_ERR(res))
3403                 goto put_path;
3404 
3405         return res;
3406 
3407 put_memory:
3408         shmem_unacct_size(flags, size);
3409 put_path:
3410         path_put(&path);
3411         return res;
3412 }
3413 
3414 /**
3415  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3416  *      kernel internal.  There will be NO LSM permission checks against the
3417  *      underlying inode.  So users of this interface must do LSM checks at a
3418  *      higher layer.  The users are the big_key and shm implementations.  LSM
3419  *      checks are provided at the key or shm level rather than the inode.
3420  * @name: name for dentry (to be seen in /proc/<pid>/maps
3421  * @size: size to be set for the file
3422  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3423  */
3424 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
3425 {
3426         return __shmem_file_setup(name, size, flags, S_PRIVATE);
3427 }
3428 
3429 /**
3430  * shmem_file_setup - get an unlinked file living in tmpfs
3431  * @name: name for dentry (to be seen in /proc/<pid>/maps
3432  * @size: size to be set for the file
3433  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3434  */
3435 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3436 {
3437         return __shmem_file_setup(name, size, flags, 0);
3438 }
3439 EXPORT_SYMBOL_GPL(shmem_file_setup);
3440 
3441 /**
3442  * shmem_zero_setup - setup a shared anonymous mapping
3443  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
3444  */
3445 int shmem_zero_setup(struct vm_area_struct *vma)
3446 {
3447         struct file *file;
3448         loff_t size = vma->vm_end - vma->vm_start;
3449 
3450         /*
3451          * Cloning a new file under mmap_sem leads to a lock ordering conflict
3452          * between XFS directory reading and selinux: since this file is only
3453          * accessible to the user through its mapping, use S_PRIVATE flag to
3454          * bypass file security, in the same way as shmem_kernel_file_setup().
3455          */
3456         file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
3457         if (IS_ERR(file))
3458                 return PTR_ERR(file);
3459 
3460         if (vma->vm_file)
3461                 fput(vma->vm_file);
3462         vma->vm_file = file;
3463         vma->vm_ops = &shmem_vm_ops;
3464         return 0;
3465 }
3466 
3467 /**
3468  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3469  * @mapping:    the page's address_space
3470  * @index:      the page index
3471  * @gfp:        the page allocator flags to use if allocating
3472  *
3473  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3474  * with any new page allocations done using the specified allocation flags.
3475  * But read_cache_page_gfp() uses the ->readpage() method: which does not
3476  * suit tmpfs, since it may have pages in swapcache, and needs to find those
3477  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
3478  *
3479  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3480  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
3481  */
3482 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3483                                          pgoff_t index, gfp_t gfp)
3484 {
3485 #ifdef CONFIG_SHMEM
3486         struct inode *inode = mapping->host;
3487         struct page *page;
3488         int error;
3489 
3490         BUG_ON(mapping->a_ops != &shmem_aops);
3491         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
3492                                   gfp, NULL, NULL);
3493         if (error)
3494                 page = ERR_PTR(error);
3495         else
3496                 unlock_page(page);
3497         return page;
3498 #else
3499         /*
3500          * The tiny !SHMEM case uses ramfs without swap
3501          */
3502         return read_cache_page_gfp(mapping, index, gfp);
3503 #endif
3504 }
3505 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
3506 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us