
Linux/mm/mincore.c

/*
 *      linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned char present;
        unsigned char *vec = walk->private;

        /*
         * Hugepages mapped by a user process are always in RAM and never
         * swapped out, but in principle their presence still has to be
         * checked.
         */
        present = pte && !huge_pte_none(huge_ptep_get(pte));
        for (; addr != end; vec++, addr += PAGE_SIZE)
                *vec = present;
        walk->private = vec;
#else
        BUG();
#endif
        return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
        unsigned char present = 0;
        struct page *page;

        /*
         * When tmpfs swaps out a page from a file, any process mapping that
         * file will not get a swp_entry_t in its pte, but rather it is like
         * any other file mapping (i.e. marked !present and faulted in with
         * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
         */
#ifdef CONFIG_SWAP
        if (shmem_mapping(mapping)) {
                page = find_get_entry(mapping, pgoff);
                /*
                 * shmem/tmpfs may return swap: account for swapcache
                 * page too.
                 */
                if (radix_tree_exceptional_entry(page)) {
                        swp_entry_t swp = radix_to_swp_entry(page);
                        page = find_get_page(swap_address_space(swp),
                                             swp_offset(swp));
                }
        } else
                page = find_get_page(mapping, pgoff);
#else
        page = find_get_page(mapping, pgoff);
#endif
        if (page) {
                present = PageUptodate(page);
                put_page(page);
        }

        return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
                                struct vm_area_struct *vma, unsigned char *vec)
{
        unsigned long nr = (end - addr) >> PAGE_SHIFT;
        int i;

        if (vma->vm_file) {
                pgoff_t pgoff;

                pgoff = linear_page_index(vma, addr);
                for (i = 0; i < nr; i++, pgoff++)
                        vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
        } else {
                for (i = 0; i < nr; i++)
                        vec[i] = 0;
        }
        return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
                                   struct mm_walk *walk)
{
        walk->private += __mincore_unmapped_range(addr, end,
                                                  walk->vma, walk->private);
        return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        struct mm_walk *walk)
{
        spinlock_t *ptl;
        struct vm_area_struct *vma = walk->vma;
        pte_t *ptep;
        unsigned char *vec = walk->private;
        int nr = (end - addr) >> PAGE_SHIFT;

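        /*
         * A transparent huge page covers the whole pmd range; if one is
         * mapped here, every page in the range is resident.
         */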
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                memset(vec, 1, nr);
                spin_unlock(ptl);
                goto out;
        }

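        /*
         * A pmd that is being split (or is otherwise unstable) cannot be
         * walked safely; report the range as if it were unmapped.
         */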
        if (pmd_trans_unstable(pmd)) {
                __mincore_unmapped_range(addr, end, vma, vec);
                goto out;
        }

        ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; ptep++, addr += PAGE_SIZE) {
                pte_t pte = *ptep;

                if (pte_none(pte))
                        __mincore_unmapped_range(addr, addr + PAGE_SIZE,
                                                 vma, vec);
                else if (pte_present(pte))
                        *vec = 1;
                else { /* pte is a swap entry */
                        swp_entry_t entry = pte_to_swp_entry(pte);

                        if (non_swap_entry(entry)) {
                                /*
                                 * migration or hwpoison entries are always
                                 * uptodate
                                 */
                                *vec = 1;
                        } else {
#ifdef CONFIG_SWAP
                                *vec = mincore_page(swap_address_space(entry),
                                                    swp_offset(entry));
#else
                                WARN_ON(1);
                                *vec = 1;
#endif
                        }
                }
                vec++;
        }
        pte_unmap_unlock(ptep - 1, ptl);
out:
        walk->private += nr;
        cond_resched();
        return 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked all the
 * arguments and we hold the mmap semaphore, so we just return the
 * amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
        struct vm_area_struct *vma;
        unsigned long end;
        int err;
        struct mm_walk mincore_walk = {
                .pmd_entry = mincore_pte_range,
                .pte_hole = mincore_unmapped_range,
                .hugetlb_entry = mincore_hugetlb,
                .private = vec,
        };

        vma = find_vma(current->mm, addr);
        if (!vma || addr < vma->vm_start)
                return -ENOMEM;
        mincore_walk.mm = vma->vm_mm;
        end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
        err = walk_page_range(addr, end, &mincore_walk);
        if (err < 0)
                return err;
        return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *              invalid for the address space of this process, or
 *              specify one or more pages which are not currently
 *              mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
                unsigned char __user *, vec)
{
        long retval;
        unsigned long pages;
        unsigned char *tmp;

        /* Check the start address: needs to be page-aligned.. */
        if (start & ~PAGE_MASK)
                return -EINVAL;

        /* ..and we need to be passed a valid user-space range */
        if (!access_ok(VERIFY_READ, (void __user *) start, len))
                return -ENOMEM;

        /* This also avoids any overflows on PAGE_ALIGN */
        pages = len >> PAGE_SHIFT;
        pages += (offset_in_page(len)) != 0;
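        /* (PAGE_ALIGN(len) could wrap to 0 when len is within a page of SIZE_MAX.) */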

        if (!access_ok(VERIFY_WRITE, vec, pages))
                return -EFAULT;

        tmp = (void *) __get_free_page(GFP_USER);
        if (!tmp)
                return -EAGAIN;

        retval = 0;
        while (pages) {
                /*
                 * Do at most PAGE_SIZE entries per iteration, due to
                 * the temporary buffer size.
                 */
                down_read(&current->mm->mmap_sem);
                retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
                up_read(&current->mm->mmap_sem);

                if (retval <= 0)
                        break;
                if (copy_to_user(vec, tmp, retval)) {
                        retval = -EFAULT;
                        break;
                }
                pages -= retval;
                vec += retval;
                start += retval << PAGE_SHIFT;
                retval = 0;
        }
        free_page((unsigned long) tmp);
        return retval;
}

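For context, here is a minimal user-space sketch of how mincore(2) is typically driven. It is not part of mm/mincore.c; the default file path and the step of touching the first page are illustrative assumptions. It maps a file, sizes the status vector at one byte per page (rounding the length up), and counts resident pages:

/*
 * Hypothetical user-space demo -- NOT part of mm/mincore.c.
 * The default path below is only an example.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/etc/hostname";
        long page_size = sysconf(_SC_PAGESIZE);
        struct stat st;
        int fd = open(path, O_RDONLY);

        if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
                perror(path);
                return 1;
        }

        void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* One status byte per page, rounding the length up. */
        size_t pages = ((size_t)st.st_size + page_size - 1) / page_size;
        unsigned char *vec = malloc(pages);
        if (!vec)
                return 1;

        /* Touch the first page so at least one entry should read 1. */
        volatile char first = ((const char *)map)[0];
        (void)first;

        if (mincore(map, st.st_size, vec) != 0) {
                perror("mincore");
                return 1;
        }

        size_t resident = 0;
        for (size_t i = 0; i < pages; i++)
                resident += vec[i] & 1; /* only the LSB is defined */

        printf("%zu of %zu pages resident\n", resident, pages);

        free(vec);
        munmap(map, st.st_size);
        close(fd);
        return 0;
}

As the kernel comment above notes, the result is only a snapshot: pages can be evicted or faulted in between the syscall returning and the vector being read.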
