Linux/fs/file.c

  1 /*
  2  *  linux/fs/file.c
  3  *
  4  *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
  5  *
  6  *  Manage the dynamic fd arrays in the process files_struct.
  7  */
  8 
  9 #include <linux/syscalls.h>
 10 #include <linux/export.h>
 11 #include <linux/fs.h>
 12 #include <linux/mm.h>
 13 #include <linux/mmzone.h>
 14 #include <linux/time.h>
 15 #include <linux/sched.h>
 16 #include <linux/slab.h>
 17 #include <linux/vmalloc.h>
 18 #include <linux/file.h>
 19 #include <linux/fdtable.h>
 20 #include <linux/bitops.h>
 21 #include <linux/interrupt.h>
 22 #include <linux/spinlock.h>
 23 #include <linux/rcupdate.h>
 24 #include <linux/workqueue.h>
 25 
 26 unsigned int sysctl_nr_open __read_mostly = 1024*1024;
 27 unsigned int sysctl_nr_open_min = BITS_PER_LONG;
 28 /* our min() is unusable in constant expressions ;-/ */
 29 #define __const_min(x, y) ((x) < (y) ? (x) : (y))
 30 unsigned int sysctl_nr_open_max =
 31         __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
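
A worked example of the expression above (illustrative): on a 64-bit build,
~(size_t)0 / sizeof(void *) far exceeds INT_MAX, so __const_min() picks INT_MAX
(2147483647); masking with -BITS_PER_LONG (-64) rounds that down to a multiple
of 64, giving sysctl_nr_open_max = 2147483584.  On a 32-bit build the
pointer-count bound 1073741823 wins instead and rounds down to 1073741792.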
 32 
 33 static void *alloc_fdmem(size_t size)
 34 {
 35         /*
 36          * Very large allocations can stress page reclaim, so fall back to
 37          * vmalloc() if the allocation size will be considered "large" by the VM.
 38          */
 39         if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 40                 void *data = kmalloc(size, GFP_KERNEL_ACCOUNT |
 41                                      __GFP_NOWARN | __GFP_NORETRY);
 42                 if (data != NULL)
 43                         return data;
 44         }
 45         return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL);
 46 }
 47 
 48 static void __free_fdtable(struct fdtable *fdt)
 49 {
 50         kvfree(fdt->fd);
 51         kvfree(fdt->open_fds);
 52         kfree(fdt);
 53 }
 54 
 55 static void free_fdtable_rcu(struct rcu_head *rcu)
 56 {
 57         __free_fdtable(container_of(rcu, struct fdtable, rcu));
 58 }
 59 
 60 #define BITBIT_NR(nr)   BITS_TO_LONGS(BITS_TO_LONGS(nr))
 61 #define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
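
A quick illustration of the two macros above, assuming BITS_PER_LONG == 64: a
256-entry table needs BITS_TO_LONGS(256) = 4 longs of open_fds, and the
second-level "full words" summary needs BITS_TO_LONGS(4) = 1 long, so
BITBIT_SIZE(256) = 8 bytes.  At the default sysctl_nr_open of 1024*1024 fds the
summary bitmap grows to BITS_TO_LONGS(16384) = 256 longs, i.e. 2048 bytes.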
 62 
 63 /*
 64  * Copy 'count' fd bits from the old table to the new table and clear the extra
 65  * space if any.  This does not copy the file pointers.  Called with the files
 66  * spinlock held for write.
 67  */
 68 static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
 69                             unsigned int count)
 70 {
 71         unsigned int cpy, set;
 72 
 73         cpy = count / BITS_PER_BYTE;
 74         set = (nfdt->max_fds - count) / BITS_PER_BYTE;
 75         memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
 76         memset((char *)nfdt->open_fds + cpy, 0, set);
 77         memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
 78         memset((char *)nfdt->close_on_exec + cpy, 0, set);
 79 
 80         cpy = BITBIT_SIZE(count);
 81         set = BITBIT_SIZE(nfdt->max_fds) - cpy;
 82         memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
 83         memset((char *)nfdt->full_fds_bits + cpy, 0, set);
 84 }
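
For concreteness (again with BITS_PER_LONG == 64): growing from a 128-slot to a
256-slot table with count == 128 copies 128/8 = 16 bytes each of open_fds and
close_on_exec and zeroes the remaining (256 - 128)/8 = 16 bytes, while the
full_fds_bits pass copies BITBIT_SIZE(128) = 8 bytes and has nothing left to
zero, since BITBIT_SIZE(256) is also 8 bytes.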
 85 
 86 /*
 87  * Copy all file descriptors from the old table to the new, expanded table and
 88  * clear the extra space.  Called with the files spinlock held for write.
 89  */
 90 static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
 91 {
 92         unsigned int cpy, set;
 93 
 94         BUG_ON(nfdt->max_fds < ofdt->max_fds);
 95 
 96         cpy = ofdt->max_fds * sizeof(struct file *);
 97         set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
 98         memcpy(nfdt->fd, ofdt->fd, cpy);
 99         memset((char *)nfdt->fd + cpy, 0, set);
100 
101         copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
102 }
103 
104 static struct fdtable * alloc_fdtable(unsigned int nr)
105 {
106         struct fdtable *fdt;
107         void *data;
108 
109         /*
110          * Figure out how many fds we actually want to support in this fdtable.
111          * Allocation steps are keyed to the size of the fdarray, since it
112          * grows far faster than any of the other dynamic data. We try to fit
113          * the fdarray into comfortable page-tuned chunks: starting at 1024B
114          * and growing in powers of two from there on.
115          */
116         nr /= (1024 / sizeof(struct file *));
117         nr = roundup_pow_of_two(nr + 1);
118         nr *= (1024 / sizeof(struct file *));
119         /*
120          * Note that this can drive nr *below* what we had passed if sysctl_nr_open
121          * had been set lower between the check in expand_files() and here.  Deal
122          * with that in the caller; it's cheaper that way.
123          *
124          * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
125          * the bitmap handling below becomes unpleasant, to put it mildly...
126          */
127         if (unlikely(nr > sysctl_nr_open))
128                 nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
129 
130         fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
131         if (!fdt)
132                 goto out;
133         fdt->max_fds = nr;
134         data = alloc_fdmem(nr * sizeof(struct file *));
135         if (!data)
136                 goto out_fdt;
137         fdt->fd = data;
138 
139         data = alloc_fdmem(max_t(size_t,
140                                  2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
141         if (!data)
142                 goto out_arr;
143         fdt->open_fds = data;
144         data += nr / BITS_PER_BYTE;
145         fdt->close_on_exec = data;
146         data += nr / BITS_PER_BYTE;
147         fdt->full_fds_bits = data;
148 
149         return fdt;
150 
151 out_arr:
152         kvfree(fdt->fd);
153 out_fdt:
154         kfree(fdt);
155 out:
156         return NULL;
157 }
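
To make the sizing above concrete (illustrative, assuming 8-byte file pointers
and BITS_PER_LONG == 64): a request for descriptor 300 gives nr = 300 / 128 = 2,
then roundup_pow_of_two(2 + 1) = 4, then nr = 4 * 128 = 512 slots.  The fd array
is 512 * 8 = 4096 bytes (one typical page), and the combined bitmap allocation
is 2 * 512/8 + BITBIT_SIZE(512) = 128 + 8 = 136 bytes.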
158 
159 /*
160  * Expand the file descriptor table.
161  * This function will allocate a new fdtable and both fd array and fdset, of
162  * the given size.
163  * Return <0 error code on error; 1 on successful completion.
164  * The files->file_lock should be held on entry, and will be held on exit.
165  */
166 static int expand_fdtable(struct files_struct *files, unsigned int nr)
167         __releases(files->file_lock)
168         __acquires(files->file_lock)
169 {
170         struct fdtable *new_fdt, *cur_fdt;
171 
172         spin_unlock(&files->file_lock);
173         new_fdt = alloc_fdtable(nr);
174 
175         /* make sure all __fd_install() calls have seen resize_in_progress
176          * or have finished their rcu_read_lock_sched() section.
177          */
178         if (atomic_read(&files->count) > 1)
179                 synchronize_sched();
180 
181         spin_lock(&files->file_lock);
182         if (!new_fdt)
183                 return -ENOMEM;
184         /*
185          * extremely unlikely race - sysctl_nr_open decreased between the check in
186          * the caller and alloc_fdtable().  Cheaper to catch it here...
187          */
188         if (unlikely(new_fdt->max_fds <= nr)) {
189                 __free_fdtable(new_fdt);
190                 return -EMFILE;
191         }
192         cur_fdt = files_fdtable(files);
193         BUG_ON(nr < cur_fdt->max_fds);
194         copy_fdtable(new_fdt, cur_fdt);
195         rcu_assign_pointer(files->fdt, new_fdt);
196         if (cur_fdt != &files->fdtab)
197                 call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
198         /* coupled with smp_rmb() in __fd_install() */
199         smp_wmb();
200         return 1;
201 }
202 
203 /*
204  * Expand files.
205  * This function will expand the file structures, if the requested size exceeds
206  * the current capacity and there is room for expansion.
207  * Return <0 error code on error; 0 when nothing done; 1 when files were
208  * expanded and execution may have blocked.
209  * The files->file_lock should be held on entry, and will be held on exit.
210  */
211 static int expand_files(struct files_struct *files, unsigned int nr)
212         __releases(files->file_lock)
213         __acquires(files->file_lock)
214 {
215         struct fdtable *fdt;
216         int expanded = 0;
217 
218 repeat:
219         fdt = files_fdtable(files);
220 
221         /* Do we need to expand? */
222         if (nr < fdt->max_fds)
223                 return expanded;
224 
225         /* Can we expand? */
226         if (nr >= sysctl_nr_open)
227                 return -EMFILE;
228 
229         if (unlikely(files->resize_in_progress)) {
230                 spin_unlock(&files->file_lock);
231                 expanded = 1;
232                 wait_event(files->resize_wait, !files->resize_in_progress);
233                 spin_lock(&files->file_lock);
234                 goto repeat;
235         }
236 
237         /* All good, so we try */
238         files->resize_in_progress = true;
239         expanded = expand_fdtable(files, nr);
240         files->resize_in_progress = false;
241 
242         wake_up_all(&files->resize_wait);
243         return expanded;
244 }
245 
246 static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
247 {
248         __set_bit(fd, fdt->close_on_exec);
249 }
250 
251 static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
252 {
253         if (test_bit(fd, fdt->close_on_exec))
254                 __clear_bit(fd, fdt->close_on_exec);
255 }
256 
257 static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
258 {
259         __set_bit(fd, fdt->open_fds);
260         fd /= BITS_PER_LONG;
261         if (!~fdt->open_fds[fd])
262                 __set_bit(fd, fdt->full_fds_bits);
263 }
264 
265 static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
266 {
267         __clear_bit(fd, fdt->open_fds);
268         __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
269 }
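
An illustration of the two-level bookkeeping above (BITS_PER_LONG == 64):
__set_open_fd(130) sets bit 130 of open_fds and, if that makes word 2 of
open_fds all ones, also sets bit 2 of full_fds_bits so searches can skip the
whole word; __clear_open_fd(130) clears bit 130 and unconditionally clears
bit 2 of full_fds_bits, since word 2 can no longer be full.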
270 
271 static unsigned int count_open_files(struct fdtable *fdt)
272 {
273         unsigned int size = fdt->max_fds;
274         unsigned int i;
275 
276         /* Find the last open fd */
277         for (i = size / BITS_PER_LONG; i > 0; ) {
278                 if (fdt->open_fds[--i])
279                         break;
280         }
281         i = (i + 1) * BITS_PER_LONG;
282         return i;
283 }
284 
285 /*
286  * Allocate a new files structure and copy contents from the
287  * passed in files structure.
288  * errorp will be valid only when the returned files_struct is NULL.
289  */
290 struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
291 {
292         struct files_struct *newf;
293         struct file **old_fds, **new_fds;
294         unsigned int open_files, i;
295         struct fdtable *old_fdt, *new_fdt;
296 
297         *errorp = -ENOMEM;
298         newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
299         if (!newf)
300                 goto out;
301 
302         atomic_set(&newf->count, 1);
303 
304         spin_lock_init(&newf->file_lock);
305         newf->resize_in_progress = false;
306         init_waitqueue_head(&newf->resize_wait);
307         newf->next_fd = 0;
308         new_fdt = &newf->fdtab;
309         new_fdt->max_fds = NR_OPEN_DEFAULT;
310         new_fdt->close_on_exec = newf->close_on_exec_init;
311         new_fdt->open_fds = newf->open_fds_init;
312         new_fdt->full_fds_bits = newf->full_fds_bits_init;
313         new_fdt->fd = &newf->fd_array[0];
314 
315         spin_lock(&oldf->file_lock);
316         old_fdt = files_fdtable(oldf);
317         open_files = count_open_files(old_fdt);
318 
319         /*
320          * Check whether we need to allocate a larger fd array and fd set.
321          */
322         while (unlikely(open_files > new_fdt->max_fds)) {
323                 spin_unlock(&oldf->file_lock);
324 
325                 if (new_fdt != &newf->fdtab)
326                         __free_fdtable(new_fdt);
327 
328                 new_fdt = alloc_fdtable(open_files - 1);
329                 if (!new_fdt) {
330                         *errorp = -ENOMEM;
331                         goto out_release;
332                 }
333 
334                 /* beyond sysctl_nr_open; nothing to do */
335                 if (unlikely(new_fdt->max_fds < open_files)) {
336                         __free_fdtable(new_fdt);
337                         *errorp = -EMFILE;
338                         goto out_release;
339                 }
340 
341                 /*
342                  * Reacquire the oldf lock and a pointer to its fd table;
343                  * it may have grown a new, bigger fd table in the meantime,
344                  * so we need the latest pointer.
345                  */
346                 spin_lock(&oldf->file_lock);
347                 old_fdt = files_fdtable(oldf);
348                 open_files = count_open_files(old_fdt);
349         }
350 
351         copy_fd_bitmaps(new_fdt, old_fdt, open_files);
352 
353         old_fds = old_fdt->fd;
354         new_fds = new_fdt->fd;
355 
356         for (i = open_files; i != 0; i--) {
357                 struct file *f = *old_fds++;
358                 if (f) {
359                         get_file(f);
360                 } else {
361                         /*
362                          * The fd may be claimed in the fd bitmap but not yet
363                          * instantiated in the files array if a sibling thread
364                          * is partway through open().  So make sure that this
365                          * fd is available to the new process.
366                          */
367                         __clear_open_fd(open_files - i, new_fdt);
368                 }
369                 rcu_assign_pointer(*new_fds++, f);
370         }
371         spin_unlock(&oldf->file_lock);
372 
373         /* clear the remainder */
374         memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));
375 
376         rcu_assign_pointer(newf->fdt, new_fdt);
377 
378         return newf;
379 
380 out_release:
381         kmem_cache_free(files_cachep, newf);
382 out:
383         return NULL;
384 }
385 
386 static struct fdtable *close_files(struct files_struct * files)
387 {
388         /*
389          * It is safe to dereference the fd table without RCU or
390          * ->file_lock because this is the last reference to the
391          * files structure.
392          */
393         struct fdtable *fdt = rcu_dereference_raw(files->fdt);
394         unsigned int i, j = 0;
395 
396         for (;;) {
397                 unsigned long set;
398                 i = j * BITS_PER_LONG;
399                 if (i >= fdt->max_fds)
400                         break;
401                 set = fdt->open_fds[j++];
402                 while (set) {
403                         if (set & 1) {
404                                 struct file * file = xchg(&fdt->fd[i], NULL);
405                                 if (file) {
406                                         filp_close(file, files);
407                                         cond_resched_rcu_qs();
408                                 }
409                         }
410                         i++;
411                         set >>= 1;
412                 }
413         }
414 
415         return fdt;
416 }
417 
418 struct files_struct *get_files_struct(struct task_struct *task)
419 {
420         struct files_struct *files;
421 
422         task_lock(task);
423         files = task->files;
424         if (files)
425                 atomic_inc(&files->count);
426         task_unlock(task);
427 
428         return files;
429 }
430 
431 void put_files_struct(struct files_struct *files)
432 {
433         if (atomic_dec_and_test(&files->count)) {
434                 struct fdtable *fdt = close_files(files);
435 
436                 /* free the arrays if they are not embedded */
437                 if (fdt != &files->fdtab)
438                         __free_fdtable(fdt);
439                 kmem_cache_free(files_cachep, files);
440         }
441 }
442 
443 void reset_files_struct(struct files_struct *files)
444 {
445         struct task_struct *tsk = current;
446         struct files_struct *old;
447 
448         old = tsk->files;
449         task_lock(tsk);
450         tsk->files = files;
451         task_unlock(tsk);
452         put_files_struct(old);
453 }
454 
455 void exit_files(struct task_struct *tsk)
456 {
457         struct files_struct * files = tsk->files;
458 
459         if (files) {
460                 task_lock(tsk);
461                 tsk->files = NULL;
462                 task_unlock(tsk);
463                 put_files_struct(files);
464         }
465 }
466 
467 struct files_struct init_files = {
468         .count          = ATOMIC_INIT(1),
469         .fdt            = &init_files.fdtab,
470         .fdtab          = {
471                 .max_fds        = NR_OPEN_DEFAULT,
472                 .fd             = &init_files.fd_array[0],
473                 .close_on_exec  = init_files.close_on_exec_init,
474                 .open_fds       = init_files.open_fds_init,
475                 .full_fds_bits  = init_files.full_fds_bits_init,
476         },
477         .file_lock      = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
478 };
479 
480 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
481 {
482         unsigned int maxfd = fdt->max_fds;
483         unsigned int maxbit = maxfd / BITS_PER_LONG;
484         unsigned int bitbit = start / BITS_PER_LONG;
485 
486         bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
487         if (bitbit > maxfd)
488                 return maxfd;
489         if (bitbit > start)
490                 start = bitbit;
491         return find_next_zero_bit(fdt->open_fds, maxfd, start);
492 }
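
A short trace of the search above (BITS_PER_LONG == 64, max_fds == 256): with
start == 70, bitbit starts at 70 / 64 = 1.  If descriptors 64-127 are all open,
bit 1 of full_fds_bits is set, so the first find_next_zero_bit() returns 2 and
bitbit becomes 128; since 128 > 70, the final scan of open_fds starts at 128,
skipping the known-full word entirely.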
493 
494 /*
495  * allocate a file descriptor, mark it busy.
496  */
497 int __alloc_fd(struct files_struct *files,
498                unsigned start, unsigned end, unsigned flags)
499 {
500         unsigned int fd;
501         int error;
502         struct fdtable *fdt;
503 
504         spin_lock(&files->file_lock);
505 repeat:
506         fdt = files_fdtable(files);
507         fd = start;
508         if (fd < files->next_fd)
509                 fd = files->next_fd;
510 
511         if (fd < fdt->max_fds)
512                 fd = find_next_fd(fdt, fd);
513 
514         /*
515          * N.B. For clone tasks sharing a files structure, this test
516          * will limit the total number of files that can be opened.
517          */
518         error = -EMFILE;
519         if (fd >= end)
520                 goto out;
521 
522         error = expand_files(files, fd);
523         if (error < 0)
524                 goto out;
525 
526         /*
527          * If we needed to expand the fs array we
528          * might have blocked - try again.
529          */
530         if (error)
531                 goto repeat;
532 
533         if (start <= files->next_fd)
534                 files->next_fd = fd + 1;
535 
536         __set_open_fd(fd, fdt);
537         if (flags & O_CLOEXEC)
538                 __set_close_on_exec(fd, fdt);
539         else
540                 __clear_close_on_exec(fd, fdt);
541         error = fd;
542 #if 1
543         /* Sanity check */
544         if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
545                 printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
546                 rcu_assign_pointer(fdt->fd[fd], NULL);
547         }
548 #endif
549 
550 out:
551         spin_unlock(&files->file_lock);
552         return error;
553 }
554 
555 static int alloc_fd(unsigned start, unsigned flags)
556 {
557         return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
558 }
559 
560 int get_unused_fd_flags(unsigned flags)
561 {
562         return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
563 }
564 EXPORT_SYMBOL(get_unused_fd_flags);
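
A minimal sketch (not part of fs/file.c) of how get_unused_fd_flags() is
normally paired with fd_install() further down: reserve a descriptor, build the
struct file, then publish it.  example_create_file() is a hypothetical stand-in
for whatever actually produces the file (anon_inode_getfile(), dentry_open(), ...).

static int example_open_something(void)
{
        struct file *filp;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;                      /* e.g. -EMFILE */

        filp = example_create_file();           /* hypothetical helper */
        if (IS_ERR(filp)) {
                put_unused_fd(fd);              /* give the reserved slot back */
                return PTR_ERR(filp);
        }

        fd_install(fd, filp);                   /* the descriptor is live from here on */
        return fd;
}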
565 
566 static void __put_unused_fd(struct files_struct *files, unsigned int fd)
567 {
568         struct fdtable *fdt = files_fdtable(files);
569         __clear_open_fd(fd, fdt);
570         if (fd < files->next_fd)
571                 files->next_fd = fd;
572 }
573 
574 void put_unused_fd(unsigned int fd)
575 {
576         struct files_struct *files = current->files;
577         spin_lock(&files->file_lock);
578         __put_unused_fd(files, fd);
579         spin_unlock(&files->file_lock);
580 }
581 
582 EXPORT_SYMBOL(put_unused_fd);
583 
584 /*
585  * Install a file pointer in the fd array.
586  *
587  * The VFS is full of places where we drop the files lock between
588  * setting the open_fds bitmap and installing the file in the file
589  * array.  At any such point, we are vulnerable to a dup2() race
590  * installing a file in the array before us.  We need to detect this and
591  * fput() the struct file we are about to overwrite in this case.
592  *
593  * It should never happen - if we allow dup2() to do it, _really_ bad things
594  * will follow.
595  *
596  * NOTE: __fd_install() variant is really, really low-level; don't
597  * use it unless you are forced to by truly lousy API shoved down
598  * your throat.  'files' *MUST* be either current->files or obtained
599  * by get_files_struct(current) done by whoever had given it to you,
600  * or really bad things will happen.  Normally you want to use
601  * fd_install() instead.
602  */
603 
604 void __fd_install(struct files_struct *files, unsigned int fd,
605                 struct file *file)
606 {
607         struct fdtable *fdt;
608 
609         might_sleep();
610         rcu_read_lock_sched();
611 
612         while (unlikely(files->resize_in_progress)) {
613                 rcu_read_unlock_sched();
614                 wait_event(files->resize_wait, !files->resize_in_progress);
615                 rcu_read_lock_sched();
616         }
617         /* coupled with smp_wmb() in expand_fdtable() */
618         smp_rmb();
619         fdt = rcu_dereference_sched(files->fdt);
620         BUG_ON(fdt->fd[fd] != NULL);
621         rcu_assign_pointer(fdt->fd[fd], file);
622         rcu_read_unlock_sched();
623 }
624 
625 void fd_install(unsigned int fd, struct file *file)
626 {
627         __fd_install(current->files, fd, file);
628 }
629 
630 EXPORT_SYMBOL(fd_install);
631 
632 /*
633  * The same warnings as for __alloc_fd()/__fd_install() apply here...
634  */
635 int __close_fd(struct files_struct *files, unsigned fd)
636 {
637         struct file *file;
638         struct fdtable *fdt;
639 
640         spin_lock(&files->file_lock);
641         fdt = files_fdtable(files);
642         if (fd >= fdt->max_fds)
643                 goto out_unlock;
644         file = fdt->fd[fd];
645         if (!file)
646                 goto out_unlock;
647         rcu_assign_pointer(fdt->fd[fd], NULL);
648         __clear_close_on_exec(fd, fdt);
649         __put_unused_fd(files, fd);
650         spin_unlock(&files->file_lock);
651         return filp_close(file, files);
652 
653 out_unlock:
654         spin_unlock(&files->file_lock);
655         return -EBADF;
656 }
657 
658 void do_close_on_exec(struct files_struct *files)
659 {
660         unsigned i;
661         struct fdtable *fdt;
662 
663         /* exec unshares first */
664         spin_lock(&files->file_lock);
665         for (i = 0; ; i++) {
666                 unsigned long set;
667                 unsigned fd = i * BITS_PER_LONG;
668                 fdt = files_fdtable(files);
669                 if (fd >= fdt->max_fds)
670                         break;
671                 set = fdt->close_on_exec[i];
672                 if (!set)
673                         continue;
674                 fdt->close_on_exec[i] = 0;
675                 for ( ; set ; fd++, set >>= 1) {
676                         struct file *file;
677                         if (!(set & 1))
678                                 continue;
679                         file = fdt->fd[fd];
680                         if (!file)
681                                 continue;
682                         rcu_assign_pointer(fdt->fd[fd], NULL);
683                         __put_unused_fd(files, fd);
684                         spin_unlock(&files->file_lock);
685                         filp_close(file, files);
686                         cond_resched();
687                         spin_lock(&files->file_lock);
688                 }
689 
690         }
691         spin_unlock(&files->file_lock);
692 }
693 
694 static struct file *__fget(unsigned int fd, fmode_t mask)
695 {
696         struct files_struct *files = current->files;
697         struct file *file;
698 
699         rcu_read_lock();
700 loop:
701         file = fcheck_files(files, fd);
702         if (file) {
703                 /* The file object ref couldn't be taken (the file may
704                  * already be going away); dup2()'s atomicity guarantee is
705                  * the reason we loop to catch the new file (or NULL pointer)
706                  */
707                 if (file->f_mode & mask)
708                         file = NULL;
709                 else if (!get_file_rcu(file))
710                         goto loop;
711         }
712         rcu_read_unlock();
713 
714         return file;
715 }
716 
717 struct file *fget(unsigned int fd)
718 {
719         return __fget(fd, FMODE_PATH);
720 }
721 EXPORT_SYMBOL(fget);
722 
723 struct file *fget_raw(unsigned int fd)
724 {
725         return __fget(fd, 0);
726 }
727 EXPORT_SYMBOL(fget_raw);
728 
729 /*
730  * Lightweight file lookup - no refcnt increment if fd table isn't shared.
731  *
732  * You can use this instead of fget if you satisfy all of the following
733  * conditions:
734  * 1) You must call fput_light before exiting the syscall and returning control
735  *    to userspace (i.e. you cannot remember the returned struct file * after
736  *    returning to userspace).
737  * 2) You must not call filp_close on the returned struct file * in between
738  *    calls to fget_light and fput_light.
739  * 3) You must not clone the current task in between the calls to fget_light
740  *    and fput_light.
741  *
742  * The fput_needed flag returned by fget_light should be passed to the
743  * corresponding fput_light.
744  */
745 static unsigned long __fget_light(unsigned int fd, fmode_t mask)
746 {
747         struct files_struct *files = current->files;
748         struct file *file;
749 
750         if (atomic_read(&files->count) == 1) {
751                 file = __fcheck_files(files, fd);
752                 if (!file || unlikely(file->f_mode & mask))
753                         return 0;
754                 return (unsigned long)file;
755         } else {
756                 file = __fget(fd, mask);
757                 if (!file)
758                         return 0;
759                 return FDPUT_FPUT | (unsigned long)file;
760         }
761 }
762 unsigned long __fdget(unsigned int fd)
763 {
764         return __fget_light(fd, FMODE_PATH);
765 }
766 EXPORT_SYMBOL(__fdget);
767 
768 unsigned long __fdget_raw(unsigned int fd)
769 {
770         return __fget_light(fd, 0);
771 }
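
A minimal sketch (not part of fs/file.c) of how a syscall consumes the
lightweight lookup above, assuming the fdget()/fdput() wrappers from
<linux/file.h> that unpack the __fget_light() return value into a struct fd:

static long example_sys_op(unsigned int fd)
{
        struct fd f = fdget(fd);
        long ret = -EBADF;

        if (f.file) {
                ret = 0;        /* ... operate on f.file, but do not stash it ... */
                fdput(f);       /* drops the reference only if one was taken */
        }
        return ret;
}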
772 
773 unsigned long __fdget_pos(unsigned int fd)
774 {
775         unsigned long v = __fdget(fd);
776         struct file *file = (struct file *)(v & ~3);
777 
778         if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
779                 if (file_count(file) > 1) {
780                         v |= FDPUT_POS_UNLOCK;
781                         mutex_lock(&file->f_pos_lock);
782                 }
783         }
784         return v;
785 }
786 
787 void __f_unlock_pos(struct file *f)
788 {
789         mutex_unlock(&f->f_pos_lock);
790 }
791 
792 /*
793  * We only lock f_pos if we have threads or if the file might be
794  * shared with another process. In both cases we'll have an elevated
795  * file count (done either by fdget() or by fork()).
796  */
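
A corresponding sketch (not part of fs/file.c) for positional I/O paths,
assuming the fdget_pos()/fdput_pos() wrappers from <linux/file.h> around
__fdget_pos() and __f_unlock_pos() above:

static long example_positional_op(unsigned int fd)
{
        struct fd f = fdget_pos(fd);
        long ret = -EBADF;

        if (f.file) {
                ret = 0;        /* ... read f.file->f_pos, do the I/O, update it ... */
                fdput_pos(f);   /* releases f_pos_lock if __fdget_pos() took it */
        }
        return ret;
}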
797 
798 void set_close_on_exec(unsigned int fd, int flag)
799 {
800         struct files_struct *files = current->files;
801         struct fdtable *fdt;
802         spin_lock(&files->file_lock);
803         fdt = files_fdtable(files);
804         if (flag)
805                 __set_close_on_exec(fd, fdt);
806         else
807                 __clear_close_on_exec(fd, fdt);
808         spin_unlock(&files->file_lock);
809 }
810 
811 bool get_close_on_exec(unsigned int fd)
812 {
813         struct files_struct *files = current->files;
814         struct fdtable *fdt;
815         bool res;
816         rcu_read_lock();
817         fdt = files_fdtable(files);
818         res = close_on_exec(fd, fdt);
819         rcu_read_unlock();
820         return res;
821 }
822 
823 static int do_dup2(struct files_struct *files,
824         struct file *file, unsigned fd, unsigned flags)
825 __releases(&files->file_lock)
826 {
827         struct file *tofree;
828         struct fdtable *fdt;
829 
830         /*
831          * We need to detect attempts to do dup2() over an allocated but still
832          * not finished descriptor.  NB: OpenBSD avoids that at the price of
833          * extra work in their equivalent of fget() - they insert struct
834          * file immediately after grabbing descriptor, mark it larval if
835          * more work (e.g. actual opening) is needed and make sure that
836          * fget() treats larval files as absent.  Potentially interesting,
837          * but while extra work in fget() is trivial, locking implications
838          * and amount of surgery on open()-related paths in VFS are not.
839          * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
840          * deadlocks in rather amusing ways, AFAICS.  All of that is out of
841          * scope of POSIX or SUS, since neither considers shared descriptor
842          * tables and this condition does not arise without those.
843          */
844         fdt = files_fdtable(files);
845         tofree = fdt->fd[fd];
846         if (!tofree && fd_is_open(fd, fdt))
847                 goto Ebusy;
848         get_file(file);
849         rcu_assign_pointer(fdt->fd[fd], file);
850         __set_open_fd(fd, fdt);
851         if (flags & O_CLOEXEC)
852                 __set_close_on_exec(fd, fdt);
853         else
854                 __clear_close_on_exec(fd, fdt);
855         spin_unlock(&files->file_lock);
856 
857         if (tofree)
858                 filp_close(tofree, files);
859 
860         return fd;
861 
862 Ebusy:
863         spin_unlock(&files->file_lock);
864         return -EBUSY;
865 }
866 
867 int replace_fd(unsigned fd, struct file *file, unsigned flags)
868 {
869         int err;
870         struct files_struct *files = current->files;
871 
872         if (!file)
873                 return __close_fd(files, fd);
874 
875         if (fd >= rlimit(RLIMIT_NOFILE))
876                 return -EBADF;
877 
878         spin_lock(&files->file_lock);
879         err = expand_files(files, fd);
880         if (unlikely(err < 0))
881                 goto out_unlock;
882         return do_dup2(files, file, fd, flags);
883 
884 out_unlock:
885         spin_unlock(&files->file_lock);
886         return err;
887 }
888 
889 SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
890 {
891         int err = -EBADF;
892         struct file *file;
893         struct files_struct *files = current->files;
894 
895         if ((flags & ~O_CLOEXEC) != 0)
896                 return -EINVAL;
897 
898         if (unlikely(oldfd == newfd))
899                 return -EINVAL;
900 
901         if (newfd >= rlimit(RLIMIT_NOFILE))
902                 return -EBADF;
903 
904         spin_lock(&files->file_lock);
905         err = expand_files(files, newfd);
906         file = fcheck(oldfd);
907         if (unlikely(!file))
908                 goto Ebadf;
909         if (unlikely(err < 0)) {
910                 if (err == -EMFILE)
911                         goto Ebadf;
912                 goto out_unlock;
913         }
914         return do_dup2(files, file, newfd, flags);
915 
916 Ebadf:
917         err = -EBADF;
918 out_unlock:
919         spin_unlock(&files->file_lock);
920         return err;
921 }
922 
923 SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
924 {
925         if (unlikely(newfd == oldfd)) { /* corner case */
926                 struct files_struct *files = current->files;
927                 int retval = oldfd;
928 
929                 rcu_read_lock();
930                 if (!fcheck_files(files, oldfd))
931                         retval = -EBADF;
932                 rcu_read_unlock();
933                 return retval;
934         }
935         return sys_dup3(oldfd, newfd, 0);
936 }
937 
938 SYSCALL_DEFINE1(dup, unsigned int, fildes)
939 {
940         int ret = -EBADF;
941         struct file *file = fget_raw(fildes);
942 
943         if (file) {
944                 ret = get_unused_fd_flags(0);
945                 if (ret >= 0)
946                         fd_install(ret, file);
947                 else
948                         fput(file);
949         }
950         return ret;
951 }
952 
953 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
954 {
955         int err;
956         if (from >= rlimit(RLIMIT_NOFILE))
957                 return -EINVAL;
958         err = alloc_fd(from, flags);
959         if (err >= 0) {
960                 get_file(file);
961                 fd_install(err, file);
962         }
963         return err;
964 }
965 
966 int iterate_fd(struct files_struct *files, unsigned n,
967                 int (*f)(const void *, struct file *, unsigned),
968                 const void *p)
969 {
970         struct fdtable *fdt;
971         int res = 0;
972         if (!files)
973                 return 0;
974         spin_lock(&files->file_lock);
975         for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
976                 struct file *file;
977                 file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
978                 if (!file)
979                         continue;
980                 res = f(p, file, n);
981                 if (res)
982                         break;
983         }
984         spin_unlock(&files->file_lock);
985         return res;
986 }
987 EXPORT_SYMBOL(iterate_fd);
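
A minimal sketch (not part of fs/file.c) of an iterate_fd() caller: the callback
runs under files->file_lock for each installed file, and a nonzero return stops
the walk and is propagated to the caller.

static int example_count_cb(const void *p, struct file *file, unsigned fd)
{
        (*(unsigned int *)p)++;         /* just count installed descriptors */
        return 0;                       /* keep iterating */
}

static unsigned int example_count_open(struct files_struct *files)
{
        unsigned int count = 0;

        iterate_fd(files, 0, example_count_cb, &count);
        return count;
}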
988 
