
Linux/fs/dcache.c

  1 /*
  2  * fs/dcache.c
  3  *
  4  * Complete reimplementation
  5  * (C) 1997 Thomas Schoebel-Theuer,
  6  * with heavy changes by Linus Torvalds
  7  */
  8 
  9 /*
 10  * Notes on the allocation strategy:
 11  *
 12  * The dcache is a master of the icache - whenever a dcache entry
 13  * exists, the inode will always exist. "iput()" is done either when
 14  * the dcache entry is deleted or garbage collected.
 15  */
 16 
 17 #include <linux/syscalls.h>
 18 #include <linux/string.h>
 19 #include <linux/mm.h>
 20 #include <linux/fs.h>
 21 #include <linux/fsnotify.h>
 22 #include <linux/slab.h>
 23 #include <linux/init.h>
 24 #include <linux/hash.h>
 25 #include <linux/cache.h>
 26 #include <linux/export.h>
 27 #include <linux/mount.h>
 28 #include <linux/file.h>
 29 #include <asm/uaccess.h>
 30 #include <linux/security.h>
 31 #include <linux/seqlock.h>
 32 #include <linux/swap.h>
 33 #include <linux/bootmem.h>
 34 #include <linux/fs_struct.h>
 35 #include <linux/hardirq.h>
 36 #include <linux/bit_spinlock.h>
 37 #include <linux/rculist_bl.h>
 38 #include <linux/prefetch.h>
 39 #include <linux/ratelimit.h>
 40 #include <linux/list_lru.h>
 41 #include "internal.h"
 42 #include "mount.h"
 43 
 44 /*
 45  * Usage:
 46  * dcache->d_inode->i_lock protects:
 47  *   - i_dentry, d_alias, d_inode of aliases
 48  * dcache_hash_bucket lock protects:
 49  *   - the dcache hash table
 50  * s_anon bl list spinlock protects:
 51  *   - the s_anon list (see __d_drop)
 52  * dentry->d_sb->s_dentry_lru_lock protects:
 53  *   - the dcache lru lists and counters
 54  * d_lock protects:
 55  *   - d_flags
 56  *   - d_name
 57  *   - d_lru
 58  *   - d_count
 59  *   - d_unhashed()
 60  *   - d_parent and d_subdirs
 61  *   - children's d_child and d_parent
 62  *   - d_alias, d_inode
 63  *
 64  * Ordering:
 65  * dentry->d_inode->i_lock
 66  *   dentry->d_lock
 67  *     dentry->d_sb->s_dentry_lru_lock
 68  *     dcache_hash_bucket lock
 69  *     s_anon lock
 70  *
 71  * If there is an ancestor relationship:
 72  * dentry->d_parent->...->d_parent->d_lock
 73  *   ...
 74  *     dentry->d_parent->d_lock
 75  *       dentry->d_lock
 76  *
 77  * If no ancestor relationship:
 78  * if (dentry1 < dentry2)
 79  *   dentry1->d_lock
 80  *     dentry2->d_lock
 81  */
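
The last rule above is the classic deadlock-avoidance trick of ordering unrelated locks by address. A minimal userspace sketch of the same idea, using pthread mutexes in place of d_lock (the struct and function names here are illustrative, not kernel API):

    /* Illustrative userspace sketch, not kernel code. */
    #include <pthread.h>

    struct obj {
            pthread_mutex_t lock;
    };

    /* Lock two unrelated peers: always take the lower-addressed lock first,
     * mirroring the "if (dentry1 < dentry2)" rule, so two threads locking
     * the same pair can never deadlock. */
    static void lock_pair(struct obj *a, struct obj *b)
    {
            if (a > b) {
                    struct obj *tmp = a;
                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(&a->lock);
            pthread_mutex_lock(&b->lock);
    }

    static void unlock_pair(struct obj *a, struct obj *b)
    {
            pthread_mutex_unlock(&a->lock);
            pthread_mutex_unlock(&b->lock);
    }
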
 82 int sysctl_vfs_cache_pressure __read_mostly = 100;
 83 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 84 
 85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 86 
 87 EXPORT_SYMBOL(rename_lock);
 88 
 89 static struct kmem_cache *dentry_cache __read_mostly;
 90 
 91 /*
 92  * This is the single most critical data structure when it comes
 93  * to the dcache: the hashtable for lookups. Somebody should try
 94  * to make this good - I've just made it work.
 95  *
 96  * This hash-function tries to avoid losing too many bits of hash
 97  * information, yet avoid using a prime hash-size or similar.
 98  */
 99 
100 static unsigned int d_hash_mask __read_mostly;
101 static unsigned int d_hash_shift __read_mostly;
102 
103 static struct hlist_bl_head *dentry_hashtable __read_mostly;
104 
105 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
106                                         unsigned int hash)
107 {
108         hash += (unsigned long) parent / L1_CACHE_BYTES;
109         hash = hash + (hash >> d_hash_shift);
110         return dentry_hashtable + (hash & d_hash_mask);
111 }
112 
113 /* Statistics gathering. */
114 struct dentry_stat_t dentry_stat = {
115         .age_limit = 45,
116 };
117 
118 static DEFINE_PER_CPU(long, nr_dentry);
119 static DEFINE_PER_CPU(long, nr_dentry_unused);
120 
121 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
122 
123 /*
124  * Here we resort to our own counters instead of using generic per-cpu counters
125  * for consistency with what the vfs inode code does. We expect better code
126  * generation and performance by having our own specialized counters.
127  *
128  * Please note that the loop is done over all possible CPUs, not over all online
129  * CPUs. The reason for this is that we don't want to play games with CPUs going
130  * on and off. If one of them goes off, we will just keep their counters.
131  *
132  * glommer: See cffbc8a for details, and if you ever intend to change this,
133  * please update all vfs counters to match.
134  */
135 static long get_nr_dentry(void)
136 {
137         int i;
138         long sum = 0;
139         for_each_possible_cpu(i)
140                 sum += per_cpu(nr_dentry, i);
141         return sum < 0 ? 0 : sum;
142 }
143 
144 static long get_nr_dentry_unused(void)
145 {
146         int i;
147         long sum = 0;
148         for_each_possible_cpu(i)
149                 sum += per_cpu(nr_dentry_unused, i);
150         return sum < 0 ? 0 : sum;
151 }
152 
153 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
154                    size_t *lenp, loff_t *ppos)
155 {
156         dentry_stat.nr_dentry = get_nr_dentry();
157         dentry_stat.nr_unused = get_nr_dentry_unused();
158         return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
159 }
160 #endif
161 
162 /*
163  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
164  * The strings are both count bytes long, and count is non-zero.
165  */
166 #ifdef CONFIG_DCACHE_WORD_ACCESS
167 
168 #include <asm/word-at-a-time.h>
169 /*
170  * NOTE! 'cs' and 'scount' come from a dentry, so it has an
171  * aligned allocation for this particular component. We don't
172  * strictly need the load_unaligned_zeropad() safety, but it
173  * doesn't hurt either.
174  *
175  * In contrast, 'ct' and 'tcount' can be from a pathname, and do
176  * need the careful unaligned handling.
177  */
178 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
179 {
180         unsigned long a,b,mask;
181 
182         for (;;) {
183                 a = *(unsigned long *)cs;
184                 b = load_unaligned_zeropad(ct);
185                 if (tcount < sizeof(unsigned long))
186                         break;
187                 if (unlikely(a != b))
188                         return 1;
189                 cs += sizeof(unsigned long);
190                 ct += sizeof(unsigned long);
191                 tcount -= sizeof(unsigned long);
192                 if (!tcount)
193                         return 0;
194         }
195         mask = bytemask_from_count(tcount);
196         return unlikely(!!((a ^ b) & mask));
197 }
198 
199 #else
200 
201 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
202 {
203         do {
204                 if (*cs != *ct)
205                         return 1;
206                 cs++;
207                 ct++;
208                 tcount--;
209         } while (tcount);
210         return 0;
211 }
212 
213 #endif
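
Both variants above only compare the first tcount bytes; in the word-at-a-time case the final partial word is masked before the compare. A minimal userspace sketch of that tail masking, assuming a little-endian unsigned long (bytemask_from_count_le() is a stand-in for the kernel's bytemask_from_count(), not its actual implementation):

    /* Illustrative userspace sketch, not kernel code. */
    #include <stdio.h>
    #include <string.h>

    /* Mask covering the low 'tcount' bytes of a little-endian word
     * (assumes tcount < sizeof(unsigned long)). */
    static unsigned long bytemask_from_count_le(unsigned int tcount)
    {
            return ~0ul >> (8 * (sizeof(unsigned long) - tcount));
    }

    int main(void)
    {
            unsigned long a, b;

            /* "foo" padded with NULs vs. "fooAAAAA": they differ only past
             * the first three bytes, which the mask excludes. */
            memcpy(&a, "foo\0\0\0\0\0", sizeof(a));
            memcpy(&b, "fooAAAAA", sizeof(b));

            printf("first 3 bytes equal: %d\n",
                   !((a ^ b) & bytemask_from_count_le(3)));
            return 0;
    }
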
214 
215 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
216 {
217         const unsigned char *cs;
218         /*
219          * Be careful about RCU walk racing with rename:
220          * use ACCESS_ONCE to fetch the name pointer.
221          *
222          * NOTE! Even if a rename will mean that the length
223          * was not loaded atomically, we don't care. The
224          * RCU walk will check the sequence count eventually,
225          * and catch it. And we won't overrun the buffer,
226          * because we're reading the name pointer atomically,
227          * and a dentry name is guaranteed to be properly
228          * terminated with a NUL byte.
229          *
230          * End result: even if 'len' is wrong, we'll exit
231          * early because the data cannot match (there can
232          * be no NUL in the ct/tcount data)
233          */
234         cs = ACCESS_ONCE(dentry->d_name.name);
235         smp_read_barrier_depends();
236         return dentry_string_cmp(cs, ct, tcount);
237 }
238 
239 static void __d_free(struct rcu_head *head)
240 {
241         struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
242 
243         WARN_ON(!hlist_unhashed(&dentry->d_alias));
244         if (dname_external(dentry))
245                 kfree(dentry->d_name.name);
246         kmem_cache_free(dentry_cache, dentry); 
247 }
248 
249 static void dentry_free(struct dentry *dentry)
250 {
251         /* if dentry was never visible to RCU, immediate free is OK */
252         if (!(dentry->d_flags & DCACHE_RCUACCESS))
253                 __d_free(&dentry->d_u.d_rcu);
254         else
255                 call_rcu(&dentry->d_u.d_rcu, __d_free);
256 }
257 
258 /**
259  * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
260  * @dentry: the target dentry
261  * After this call, in-progress rcu-walk path lookup will fail. This
262  * should be called after unhashing, and after changing d_inode (if
263  * the dentry has not already been unhashed).
264  */
265 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
266 {
267         assert_spin_locked(&dentry->d_lock);
268         /* Go through a barrier */
269         write_seqcount_barrier(&dentry->d_seq);
270 }
271 
272 /*
273  * Release the dentry's inode, using the filesystem
274  * d_iput() operation if defined. Dentry has no refcount
275  * and is unhashed.
276  */
277 static void dentry_iput(struct dentry * dentry)
278         __releases(dentry->d_lock)
279         __releases(dentry->d_inode->i_lock)
280 {
281         struct inode *inode = dentry->d_inode;
282         if (inode) {
283                 dentry->d_inode = NULL;
284                 hlist_del_init(&dentry->d_alias);
285                 spin_unlock(&dentry->d_lock);
286                 spin_unlock(&inode->i_lock);
287                 if (!inode->i_nlink)
288                         fsnotify_inoderemove(inode);
289                 if (dentry->d_op && dentry->d_op->d_iput)
290                         dentry->d_op->d_iput(dentry, inode);
291                 else
292                         iput(inode);
293         } else {
294                 spin_unlock(&dentry->d_lock);
295         }
296 }
297 
298 /*
299  * Release the dentry's inode, using the filesystem
300  * d_iput() operation if defined. dentry remains in-use.
301  */
302 static void dentry_unlink_inode(struct dentry * dentry)
303         __releases(dentry->d_lock)
304         __releases(dentry->d_inode->i_lock)
305 {
306         struct inode *inode = dentry->d_inode;
307         __d_clear_type(dentry);
308         dentry->d_inode = NULL;
309         hlist_del_init(&dentry->d_alias);
310         dentry_rcuwalk_barrier(dentry);
311         spin_unlock(&dentry->d_lock);
312         spin_unlock(&inode->i_lock);
313         if (!inode->i_nlink)
314                 fsnotify_inoderemove(inode);
315         if (dentry->d_op && dentry->d_op->d_iput)
316                 dentry->d_op->d_iput(dentry, inode);
317         else
318                 iput(inode);
319 }
320 
321 /*
322  * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
323  * is in use - which includes both the "real" per-superblock
324  * LRU list _and_ the DCACHE_SHRINK_LIST use.
325  *
326  * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
327  * on the shrink list (ie not on the superblock LRU list).
328  *
329  * The per-cpu "nr_dentry_unused" counters are updated with
330  * the DCACHE_LRU_LIST bit.
331  *
332  * These helper functions make sure we always follow the
333  * rules. d_lock must be held by the caller.
334  */
335 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
336 static void d_lru_add(struct dentry *dentry)
337 {
338         D_FLAG_VERIFY(dentry, 0);
339         dentry->d_flags |= DCACHE_LRU_LIST;
340         this_cpu_inc(nr_dentry_unused);
341         WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
342 }
343 
344 static void d_lru_del(struct dentry *dentry)
345 {
346         D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
347         dentry->d_flags &= ~DCACHE_LRU_LIST;
348         this_cpu_dec(nr_dentry_unused);
349         WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
350 }
351 
352 static void d_shrink_del(struct dentry *dentry)
353 {
354         D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
355         list_del_init(&dentry->d_lru);
356         dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
357         this_cpu_dec(nr_dentry_unused);
358 }
359 
360 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
361 {
362         D_FLAG_VERIFY(dentry, 0);
363         list_add(&dentry->d_lru, list);
364         dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
365         this_cpu_inc(nr_dentry_unused);
366 }
367 
368 /*
369  * These can only be called under the global LRU lock, ie during the
370  * callback for freeing the LRU list. "isolate" removes it from the
371  * LRU lists entirely, while shrink_move moves it to the indicated
372  * private list.
373  */
374 static void d_lru_isolate(struct dentry *dentry)
375 {
376         D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
377         dentry->d_flags &= ~DCACHE_LRU_LIST;
378         this_cpu_dec(nr_dentry_unused);
379         list_del_init(&dentry->d_lru);
380 }
381 
382 static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
383 {
384         D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
385         dentry->d_flags |= DCACHE_SHRINK_LIST;
386         list_move_tail(&dentry->d_lru, list);
387 }
388 
389 /*
390  * dentry_lru_(add|del)_list must be called with d_lock held.
391  */
392 static void dentry_lru_add(struct dentry *dentry)
393 {
394         if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
395                 d_lru_add(dentry);
396 }
397 
398 /**
399  * d_drop - drop a dentry
400  * @dentry: dentry to drop
401  *
402  * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
403  * be found through a VFS lookup any more. Note that this is different from
404  * deleting the dentry - d_delete will try to mark the dentry negative if
405  * possible, giving a successful _negative_ lookup, while d_drop will
406  * just make the cache lookup fail.
407  *
408  * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
409  * reason (NFS timeouts or autofs deletes).
410  *
411  * __d_drop requires dentry->d_lock.
412  */
413 void __d_drop(struct dentry *dentry)
414 {
415         if (!d_unhashed(dentry)) {
416                 struct hlist_bl_head *b;
417                 /*
418                  * Hashed dentries are normally on the dentry hashtable,
419                  * with the exception of those newly allocated by
420                  * d_obtain_alias, which are always IS_ROOT:
421                  */
422                 if (unlikely(IS_ROOT(dentry)))
423                         b = &dentry->d_sb->s_anon;
424                 else
425                         b = d_hash(dentry->d_parent, dentry->d_name.hash);
426 
427                 hlist_bl_lock(b);
428                 __hlist_bl_del(&dentry->d_hash);
429                 dentry->d_hash.pprev = NULL;
430                 hlist_bl_unlock(b);
431                 dentry_rcuwalk_barrier(dentry);
432         }
433 }
434 EXPORT_SYMBOL(__d_drop);
435 
436 void d_drop(struct dentry *dentry)
437 {
438         spin_lock(&dentry->d_lock);
439         __d_drop(dentry);
440         spin_unlock(&dentry->d_lock);
441 }
442 EXPORT_SYMBOL(d_drop);
443 
444 static void __dentry_kill(struct dentry *dentry)
445 {
446         struct dentry *parent = NULL;
447         bool can_free = true;
448         if (!IS_ROOT(dentry))
449                 parent = dentry->d_parent;
450 
451         /*
452          * The dentry is now unrecoverably dead to the world.
453          */
454         lockref_mark_dead(&dentry->d_lockref);
455 
456         /*
457          * inform the fs via d_prune that this dentry is about to be
458          * unhashed and destroyed.
459          */
460         if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
461                 dentry->d_op->d_prune(dentry);
462 
463         if (dentry->d_flags & DCACHE_LRU_LIST) {
464                 if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
465                         d_lru_del(dentry);
466         }
467         /* if it was on the hash then remove it */
468         __d_drop(dentry);
469         list_del(&dentry->d_u.d_child);
470         /*
471          * Inform d_walk() that we are no longer attached to the
472          * dentry tree
473          */
474         dentry->d_flags |= DCACHE_DENTRY_KILLED;
475         if (parent)
476                 spin_unlock(&parent->d_lock);
477         dentry_iput(dentry);
478         /*
479          * dentry_iput drops the locks, at which point nobody (except
480          * transient RCU lookups) can reach this dentry.
481          */
482         BUG_ON((int)dentry->d_lockref.count > 0);
483         this_cpu_dec(nr_dentry);
484         if (dentry->d_op && dentry->d_op->d_release)
485                 dentry->d_op->d_release(dentry);
486 
487         spin_lock(&dentry->d_lock);
488         if (dentry->d_flags & DCACHE_SHRINK_LIST) {
489                 dentry->d_flags |= DCACHE_MAY_FREE;
490                 can_free = false;
491         }
492         spin_unlock(&dentry->d_lock);
493         if (likely(can_free))
494                 dentry_free(dentry);
495 }
496 
497 /*
498  * Finish off a dentry we've decided to kill.
499  * dentry->d_lock must be held, returns with it unlocked.
500  * The caller's reference to the dentry is consumed in the process.
501  * Returns dentry requiring refcount drop, or NULL if we're done.
502  */
503 static struct dentry *dentry_kill(struct dentry *dentry)
504         __releases(dentry->d_lock)
505 {
506         struct inode *inode = dentry->d_inode;
507         struct dentry *parent = NULL;
508 
509         if (inode && unlikely(!spin_trylock(&inode->i_lock)))
510                 goto failed;
511 
512         if (!IS_ROOT(dentry)) {
513                 parent = dentry->d_parent;
514                 if (unlikely(!spin_trylock(&parent->d_lock))) {
515                         if (inode)
516                                 spin_unlock(&inode->i_lock);
517                         goto failed;
518                 }
519         }
520 
521         __dentry_kill(dentry);
522         return parent;
523 
524 failed:
525         spin_unlock(&dentry->d_lock);
526         cpu_relax();
527         return dentry; /* try again with same dentry */
528 }
529 
530 static inline struct dentry *lock_parent(struct dentry *dentry)
531 {
532         struct dentry *parent = dentry->d_parent;
533         if (IS_ROOT(dentry))
534                 return NULL;
535         if (unlikely((int)dentry->d_lockref.count < 0))
536                 return NULL;
537         if (likely(spin_trylock(&parent->d_lock)))
538                 return parent;
539         rcu_read_lock();
540         spin_unlock(&dentry->d_lock);
541 again:
542         parent = ACCESS_ONCE(dentry->d_parent);
543         spin_lock(&parent->d_lock);
544         /*
545          * We can't blindly lock dentry until we are sure
546          * that we won't violate the locking order.
547          * Any changes of dentry->d_parent must have
548          * been done with parent->d_lock held, so
549          * spin_lock() above is enough of a barrier
550          * for checking if it's still our child.
551          */
552         if (unlikely(parent != dentry->d_parent)) {
553                 spin_unlock(&parent->d_lock);
554                 goto again;
555         }
556         rcu_read_unlock();
557         if (parent != dentry)
558                 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
559         else
560                 parent = NULL;
561         return parent;
562 }
563 
564 /* 
565  * This is dput
566  *
567  * This is complicated by the fact that we do not want to put
568  * dentries that are no longer on any hash chain on the unused
569  * list: we'd much rather just get rid of them immediately.
570  *
571  * However, that implies that we have to traverse the dentry
572  * tree upwards to the parents which might _also_ now be
573  * scheduled for deletion (it may have been only waiting for
574  * its last child to go away).
575  *
576  * This tail recursion is done by hand as we don't want to depend
577  * on the compiler to always get this right (gcc generally doesn't).
578  * Real recursion would eat up our stack space.
579  */
580 
581 /*
582  * dput - release a dentry
583  * @dentry: dentry to release 
584  *
585  * Release a dentry. This will drop the usage count and if appropriate
586  * call the dentry unlink method as well as removing it from the queues and
587  * releasing its resources. If the parent dentries were scheduled for release
588  * they too may now get deleted.
589  */
590 void dput(struct dentry *dentry)
591 {
592         if (unlikely(!dentry))
593                 return;
594 
595 repeat:
596         if (lockref_put_or_lock(&dentry->d_lockref))
597                 return;
598 
599         /* Unreachable? Get rid of it */
600         if (unlikely(d_unhashed(dentry)))
601                 goto kill_it;
602 
603         if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
604                 if (dentry->d_op->d_delete(dentry))
605                         goto kill_it;
606         }
607 
608         if (!(dentry->d_flags & DCACHE_REFERENCED))
609                 dentry->d_flags |= DCACHE_REFERENCED;
610         dentry_lru_add(dentry);
611 
612         dentry->d_lockref.count--;
613         spin_unlock(&dentry->d_lock);
614         return;
615 
616 kill_it:
617         dentry = dentry_kill(dentry);
618         if (dentry)
619                 goto repeat;
620 }
621 EXPORT_SYMBOL(dput);
622 
623 /**
624  * d_invalidate - invalidate a dentry
625  * @dentry: dentry to invalidate
626  *
627  * Try to invalidate the dentry if it turns out to be
628  * possible. If there are other dentries that can be
629  * reached through this one we can't delete it and we
630  * return -EBUSY. On success we return 0.
631  *
632  * no dcache lock.
633  */
634  
635 int d_invalidate(struct dentry * dentry)
636 {
637         /*
638          * If it's already been dropped, return OK.
639          */
640         spin_lock(&dentry->d_lock);
641         if (d_unhashed(dentry)) {
642                 spin_unlock(&dentry->d_lock);
643                 return 0;
644         }
645         /*
646          * Check whether to do a partial shrink_dcache
647          * to get rid of unused child entries.
648          */
649         if (!list_empty(&dentry->d_subdirs)) {
650                 spin_unlock(&dentry->d_lock);
651                 shrink_dcache_parent(dentry);
652                 spin_lock(&dentry->d_lock);
653         }
654 
655         /*
656          * Somebody else still using it?
657          *
658          * If it's a directory, we can't drop it
659          * for fear of somebody re-populating it
660          * with children (even though dropping it
661          * would make it unreachable from the root,
662          * we might still populate it if it was a
663          * working directory or similar).
664          * We also need to leave mountpoints alone,
665          * directory or not.
666          */
667         if (dentry->d_lockref.count > 1 && dentry->d_inode) {
668                 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
669                         spin_unlock(&dentry->d_lock);
670                         return -EBUSY;
671                 }
672         }
673 
674         __d_drop(dentry);
675         spin_unlock(&dentry->d_lock);
676         return 0;
677 }
678 EXPORT_SYMBOL(d_invalidate);
679 
680 /* This must be called with d_lock held */
681 static inline void __dget_dlock(struct dentry *dentry)
682 {
683         dentry->d_lockref.count++;
684 }
685 
686 static inline void __dget(struct dentry *dentry)
687 {
688         lockref_get(&dentry->d_lockref);
689 }
690 
691 struct dentry *dget_parent(struct dentry *dentry)
692 {
693         int gotref;
694         struct dentry *ret;
695 
696         /*
697          * Do optimistic parent lookup without any
698          * locking.
699          */
700         rcu_read_lock();
701         ret = ACCESS_ONCE(dentry->d_parent);
702         gotref = lockref_get_not_zero(&ret->d_lockref);
703         rcu_read_unlock();
704         if (likely(gotref)) {
705                 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
706                         return ret;
707                 dput(ret);
708         }
709 
710 repeat:
711         /*
712          * Don't need rcu_dereference because we re-check it was correct under
713          * the lock.
714          */
715         rcu_read_lock();
716         ret = dentry->d_parent;
717         spin_lock(&ret->d_lock);
718         if (unlikely(ret != dentry->d_parent)) {
719                 spin_unlock(&ret->d_lock);
720                 rcu_read_unlock();
721                 goto repeat;
722         }
723         rcu_read_unlock();
724         BUG_ON(!ret->d_lockref.count);
725         ret->d_lockref.count++;
726         spin_unlock(&ret->d_lock);
727         return ret;
728 }
729 EXPORT_SYMBOL(dget_parent);
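
A typical calling pattern for dget_parent() is to pin the parent around some non-RCU work and then release it with dput(). A minimal sketch, assuming the usual kernel headers; example_use_parent() is illustrative and not part of fs/dcache.c:

    static void example_use_parent(struct dentry *dentry)
    {
            struct dentry *parent = dget_parent(dentry);

            /* 'parent' cannot be freed or renamed away from under us
             * while we hold this reference. */
            pr_debug("parent of %pd is %pd\n", dentry, parent);

            dput(parent);
    }
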
730 
731 /**
732  * d_find_alias - grab a hashed alias of inode
733  * @inode: inode in question
734  * @want_discon:  flag, used by d_splice_alias, to request
735  *          that only a DISCONNECTED alias be returned.
736  *
737  * If inode has a hashed alias, or is a directory and has any alias,
738  * acquire the reference to alias and return it. Otherwise return NULL.
739  * Notice that if inode is a directory there can be only one alias and
740  * it can be unhashed only if it has no children, or if it is the root
741  * of a filesystem.
742  *
743  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
744  * any other hashed alias over that one unless @want_discon is set,
745  * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
746  */
747 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
748 {
749         struct dentry *alias, *discon_alias;
750 
751 again:
752         discon_alias = NULL;
753         hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
754                 spin_lock(&alias->d_lock);
755                 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
756                         if (IS_ROOT(alias) &&
757                             (alias->d_flags & DCACHE_DISCONNECTED)) {
758                                 discon_alias = alias;
759                         } else if (!want_discon) {
760                                 __dget_dlock(alias);
761                                 spin_unlock(&alias->d_lock);
762                                 return alias;
763                         }
764                 }
765                 spin_unlock(&alias->d_lock);
766         }
767         if (discon_alias) {
768                 alias = discon_alias;
769                 spin_lock(&alias->d_lock);
770                 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
771                         if (IS_ROOT(alias) &&
772                             (alias->d_flags & DCACHE_DISCONNECTED)) {
773                                 __dget_dlock(alias);
774                                 spin_unlock(&alias->d_lock);
775                                 return alias;
776                         }
777                 }
778                 spin_unlock(&alias->d_lock);
779                 goto again;
780         }
781         return NULL;
782 }
783 
784 struct dentry *d_find_alias(struct inode *inode)
785 {
786         struct dentry *de = NULL;
787 
788         if (!hlist_empty(&inode->i_dentry)) {
789                 spin_lock(&inode->i_lock);
790                 de = __d_find_alias(inode, 0);
791                 spin_unlock(&inode->i_lock);
792         }
793         return de;
794 }
795 EXPORT_SYMBOL(d_find_alias);
796 
797 /*
798  *      Try to kill dentries associated with this inode.
799  * WARNING: you must own a reference to inode.
800  */
801 void d_prune_aliases(struct inode *inode)
802 {
803         struct dentry *dentry;
804 restart:
805         spin_lock(&inode->i_lock);
806         hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
807                 spin_lock(&dentry->d_lock);
808                 if (!dentry->d_lockref.count) {
809                         /*
810                          * inform the fs via d_prune that this dentry
811                          * is about to be unhashed and destroyed.
812                          */
813                         if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
814                             !d_unhashed(dentry))
815                                 dentry->d_op->d_prune(dentry);
816 
817                         __dget_dlock(dentry);
818                         __d_drop(dentry);
819                         spin_unlock(&dentry->d_lock);
820                         spin_unlock(&inode->i_lock);
821                         dput(dentry);
822                         goto restart;
823                 }
824                 spin_unlock(&dentry->d_lock);
825         }
826         spin_unlock(&inode->i_lock);
827 }
828 EXPORT_SYMBOL(d_prune_aliases);
829 
830 static void shrink_dentry_list(struct list_head *list)
831 {
832         struct dentry *dentry, *parent;
833 
834         while (!list_empty(list)) {
835                 struct inode *inode;
836                 dentry = list_entry(list->prev, struct dentry, d_lru);
837                 spin_lock(&dentry->d_lock);
838                 parent = lock_parent(dentry);
839 
840                 /*
841                  * The dispose list is isolated and dentries are not accounted
842                  * to the LRU here, so we can simply remove it from the list
843                  * here regardless of whether it is referenced or not.
844                  */
845                 d_shrink_del(dentry);
846 
847                 /*
848                  * We found an inuse dentry which was not removed from
849                  * the LRU because of laziness during lookup. Do not free it.
850                  */
851                 if ((int)dentry->d_lockref.count > 0) {
852                         spin_unlock(&dentry->d_lock);
853                         if (parent)
854                                 spin_unlock(&parent->d_lock);
855                         continue;
856                 }
857 
858 
859                 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
860                         bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
861                         spin_unlock(&dentry->d_lock);
862                         if (parent)
863                                 spin_unlock(&parent->d_lock);
864                         if (can_free)
865                                 dentry_free(dentry);
866                         continue;
867                 }
868 
869                 inode = dentry->d_inode;
870                 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
871                         d_shrink_add(dentry, list);
872                         spin_unlock(&dentry->d_lock);
873                         if (parent)
874                                 spin_unlock(&parent->d_lock);
875                         continue;
876                 }
877 
878                 __dentry_kill(dentry);
879 
880                 /*
881                  * We need to prune ancestors too. This is necessary to prevent
882                  * quadratic behavior of shrink_dcache_parent(), but is also
883                  * expected to be beneficial in reducing dentry cache
884                  * fragmentation.
885                  */
886                 dentry = parent;
887                 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
888                         parent = lock_parent(dentry);
889                         if (dentry->d_lockref.count != 1) {
890                                 dentry->d_lockref.count--;
891                                 spin_unlock(&dentry->d_lock);
892                                 if (parent)
893                                         spin_unlock(&parent->d_lock);
894                                 break;
895                         }
896                         inode = dentry->d_inode;        /* can't be NULL */
897                         if (unlikely(!spin_trylock(&inode->i_lock))) {
898                                 spin_unlock(&dentry->d_lock);
899                                 if (parent)
900                                         spin_unlock(&parent->d_lock);
901                                 cpu_relax();
902                                 continue;
903                         }
904                         __dentry_kill(dentry);
905                         dentry = parent;
906                 }
907         }
908 }
909 
910 static enum lru_status
911 dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
912 {
913         struct list_head *freeable = arg;
914         struct dentry   *dentry = container_of(item, struct dentry, d_lru);
915 
916 
917         /*
918          * we are inverting the lru lock/dentry->d_lock here,
919          * so use a trylock. If we fail to get the lock, just skip
920          * it
921          */
922         if (!spin_trylock(&dentry->d_lock))
923                 return LRU_SKIP;
924 
925         /*
926          * Referenced dentries are still in use. If they have active
927          * counts, just remove them from the LRU. Otherwise give them
928          * another pass through the LRU.
929          */
930         if (dentry->d_lockref.count) {
931                 d_lru_isolate(dentry);
932                 spin_unlock(&dentry->d_lock);
933                 return LRU_REMOVED;
934         }
935 
936         if (dentry->d_flags & DCACHE_REFERENCED) {
937                 dentry->d_flags &= ~DCACHE_REFERENCED;
938                 spin_unlock(&dentry->d_lock);
939 
940                 /*
941                  * The list move itself will be made by the common LRU code. At
942                  * this point, we've dropped the dentry->d_lock but keep the
943                  * lru lock. This is safe to do, since every list movement is
944                  * protected by the lru lock even if both locks are held.
945                  *
946                  * This is guaranteed by the fact that all LRU management
947                  * functions are intermediated by the LRU API calls like
948                  * list_lru_add and list_lru_del. List movement in this file
949  * only ever occurs through these functions or through callbacks
950                  * like this one, that are called from the LRU API.
951                  *
952                  * The only exceptions to this are functions like
953                  * shrink_dentry_list, and code that first checks for the
954                  * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
955                  * operating only with stack provided lists after they are
956  * properly isolated from the main list.  It is thus always a
957                  * local access.
958                  */
959                 return LRU_ROTATE;
960         }
961 
962         d_lru_shrink_move(dentry, freeable);
963         spin_unlock(&dentry->d_lock);
964 
965         return LRU_REMOVED;
966 }
967 
968 /**
969  * prune_dcache_sb - shrink the dcache
970  * @sb: superblock
971  * @nr_to_scan : number of entries to try to free
972  * @nid: which node to scan for freeable entities
973  *
974  * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
975  * done when we need more memory and is called from the superblock shrinker
976  * function.
977  *
978  * This function may fail to free any resources if all the dentries are in
979  * use.
980  */
981 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
982                      int nid)
983 {
984         LIST_HEAD(dispose);
985         long freed;
986 
987         freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
988                                        &dispose, &nr_to_scan);
989         shrink_dentry_list(&dispose);
990         return freed;
991 }
992 
993 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
994                                                 spinlock_t *lru_lock, void *arg)
995 {
996         struct list_head *freeable = arg;
997         struct dentry   *dentry = container_of(item, struct dentry, d_lru);
998 
999         /*
1000          * we are inverting the lru lock/dentry->d_lock here,
1001          * so use a trylock. If we fail to get the lock, just skip
1002          * it
1003          */
1004         if (!spin_trylock(&dentry->d_lock))
1005                 return LRU_SKIP;
1006 
1007         d_lru_shrink_move(dentry, freeable);
1008         spin_unlock(&dentry->d_lock);
1009 
1010         return LRU_REMOVED;
1011 }
1012 
1013 
1014 /**
1015  * shrink_dcache_sb - shrink dcache for a superblock
1016  * @sb: superblock
1017  *
1018  * Shrink the dcache for the specified super block. This is used to free
1019  * the dcache before unmounting a file system.
1020  */
1021 void shrink_dcache_sb(struct super_block *sb)
1022 {
1023         long freed;
1024 
1025         do {
1026                 LIST_HEAD(dispose);
1027 
1028                 freed = list_lru_walk(&sb->s_dentry_lru,
1029                         dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1030 
1031                 this_cpu_sub(nr_dentry_unused, freed);
1032                 shrink_dentry_list(&dispose);
1033         } while (freed > 0);
1034 }
1035 EXPORT_SYMBOL(shrink_dcache_sb);
1036 
1037 /**
1038  * enum d_walk_ret - action to take during tree walk
1039  * @D_WALK_CONTINUE:    continue walk
1040  * @D_WALK_QUIT:        quit walk
1041  * @D_WALK_NORETRY:     quit when retry is needed
1042  * @D_WALK_SKIP:        skip this dentry and its children
1043  */
1044 enum d_walk_ret {
1045         D_WALK_CONTINUE,
1046         D_WALK_QUIT,
1047         D_WALK_NORETRY,
1048         D_WALK_SKIP,
1049 };
1050 
1051 /**
1052  * d_walk - walk the dentry tree
1053  * @parent:     start of walk
1054  * @data:       data passed to @enter() and @finish()
1055  * @enter:      callback when first entering the dentry
1056  * @finish:     callback when successfully finished the walk
1057  *
1058  * The @enter() and @finish() callbacks are called with d_lock held.
1059  */
1060 static void d_walk(struct dentry *parent, void *data,
1061                    enum d_walk_ret (*enter)(void *, struct dentry *),
1062                    void (*finish)(void *))
1063 {
1064         struct dentry *this_parent;
1065         struct list_head *next;
1066         unsigned seq = 0;
1067         enum d_walk_ret ret;
1068         bool retry = true;
1069 
1070 again:
1071         read_seqbegin_or_lock(&rename_lock, &seq);
1072         this_parent = parent;
1073         spin_lock(&this_parent->d_lock);
1074 
1075         ret = enter(data, this_parent);
1076         switch (ret) {
1077         case D_WALK_CONTINUE:
1078                 break;
1079         case D_WALK_QUIT:
1080         case D_WALK_SKIP:
1081                 goto out_unlock;
1082         case D_WALK_NORETRY:
1083                 retry = false;
1084                 break;
1085         }
1086 repeat:
1087         next = this_parent->d_subdirs.next;
1088 resume:
1089         while (next != &this_parent->d_subdirs) {
1090                 struct list_head *tmp = next;
1091                 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1092                 next = tmp->next;
1093 
1094                 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1095 
1096                 ret = enter(data, dentry);
1097                 switch (ret) {
1098                 case D_WALK_CONTINUE:
1099                         break;
1100                 case D_WALK_QUIT:
1101                         spin_unlock(&dentry->d_lock);
1102                         goto out_unlock;
1103                 case D_WALK_NORETRY:
1104                         retry = false;
1105                         break;
1106                 case D_WALK_SKIP:
1107                         spin_unlock(&dentry->d_lock);
1108                         continue;
1109                 }
1110 
1111                 if (!list_empty(&dentry->d_subdirs)) {
1112                         spin_unlock(&this_parent->d_lock);
1113                         spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1114                         this_parent = dentry;
1115                         spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1116                         goto repeat;
1117                 }
1118                 spin_unlock(&dentry->d_lock);
1119         }
1120         /*
1121          * All done at this level ... ascend and resume the search.
1122          */
1123         if (this_parent != parent) {
1124                 struct dentry *child = this_parent;
1125                 this_parent = child->d_parent;
1126 
1127                 rcu_read_lock();
1128                 spin_unlock(&child->d_lock);
1129                 spin_lock(&this_parent->d_lock);
1130 
1131                 /*
1132                  * might go back up the wrong parent if we have had a rename
1133                  * or deletion
1134                  */
1135                 if (this_parent != child->d_parent ||
1136                          (child->d_flags & DCACHE_DENTRY_KILLED) ||
1137                          need_seqretry(&rename_lock, seq)) {
1138                         spin_unlock(&this_parent->d_lock);
1139                         rcu_read_unlock();
1140                         goto rename_retry;
1141                 }
1142                 rcu_read_unlock();
1143                 next = child->d_u.d_child.next;
1144                 goto resume;
1145         }
1146         if (need_seqretry(&rename_lock, seq)) {
1147                 spin_unlock(&this_parent->d_lock);
1148                 goto rename_retry;
1149         }
1150         if (finish)
1151                 finish(data);
1152 
1153 out_unlock:
1154         spin_unlock(&this_parent->d_lock);
1155         done_seqretry(&rename_lock, seq);
1156         return;
1157 
1158 rename_retry:
1159         if (!retry)
1160                 return;
1161         seq = 1;
1162         goto again;
1163 }
1164 
1165 /*
1166  * Search for at least 1 mount point in the dentry's subdirs.
1167  * We descend to the next level whenever the d_subdirs
1168  * list is non-empty and continue searching.
1169  */
1170 
1171 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1172 {
1173         int *ret = data;
1174         if (d_mountpoint(dentry)) {
1175                 *ret = 1;
1176                 return D_WALK_QUIT;
1177         }
1178         return D_WALK_CONTINUE;
1179 }
1180 
1181 /**
1182  * have_submounts - check for mounts over a dentry
1183  * @parent: dentry to check.
1184  *
1185  * Return true if the parent or its subdirectories contain
1186  * a mount point
1187  */
1188 int have_submounts(struct dentry *parent)
1189 {
1190         int ret = 0;
1191 
1192         d_walk(parent, &ret, check_mount, NULL);
1193 
1194         return ret;
1195 }
1196 EXPORT_SYMBOL(have_submounts);
1197 
1198 /*
1199  * Called by mount code to set a mountpoint and check if the mountpoint is
1200  * reachable (e.g. NFS can unhash a directory dentry and then the complete
1201  * subtree can become unreachable).
1202  *
1203  * Only one of check_submounts_and_drop() and d_set_mounted() must succeed.  For
1204  * this reason take rename_lock and d_lock on dentry and ancestors.
1205  */
1206 int d_set_mounted(struct dentry *dentry)
1207 {
1208         struct dentry *p;
1209         int ret = -ENOENT;
1210         write_seqlock(&rename_lock);
1211         for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1212                 /* Need exclusion wrt. check_submounts_and_drop() */
1213                 spin_lock(&p->d_lock);
1214                 if (unlikely(d_unhashed(p))) {
1215                         spin_unlock(&p->d_lock);
1216                         goto out;
1217                 }
1218                 spin_unlock(&p->d_lock);
1219         }
1220         spin_lock(&dentry->d_lock);
1221         if (!d_unlinked(dentry)) {
1222                 dentry->d_flags |= DCACHE_MOUNTED;
1223                 ret = 0;
1224         }
1225         spin_unlock(&dentry->d_lock);
1226 out:
1227         write_sequnlock(&rename_lock);
1228         return ret;
1229 }
1230 
1231 /*
1232  * Search the dentry child list of the specified parent,
1233  * and move any unused dentries to the end of the unused
1234  * list for prune_dcache(). We descend to the next level
1235  * whenever the d_subdirs list is non-empty and continue
1236  * searching.
1237  *
1238  * It returns zero iff there are no unused children,
1239  * otherwise  it returns the number of children moved to
1240  * the end of the unused list. This may not be the total
1241  * number of unused children, because select_parent can
1242  * drop the lock and return early due to latency
1243  * constraints.
1244  */
1245 
1246 struct select_data {
1247         struct dentry *start;
1248         struct list_head dispose;
1249         int found;
1250 };
1251 
1252 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1253 {
1254         struct select_data *data = _data;
1255         enum d_walk_ret ret = D_WALK_CONTINUE;
1256 
1257         if (data->start == dentry)
1258                 goto out;
1259 
1260         if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1261                 data->found++;
1262         } else {
1263                 if (dentry->d_flags & DCACHE_LRU_LIST)
1264                         d_lru_del(dentry);
1265                 if (!dentry->d_lockref.count) {
1266                         d_shrink_add(dentry, &data->dispose);
1267                         data->found++;
1268                 }
1269         }
1270         /*
1271          * We can return to the caller if we have found some (this
1272          * ensures forward progress). We'll be coming back to find
1273          * the rest.
1274          */
1275         if (!list_empty(&data->dispose))
1276                 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1277 out:
1278         return ret;
1279 }
1280 
1281 /**
1282  * shrink_dcache_parent - prune dcache
1283  * @parent: parent of entries to prune
1284  *
1285  * Prune the dcache to remove unused children of the parent dentry.
1286  */
1287 void shrink_dcache_parent(struct dentry *parent)
1288 {
1289         for (;;) {
1290                 struct select_data data;
1291 
1292                 INIT_LIST_HEAD(&data.dispose);
1293                 data.start = parent;
1294                 data.found = 0;
1295 
1296                 d_walk(parent, &data, select_collect, NULL);
1297                 if (!data.found)
1298                         break;
1299 
1300                 shrink_dentry_list(&data.dispose);
1301                 cond_resched();
1302         }
1303 }
1304 EXPORT_SYMBOL(shrink_dcache_parent);
1305 
1306 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1307 {
1308         /* it has busy descendants; complain about those instead */
1309         if (!list_empty(&dentry->d_subdirs))
1310                 return D_WALK_CONTINUE;
1311 
1312         /* root with refcount 1 is fine */
1313         if (dentry == _data && dentry->d_lockref.count == 1)
1314                 return D_WALK_CONTINUE;
1315 
1316         printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1317                         " still in use (%d) [unmount of %s %s]\n",
1318                        dentry,
1319                        dentry->d_inode ?
1320                        dentry->d_inode->i_ino : 0UL,
1321                        dentry,
1322                        dentry->d_lockref.count,
1323                        dentry->d_sb->s_type->name,
1324                        dentry->d_sb->s_id);
1325         WARN_ON(1);
1326         return D_WALK_CONTINUE;
1327 }
1328 
1329 static void do_one_tree(struct dentry *dentry)
1330 {
1331         shrink_dcache_parent(dentry);
1332         d_walk(dentry, dentry, umount_check, NULL);
1333         d_drop(dentry);
1334         dput(dentry);
1335 }
1336 
1337 /*
1338  * destroy the dentries attached to a superblock on unmounting
1339  */
1340 void shrink_dcache_for_umount(struct super_block *sb)
1341 {
1342         struct dentry *dentry;
1343 
1344         WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1345 
1346         dentry = sb->s_root;
1347         sb->s_root = NULL;
1348         do_one_tree(dentry);
1349 
1350         while (!hlist_bl_empty(&sb->s_anon)) {
1351                 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1352                 do_one_tree(dentry);
1353         }
1354 }
1355 
1356 static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
1357 {
1358         struct select_data *data = _data;
1359 
1360         if (d_mountpoint(dentry)) {
1361                 data->found = -EBUSY;
1362                 return D_WALK_QUIT;
1363         }
1364 
1365         return select_collect(_data, dentry);
1366 }
1367 
1368 static void check_and_drop(void *_data)
1369 {
1370         struct select_data *data = _data;
1371 
1372         if (d_mountpoint(data->start))
1373                 data->found = -EBUSY;
1374         if (!data->found)
1375                 __d_drop(data->start);
1376 }
1377 
1378 /**
1379  * check_submounts_and_drop - prune dcache, check for submounts and drop
1380  *
1381  * All done as a single atomic operation relative to has_unlinked_ancestor().
1382  * Returns 0 if successfully unhashed @parent.  If there were submounts then
1383  * return -EBUSY.
1384  *
1385  * @dentry: dentry to prune and drop
1386  */
1387 int check_submounts_and_drop(struct dentry *dentry)
1388 {
1389         int ret = 0;
1390 
1391         /* Negative dentries can be dropped without further checks */
1392         if (!dentry->d_inode) {
1393                 d_drop(dentry);
1394                 goto out;
1395         }
1396 
1397         for (;;) {
1398                 struct select_data data;
1399 
1400                 INIT_LIST_HEAD(&data.dispose);
1401                 data.start = dentry;
1402                 data.found = 0;
1403 
1404                 d_walk(dentry, &data, check_and_collect, check_and_drop);
1405                 ret = data.found;
1406 
1407                 if (!list_empty(&data.dispose))
1408                         shrink_dentry_list(&data.dispose);
1409 
1410                 if (ret <= 0)
1411                         break;
1412 
1413                 cond_resched();
1414         }
1415 
1416 out:
1417         return ret;
1418 }
1419 EXPORT_SYMBOL(check_submounts_and_drop);
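
A common consumer of check_submounts_and_drop() in this era is a filesystem's ->d_revalidate() method: once the dentry is known to be stale it tries to unhash the subtree, but if there are submounts it must keep the dentry and report it as still valid. A hedged sketch of that pattern; foo_still_exists() is a hypothetical helper, not a real API:

    static int foo_d_revalidate(struct dentry *dentry, unsigned int flags)
    {
            if (flags & LOOKUP_RCU)
                    return -ECHILD;         /* cannot sleep in rcu-walk mode */

            if (foo_still_exists(dentry))   /* hypothetical staleness check */
                    return 1;

            /* Stale: unhash it together with any unused children.  If there
             * are submounts we must not unhash, so report the dentry as
             * still valid rather than leave an unreachable mount. */
            if (check_submounts_and_drop(dentry) != 0)
                    return 1;

            return 0;
    }
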
1420 
1421 /**
1422  * __d_alloc    -       allocate a dcache entry
1423  * @sb: filesystem it will belong to
1424  * @name: qstr of the name
1425  *
1426  * Allocates a dentry. It returns %NULL if there is insufficient memory
1427  * available. On a success the dentry is returned. The name passed in is
1428  * copied and the copy passed in may be reused after this call.
1429  */
1430  
1431 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1432 {
1433         struct dentry *dentry;
1434         char *dname;
1435 
1436         dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1437         if (!dentry)
1438                 return NULL;
1439 
1440         /*
1441          * We guarantee that the inline name is always NUL-terminated.
1442          * This way the memcpy() done by the name switching in rename
1443          * will still always have a NUL at the end, even if we might
1444          * be overwriting an internal NUL character
1445          */
1446         dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1447         if (name->len > DNAME_INLINE_LEN-1) {
1448                 dname = kmalloc(name->len + 1, GFP_KERNEL);
1449                 if (!dname) {
1450                         kmem_cache_free(dentry_cache, dentry); 
1451                         return NULL;
1452                 }
1453         } else  {
1454                 dname = dentry->d_iname;
1455         }       
1456 
1457         dentry->d_name.len = name->len;
1458         dentry->d_name.hash = name->hash;
1459         memcpy(dname, name->name, name->len);
1460         dname[name->len] = 0;
1461 
1462         /* Make sure we always see the terminating NUL character */
1463         smp_wmb();
1464         dentry->d_name.name = dname;
1465 
1466         dentry->d_lockref.count = 1;
1467         dentry->d_flags = 0;
1468         spin_lock_init(&dentry->d_lock);
1469         seqcount_init(&dentry->d_seq);
1470         dentry->d_inode = NULL;
1471         dentry->d_parent = dentry;
1472         dentry->d_sb = sb;
1473         dentry->d_op = NULL;
1474         dentry->d_fsdata = NULL;
1475         INIT_HLIST_BL_NODE(&dentry->d_hash);
1476         INIT_LIST_HEAD(&dentry->d_lru);
1477         INIT_LIST_HEAD(&dentry->d_subdirs);
1478         INIT_HLIST_NODE(&dentry->d_alias);
1479         INIT_LIST_HEAD(&dentry->d_u.d_child);
1480         d_set_d_op(dentry, dentry->d_sb->s_d_op);
1481 
1482         this_cpu_inc(nr_dentry);
1483 
1484         return dentry;
1485 }
1486 
1487 /**
1488  * d_alloc      -       allocate a dcache entry
1489  * @parent: parent of entry to allocate
1490  * @name: qstr of the name
1491  *
1492  * Allocates a dentry. It returns %NULL if there is insufficient memory
1493  * available. On a success the dentry is returned. The name passed in is
1494  * copied and the copy passed in may be reused after this call.
1495  */
1496 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1497 {
1498         struct dentry *dentry = __d_alloc(parent->d_sb, name);
1499         if (!dentry)
1500                 return NULL;
1501 
1502         spin_lock(&parent->d_lock);
1503         /*
1504          * don't need child lock because it is not subject
1505          * to concurrency here
1506          */
1507         __dget_dlock(parent);
1508         dentry->d_parent = parent;
1509         list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1510         spin_unlock(&parent->d_lock);
1511 
1512         return dentry;
1513 }
1514 EXPORT_SYMBOL(d_alloc);
1515 
1516 /**
1517  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1518  * @sb: the superblock
1519  * @name: qstr of the name
1520  *
1521  * For a filesystem that just pins its dentries in memory and never
1522  * performs lookups at all, return an unhashed IS_ROOT dentry.
1523  */
1524 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1525 {
1526         return __d_alloc(sb, name);
1527 }
1528 EXPORT_SYMBOL(d_alloc_pseudo);
1529 
1530 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1531 {
1532         struct qstr q;
1533 
1534         q.name = name;
1535         q.len = strlen(name);
1536         q.hash = full_name_hash(q.name, q.len);
1537         return d_alloc(parent, &q);
1538 }
1539 EXPORT_SYMBOL(d_alloc_name);
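
d_alloc_name() is the usual entry point for pseudo filesystems that build their tree by hand: allocate a named child under a parent and publish it with d_add(), which hashes the dentry and attaches the inode. A minimal sketch; foo_create_file() is illustrative, not kernel API:

    static struct dentry *foo_create_file(struct dentry *parent,
                                          const char *name,
                                          struct inode *inode)
    {
            struct dentry *dentry = d_alloc_name(parent, name);

            if (!dentry)
                    return NULL;

            /* Hash the dentry and attach the inode in one go. */
            d_add(dentry, inode);
            return dentry;
    }
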
1540 
1541 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1542 {
1543         WARN_ON_ONCE(dentry->d_op);
1544         WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH  |
1545                                 DCACHE_OP_COMPARE       |
1546                                 DCACHE_OP_REVALIDATE    |
1547                                 DCACHE_OP_WEAK_REVALIDATE       |
1548                                 DCACHE_OP_DELETE ));
1549         dentry->d_op = op;
1550         if (!op)
1551                 return;
1552         if (op->d_hash)
1553                 dentry->d_flags |= DCACHE_OP_HASH;
1554         if (op->d_compare)
1555                 dentry->d_flags |= DCACHE_OP_COMPARE;
1556         if (op->d_revalidate)
1557                 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1558         if (op->d_weak_revalidate)
1559                 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1560         if (op->d_delete)
1561                 dentry->d_flags |= DCACHE_OP_DELETE;
1562         if (op->d_prune)
1563                 dentry->d_flags |= DCACHE_OP_PRUNE;
1564 
1565 }
1566 EXPORT_SYMBOL(d_set_d_op);
1567 
1568 static unsigned d_flags_for_inode(struct inode *inode)
1569 {
1570         unsigned add_flags = DCACHE_FILE_TYPE;
1571 
1572         if (!inode)
1573                 return DCACHE_MISS_TYPE;
1574 
1575         if (S_ISDIR(inode->i_mode)) {
1576                 add_flags = DCACHE_DIRECTORY_TYPE;
1577                 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1578                         if (unlikely(!inode->i_op->lookup))
1579                                 add_flags = DCACHE_AUTODIR_TYPE;
1580                         else
1581                                 inode->i_opflags |= IOP_LOOKUP;
1582                 }
1583         } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1584                 if (unlikely(inode->i_op->follow_link))
1585                         add_flags = DCACHE_SYMLINK_TYPE;
1586                 else
1587                         inode->i_opflags |= IOP_NOFOLLOW;
1588         }
1589 
1590         if (unlikely(IS_AUTOMOUNT(inode)))
1591                 add_flags |= DCACHE_NEED_AUTOMOUNT;
1592         return add_flags;
1593 }
1594 
1595 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1596 {
1597         unsigned add_flags = d_flags_for_inode(inode);
1598 
1599         spin_lock(&dentry->d_lock);
1600         __d_set_type(dentry, add_flags);
1601         if (inode)
1602                 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1603         dentry->d_inode = inode;
1604         dentry_rcuwalk_barrier(dentry);
1605         spin_unlock(&dentry->d_lock);
1606         fsnotify_d_instantiate(dentry, inode);
1607 }
1608 
1609 /**
1610  * d_instantiate - fill in inode information for a dentry
1611  * @entry: dentry to complete
1612  * @inode: inode to attach to this dentry
1613  *
1614  * Fill in inode information in the entry.
1615  *
1616  * This turns negative dentries into productive full members
1617  * of society.
1618  *
1619  * NOTE! This assumes that the inode count has been incremented
1620  * (or otherwise set) by the caller to indicate that it is now
1621  * in use by the dcache.
1622  */
1623  
1624 void d_instantiate(struct dentry *entry, struct inode * inode)
1625 {
1626         BUG_ON(!hlist_unhashed(&entry->d_alias));
1627         if (inode)
1628                 spin_lock(&inode->i_lock);
1629         __d_instantiate(entry, inode);
1630         if (inode)
1631                 spin_unlock(&inode->i_lock);
1632         security_d_instantiate(entry, inode);
1633 }
1634 EXPORT_SYMBOL(d_instantiate);
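
/*
 * Editorial example, not part of this file: the canonical d_instantiate()
 * caller is an inode operation such as ->create(), which binds a freshly
 * allocated inode to the negative dentry supplied by the VFS.  The inode
 * reference returned by new_inode() is handed over to the dcache, as the
 * NOTE above requires.  "example_create" is hypothetical.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
                          umode_t mode, bool excl)
{
        struct inode *inode = new_inode(dir->i_sb);

        if (!inode)
                return -ENOSPC;
        inode->i_ino = get_next_ino();
        inode_init_owner(inode, dir, mode);

        d_instantiate(dentry, inode);   /* dentry is now positive */
        return 0;
}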
1635 
1636 /**
1637  * d_instantiate_unique - instantiate a non-aliased dentry
1638  * @entry: dentry to instantiate
1639  * @inode: inode to attach to this dentry
1640  *
1641  * Fill in inode information in the entry. On success, it returns NULL.
1642  * If an unhashed alias of "entry" already exists, then we return the
1643  * aliased dentry instead and drop one reference to inode.
1644  *
1645  * Note that in order to avoid conflicts with rename() etc, the caller
1646  * had better be holding the parent directory semaphore.
1647  *
1648  * This also assumes that the inode count has been incremented
1649  * (or otherwise set) by the caller to indicate that it is now
1650  * in use by the dcache.
1651  */
1652 static struct dentry *__d_instantiate_unique(struct dentry *entry,
1653                                              struct inode *inode)
1654 {
1655         struct dentry *alias;
1656         int len = entry->d_name.len;
1657         const char *name = entry->d_name.name;
1658         unsigned int hash = entry->d_name.hash;
1659 
1660         if (!inode) {
1661                 __d_instantiate(entry, NULL);
1662                 return NULL;
1663         }
1664 
1665         hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1666                 /*
1667                  * Don't need alias->d_lock here, because aliases with
1668                  * d_parent == entry->d_parent are not subject to name or
1669                  * parent changes, because the parent inode i_mutex is held.
1670                  */
1671                 if (alias->d_name.hash != hash)
1672                         continue;
1673                 if (alias->d_parent != entry->d_parent)
1674                         continue;
1675                 if (alias->d_name.len != len)
1676                         continue;
1677                 if (dentry_cmp(alias, name, len))
1678                         continue;
1679                 __dget(alias);
1680                 return alias;
1681         }
1682 
1683         __d_instantiate(entry, inode);
1684         return NULL;
1685 }
1686 
1687 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1688 {
1689         struct dentry *result;
1690 
1691         BUG_ON(!hlist_unhashed(&entry->d_alias));
1692 
1693         if (inode)
1694                 spin_lock(&inode->i_lock);
1695         result = __d_instantiate_unique(entry, inode);
1696         if (inode)
1697                 spin_unlock(&inode->i_lock);
1698 
1699         if (!result) {
1700                 security_d_instantiate(entry, inode);
1701                 return NULL;
1702         }
1703 
1704         BUG_ON(!d_unhashed(result));
1705         iput(inode);
1706         return result;
1707 }
1708 
1709 EXPORT_SYMBOL(d_instantiate_unique);
1710 
1711 /**
1712  * d_instantiate_no_diralias - instantiate a non-aliased dentry
1713  * @entry: dentry to complete
1714  * @inode: inode to attach to this dentry
1715  *
1716  * Fill in inode information in the entry.  If a directory alias is found, then
1717  * return an error (and drop inode).  Together with d_materialise_unique() this
1718  * guarantees that a directory inode may never have more than one alias.
1719  */
1720 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1721 {
1722         BUG_ON(!hlist_unhashed(&entry->d_alias));
1723 
1724         spin_lock(&inode->i_lock);
1725         if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1726                 spin_unlock(&inode->i_lock);
1727                 iput(inode);
1728                 return -EBUSY;
1729         }
1730         __d_instantiate(entry, inode);
1731         spin_unlock(&inode->i_lock);
1732         security_d_instantiate(entry, inode);
1733 
1734         return 0;
1735 }
1736 EXPORT_SYMBOL(d_instantiate_no_diralias);
1737 
1738 struct dentry *d_make_root(struct inode *root_inode)
1739 {
1740         struct dentry *res = NULL;
1741 
1742         if (root_inode) {
1743                 static const struct qstr name = QSTR_INIT("/", 1);
1744 
1745                 res = __d_alloc(root_inode->i_sb, &name);
1746                 if (res)
1747                         d_instantiate(res, root_inode);
1748                 else
1749                         iput(root_inode);
1750         }
1751         return res;
1752 }
1753 EXPORT_SYMBOL(d_make_root);
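
/*
 * Editorial example, not part of this file: typical use of d_make_root()
 * from a fill_super routine.  Note that on failure d_make_root() has
 * already dropped the inode reference, so the caller must not iput() it
 * a second time.  "example_fill_super_root" is hypothetical.
 */
static int example_fill_super_root(struct super_block *sb)
{
        struct inode *root = new_inode(sb);

        if (!root)
                return -ENOMEM;
        root->i_ino = 1;
        root->i_mode = S_IFDIR | 0755;

        sb->s_root = d_make_root(root); /* NULL on failure, inode released */
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}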
1754 
1755 static struct dentry * __d_find_any_alias(struct inode *inode)
1756 {
1757         struct dentry *alias;
1758 
1759         if (hlist_empty(&inode->i_dentry))
1760                 return NULL;
1761         alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1762         __dget(alias);
1763         return alias;
1764 }
1765 
1766 /**
1767  * d_find_any_alias - find any alias for a given inode
1768  * @inode: inode to find an alias for
1769  *
1770  * If any aliases exist for the given inode, take and return a
1771  * reference for one of them.  If no aliases exist, return %NULL.
1772  */
1773 struct dentry *d_find_any_alias(struct inode *inode)
1774 {
1775         struct dentry *de;
1776 
1777         spin_lock(&inode->i_lock);
1778         de = __d_find_any_alias(inode);
1779         spin_unlock(&inode->i_lock);
1780         return de;
1781 }
1782 EXPORT_SYMBOL(d_find_any_alias);
1783 
1784 /**
1785  * d_obtain_alias - find or allocate a dentry for a given inode
1786  * @inode: inode to allocate the dentry for
1787  *
1788  * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1789  * similar open by handle operations.  The returned dentry may be anonymous,
1790  * or may have a full name (if the inode was already in the cache).
1791  *
1792  * When called on a directory inode, we must ensure that the inode only ever
1793  * has one dentry.  If a dentry is found, that is returned instead of
1794  * allocating a new one.
1795  *
1796  * On successful return, the reference to the inode has been transferred
1797  * to the dentry.  In case of an error the reference on the inode is released.
1798  * To make it easier to use in export operations a %NULL or IS_ERR inode may
1799  * be passed in and the error will be propagated to the return value,
1800  * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1801  */
1802 struct dentry *d_obtain_alias(struct inode *inode)
1803 {
1804         static const struct qstr anonstring = QSTR_INIT("/", 1);
1805         struct dentry *tmp;
1806         struct dentry *res;
1807         unsigned add_flags;
1808 
1809         if (!inode)
1810                 return ERR_PTR(-ESTALE);
1811         if (IS_ERR(inode))
1812                 return ERR_CAST(inode);
1813 
1814         res = d_find_any_alias(inode);
1815         if (res)
1816                 goto out_iput;
1817 
1818         tmp = __d_alloc(inode->i_sb, &anonstring);
1819         if (!tmp) {
1820                 res = ERR_PTR(-ENOMEM);
1821                 goto out_iput;
1822         }
1823 
1824         spin_lock(&inode->i_lock);
1825         res = __d_find_any_alias(inode);
1826         if (res) {
1827                 spin_unlock(&inode->i_lock);
1828                 dput(tmp);
1829                 goto out_iput;
1830         }
1831 
1832         /* attach a disconnected dentry */
1833         add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
1834 
1835         spin_lock(&tmp->d_lock);
1836         tmp->d_inode = inode;
1837         tmp->d_flags |= add_flags;
1838         hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1839         hlist_bl_lock(&tmp->d_sb->s_anon);
1840         hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1841         hlist_bl_unlock(&tmp->d_sb->s_anon);
1842         spin_unlock(&tmp->d_lock);
1843         spin_unlock(&inode->i_lock);
1844         security_d_instantiate(tmp, inode);
1845 
1846         return tmp;
1847 
1848  out_iput:
1849         if (res && !IS_ERR(res))
1850                 security_d_instantiate(res, inode);
1851         iput(inode);
1852         return res;
1853 }
1854 EXPORT_SYMBOL(d_obtain_alias);
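
/*
 * Editorial example, not part of this file: the classic d_obtain_alias()
 * caller is an export_operations ->fh_to_dentry() method (see
 * <linux/exportfs.h>).  As documented above, a %NULL or IS_ERR inode can
 * be passed straight through.  example_iget() is a hypothetical stand-in
 * for the filesystem's inode lookup.
 */
static struct inode *example_iget(struct super_block *sb, unsigned long ino);

static struct dentry *example_fh_to_dentry(struct super_block *sb,
                                           struct fid *fid,
                                           int fh_len, int fh_type)
{
        struct inode *inode = example_iget(sb, fid->i32.ino);

        return d_obtain_alias(inode);   /* handles NULL and ERR_PTR inodes */
}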
1855 
1856 /**
1857  * d_splice_alias - splice a disconnected dentry into the tree if one exists
1858  * @inode:  the inode which may have a disconnected dentry
1859  * @dentry: a negative dentry which we want to point to the inode.
1860  *
1861  * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1862  * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1863  * and return it, else simply d_add the inode to the dentry and return NULL.
1864  *
1865  * This is needed in the lookup routine of any filesystem that is exportable
1866  * (via knfsd) so that we can build dcache paths to directories effectively.
1867  *
1868  * If a dentry was found and moved, then it is returned.  Otherwise NULL
1869  * is returned.  This matches the expected return value of ->lookup.
1870  *
1871  * Cluster filesystems may call this function with a negative, hashed dentry.
1872  * In that case, we know that the inode will be a regular file, and also this
1873  * will only occur during atomic_open. So we need to check for the dentry
1874  * being already hashed only in the final case.
1875  */
1876 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1877 {
1878         struct dentry *new = NULL;
1879 
1880         if (IS_ERR(inode))
1881                 return ERR_CAST(inode);
1882 
1883         if (inode && S_ISDIR(inode->i_mode)) {
1884                 spin_lock(&inode->i_lock);
1885                 new = __d_find_alias(inode, 1);
1886                 if (new) {
1887                         BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1888                         spin_unlock(&inode->i_lock);
1889                         security_d_instantiate(new, inode);
1890                         d_move(new, dentry);
1891                         iput(inode);
1892                 } else {
1893                         /* already taking inode->i_lock, so d_add() by hand */
1894                         __d_instantiate(dentry, inode);
1895                         spin_unlock(&inode->i_lock);
1896                         security_d_instantiate(dentry, inode);
1897                         d_rehash(dentry);
1898                 }
1899         } else {
1900                 d_instantiate(dentry, inode);
1901                 if (d_unhashed(dentry))
1902                         d_rehash(dentry);
1903         }
1904         return new;
1905 }
1906 EXPORT_SYMBOL(d_splice_alias);
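
/*
 * Editorial example, not part of this file: the usual ->lookup() shape for
 * an exportable, disk-based filesystem.  d_splice_alias() takes care of a
 * missing inode (negative dentry), an IS_ERR inode, and a pre-existing
 * disconnected directory alias.  example_inode_by_name() and
 * example_iget() are hypothetical helpers.
 */
static unsigned long example_inode_by_name(struct inode *dir,
                                           const struct qstr *name);
static struct inode *example_iget(struct super_block *sb, unsigned long ino);

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
                                     unsigned int flags)
{
        struct inode *inode = NULL;
        unsigned long ino = example_inode_by_name(dir, &dentry->d_name);

        if (ino)
                inode = example_iget(dir->i_sb, ino);
        return d_splice_alias(inode, dentry);
}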
1907 
1908 /**
1909  * d_add_ci - lookup or allocate new dentry with case-exact name
1910  * @inode:  the inode case-insensitive lookup has found
1911  * @dentry: the negative dentry that was passed to the parent's lookup func
1912  * @name:   the case-exact name to be associated with the returned dentry
1913  *
1914  * This is to avoid filling the dcache with case-insensitive names to the
1915  * same inode, only the actual correct case is stored in the dcache for
1916  * case-insensitive filesystems.
1917  *
1918  * For a case-insensitive lookup match and if the case-exact dentry
1919  * already exists in the dcache, use it and return it.
1920  *
1921  * If no entry exists with the exact case name, allocate new dentry with
1922  * the exact case, and return the spliced entry.
1923  */
1924 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1925                         struct qstr *name)
1926 {
1927         struct dentry *found;
1928         struct dentry *new;
1929 
1930         /*
1931          * First check if a dentry matching the name already exists,
1932          * if not go ahead and create it now.
1933          */
1934         found = d_hash_and_lookup(dentry->d_parent, name);
1935         if (unlikely(IS_ERR(found)))
1936                 goto err_out;
1937         if (!found) {
1938                 new = d_alloc(dentry->d_parent, name);
1939                 if (!new) {
1940                         found = ERR_PTR(-ENOMEM);
1941                         goto err_out;
1942                 }
1943 
1944                 found = d_splice_alias(inode, new);
1945                 if (found) {
1946                         dput(new);
1947                         return found;
1948                 }
1949                 return new;
1950         }
1951 
1952         /*
1953          * If a matching dentry exists, and it's not negative use it.
1954          *
1955          * Decrement the reference count to balance the iget() done
1956          * earlier on.
1957          */
1958         if (found->d_inode) {
1959                 if (unlikely(found->d_inode != inode)) {
1960                         /* This can't happen because bad inodes are unhashed. */
1961                         BUG_ON(!is_bad_inode(inode));
1962                         BUG_ON(!is_bad_inode(found->d_inode));
1963                 }
1964                 iput(inode);
1965                 return found;
1966         }
1967 
1968         /*
1969          * Negative dentry: instantiate it unless the inode is a directory and
1970          * already has a dentry.
1971          */
1972         new = d_splice_alias(inode, found);
1973         if (new) {
1974                 dput(found);
1975                 found = new;
1976         }
1977         return found;
1978 
1979 err_out:
1980         iput(inode);
1981         return found;
1982 }
1983 EXPORT_SYMBOL(d_add_ci);
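
/*
 * Editorial example, not part of this file: a case-insensitive
 * filesystem's ->lookup() that found the inode under a spelling differing
 * from what the user typed.  It hands the exact on-disk name to d_add_ci()
 * so only that spelling is cached.  example_ci_find() is a hypothetical
 * helper assumed to return the inode (or NULL/ERR_PTR) and fill @exact
 * with the on-disk name.
 */
static struct inode *example_ci_find(struct inode *dir, const struct qstr *name,
                                     struct qstr *exact);

static struct dentry *example_ci_lookup(struct inode *dir, struct dentry *dentry,
                                        unsigned int flags)
{
        struct qstr exact;
        struct inode *inode = example_ci_find(dir, &dentry->d_name, &exact);

        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (!inode) {
                d_add(dentry, NULL);    /* cache the negative result */
                return NULL;
        }
        return d_add_ci(dentry, inode, &exact);
}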
1984 
1985 /*
1986  * Do the slow-case of the dentry name compare.
1987  *
1988  * Unlike the dentry_cmp() function, we need to atomically
1989  * load the name and length information, so that the
1990  * filesystem can rely on them, and can use the 'name' and
1991  * 'len' information without worrying about walking off the
1992  * end of memory etc.
1993  *
1994  * Thus the read_seqcount_retry() and the "duplicate" info
1995  * in arguments (the low-level filesystem should not look
1996  * at the dentry inode or name contents directly, since
1997  * rename can change them while we're in RCU mode).
1998  */
1999 enum slow_d_compare {
2000         D_COMP_OK,
2001         D_COMP_NOMATCH,
2002         D_COMP_SEQRETRY,
2003 };
2004 
2005 static noinline enum slow_d_compare slow_dentry_cmp(
2006                 const struct dentry *parent,
2007                 struct dentry *dentry,
2008                 unsigned int seq,
2009                 const struct qstr *name)
2010 {
2011         int tlen = dentry->d_name.len;
2012         const char *tname = dentry->d_name.name;
2013 
2014         if (read_seqcount_retry(&dentry->d_seq, seq)) {
2015                 cpu_relax();
2016                 return D_COMP_SEQRETRY;
2017         }
2018         if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2019                 return D_COMP_NOMATCH;
2020         return D_COMP_OK;
2021 }
2022 
2023 /**
2024  * __d_lookup_rcu - search for a dentry (racy, store-free)
2025  * @parent: parent dentry
2026  * @name: qstr of name we wish to find
2027  * @seqp: returns d_seq value at the point where the dentry was found
2028  * Returns: dentry, or NULL
2029  *
2030  * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2031  * resolution (store-free path walking) design described in
2032  * Documentation/filesystems/path-lookup.txt.
2033  *
2034  * This is not to be used outside core vfs.
2035  *
2036  * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2037  * held, and rcu_read_lock held. The returned dentry must not be stored into
2038  * without taking d_lock and checking d_seq sequence count against @seq
2039  * returned here.
2040  *
2041  * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2042  * function.
2043  *
2044  * Alternatively, __d_lookup_rcu may be called again to look up the child of
2045  * the returned dentry, so long as its parent's seqlock is checked after the
2046  * child is looked up. Thus, an interlocking stepping of sequence lock checks
2047  * is formed, giving integrity down the path walk.
2048  *
2049  * NOTE! The caller *has* to check the resulting dentry against the sequence
2050  * number we've returned before using any of the resulting dentry state!
2051  */
2052 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2053                                 const struct qstr *name,
2054                                 unsigned *seqp)
2055 {
2056         u64 hashlen = name->hash_len;
2057         const unsigned char *str = name->name;
2058         struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2059         struct hlist_bl_node *node;
2060         struct dentry *dentry;
2061 
2062         /*
2063          * Note: There is significant duplication with __d_lookup which is
2064          * required to prevent single threaded performance regressions
2065          * especially on architectures where smp_rmb (in seqcounts) are costly.
2066          * Keep the two functions in sync.
2067          */
2068 
2069         /*
2070          * The hash list is protected using RCU.
2071          *
2072          * Carefully use d_seq when comparing a candidate dentry, to avoid
2073          * races with d_move().
2074          *
2075          * It is possible that concurrent renames can mess up our list
2076          * walk here and result in missing our dentry, resulting in the
2077          * false-negative result. d_lookup() protects against concurrent
2078          * renames using rename_lock seqlock.
2079          *
2080          * See Documentation/filesystems/path-lookup.txt for more details.
2081          */
2082         hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2083                 unsigned seq;
2084 
2085 seqretry:
2086                 /*
2087                  * The dentry sequence count protects us from concurrent
2088                  * renames, and thus protects parent and name fields.
2089                  *
2090                  * The caller must perform a seqcount check in order
2091                  * to do anything useful with the returned dentry.
2092                  *
2093                  * NOTE! We do a "raw" seqcount_begin here. That means that
2094                  * we don't wait for the sequence count to stabilize if it
2095                  * is in the middle of a sequence change. If we do the slow
2096                  * dentry compare, we will do seqretries until it is stable,
2097                  * and if we end up with a successful lookup, we actually
2098                  * want to exit RCU lookup anyway.
2099                  */
2100                 seq = raw_seqcount_begin(&dentry->d_seq);
2101                 if (dentry->d_parent != parent)
2102                         continue;
2103                 if (d_unhashed(dentry))
2104                         continue;
2105 
2106                 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2107                         if (dentry->d_name.hash != hashlen_hash(hashlen))
2108                                 continue;
2109                         *seqp = seq;
2110                         switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2111                         case D_COMP_OK:
2112                                 return dentry;
2113                         case D_COMP_NOMATCH:
2114                                 continue;
2115                         default:
2116                                 goto seqretry;
2117                         }
2118                 }
2119 
2120                 if (dentry->d_name.hash_len != hashlen)
2121                         continue;
2122                 *seqp = seq;
2123                 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2124                         return dentry;
2125         }
2126         return NULL;
2127 }
2128 
2129 /**
2130  * d_lookup - search for a dentry
2131  * @parent: parent dentry
2132  * @name: qstr of name we wish to find
2133  * Returns: dentry, or NULL
2134  *
2135  * d_lookup searches the children of the parent dentry for the name in
2136  * question. If the dentry is found its reference count is incremented and the
2137  * dentry is returned. The caller must use dput to free the entry when it has
2138  * finished using it. %NULL is returned if the dentry does not exist.
2139  */
2140 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2141 {
2142         struct dentry *dentry;
2143         unsigned seq;
2144 
2145         do {
2146                 seq = read_seqbegin(&rename_lock);
2147                 dentry = __d_lookup(parent, name);
2148                 if (dentry)
2149                         break;
2150         } while (read_seqretry(&rename_lock, seq));
2151         return dentry;
2152 }
2153 EXPORT_SYMBOL(d_lookup);
2154 
2155 /**
2156  * __d_lookup - search for a dentry (racy)
2157  * @parent: parent dentry
2158  * @name: qstr of name we wish to find
2159  * Returns: dentry, or NULL
2160  *
2161  * __d_lookup is like d_lookup, however it may (rarely) return a
2162  * false-negative result due to unrelated rename activity.
2163  *
2164  * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2165  * however it must be used carefully, eg. with a following d_lookup in
2166  * the case of failure.
2167  *
2168  * __d_lookup callers must be commented.
2169  */
2170 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2171 {
2172         unsigned int len = name->len;
2173         unsigned int hash = name->hash;
2174         const unsigned char *str = name->name;
2175         struct hlist_bl_head *b = d_hash(parent, hash);
2176         struct hlist_bl_node *node;
2177         struct dentry *found = NULL;
2178         struct dentry *dentry;
2179 
2180         /*
2181          * Note: There is significant duplication with __d_lookup_rcu which is
2182          * required to prevent single threaded performance regressions
2183          * especially on architectures where smp_rmb (in seqcounts) are costly.
2184          * Keep the two functions in sync.
2185          */
2186 
2187         /*
2188          * The hash list is protected using RCU.
2189          *
2190          * Take d_lock when comparing a candidate dentry, to avoid races
2191          * with d_move().
2192          *
2193          * It is possible that concurrent renames can mess up our list
2194          * walk here and result in missing our dentry, resulting in the
2195          * false-negative result. d_lookup() protects against concurrent
2196          * renames using rename_lock seqlock.
2197          *
2198          * See Documentation/filesystems/path-lookup.txt for more details.
2199          */
2200         rcu_read_lock();
2201         
2202         hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2203 
2204                 if (dentry->d_name.hash != hash)
2205                         continue;
2206 
2207                 spin_lock(&dentry->d_lock);
2208                 if (dentry->d_parent != parent)
2209                         goto next;
2210                 if (d_unhashed(dentry))
2211                         goto next;
2212 
2213                 /*
2214                  * It is safe to compare names since d_move() cannot
2215                  * change the qstr (protected by d_lock).
2216                  */
2217                 if (parent->d_flags & DCACHE_OP_COMPARE) {
2218                         int tlen = dentry->d_name.len;
2219                         const char *tname = dentry->d_name.name;
2220                         if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2221                                 goto next;
2222                 } else {
2223                         if (dentry->d_name.len != len)
2224                                 goto next;
2225                         if (dentry_cmp(dentry, str, len))
2226                                 goto next;
2227                 }
2228 
2229                 dentry->d_lockref.count++;
2230                 found = dentry;
2231                 spin_unlock(&dentry->d_lock);
2232                 break;
2233 next:
2234                 spin_unlock(&dentry->d_lock);
2235         }
2236         rcu_read_unlock();
2237 
2238         return found;
2239 }
2240 
2241 /**
2242  * d_hash_and_lookup - hash the qstr then search for a dentry
2243  * @dir: Directory to search in
2244  * @name: qstr of name we wish to find
2245  *
2246  * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2247  */
2248 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2249 {
2250         /*
2251          * Check for a fs-specific hash function. Note that we must
2252          * calculate the standard hash first, as the d_op->d_hash()
2253          * routine may choose to leave the hash value unchanged.
2254          */
2255         name->hash = full_name_hash(name->name, name->len);
2256         if (dir->d_flags & DCACHE_OP_HASH) {
2257                 int err = dir->d_op->d_hash(dir, name);
2258                 if (unlikely(err < 0))
2259                         return ERR_PTR(err);
2260         }
2261         return d_lookup(dir, name);
2262 }
2263 EXPORT_SYMBOL(d_hash_and_lookup);
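
/*
 * Editorial example, not part of this file: probing the dcache, and only
 * the dcache, for a child of @parent.  d_hash_and_lookup() computes the
 * hash (honouring a fs-specific ->d_hash()) before calling d_lookup()
 * above; a successful lookup returns a reference that must be dput().
 * "example_child_is_cached" is hypothetical.
 */
static bool example_child_is_cached(struct dentry *parent, const char *name)
{
        struct qstr q = QSTR_INIT(name, strlen(name));
        struct dentry *child = d_hash_and_lookup(parent, &q);

        if (IS_ERR_OR_NULL(child))
                return false;
        dput(child);
        return true;
}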
2264 
2265 /**
2266  * d_validate - verify dentry provided from insecure source (deprecated)
2267  * @dentry: The dentry alleged to be valid child of @dparent
2268  * @dparent: The parent dentry (known to be valid)
2269  *
2270  * An insecure source has sent us a dentry, here we verify it and dget() it.
2271  * This is used by ncpfs in its readdir implementation.
2272  * Zero is returned if the dentry is invalid.
2273  *
2274  * This function is slow for big directories and is deprecated; do not use it.
2275  */
2276 int d_validate(struct dentry *dentry, struct dentry *dparent)
2277 {
2278         struct dentry *child;
2279 
2280         spin_lock(&dparent->d_lock);
2281         list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2282                 if (dentry == child) {
2283                         spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2284                         __dget_dlock(dentry);
2285                         spin_unlock(&dentry->d_lock);
2286                         spin_unlock(&dparent->d_lock);
2287                         return 1;
2288                 }
2289         }
2290         spin_unlock(&dparent->d_lock);
2291 
2292         return 0;
2293 }
2294 EXPORT_SYMBOL(d_validate);
2295 
2296 /*
2297  * When a file is deleted, we have two options:
2298  * - turn this dentry into a negative dentry
2299  * - unhash this dentry and free it.
2300  *
2301  * Usually, we want to just turn this into
2302  * a negative dentry, but if anybody else is
2303  * currently using the dentry or the inode
2304  * we can't do that and we fall back on removing
2305  * it from the hash queues and waiting for
2306  * it to be deleted later when it has no users
2307  */
2308  
2309 /**
2310  * d_delete - delete a dentry
2311  * @dentry: The dentry to delete
2312  *
2313  * Turn the dentry into a negative dentry if possible, otherwise
2314  * remove it from the hash queues so it can be deleted later
2315  */
2316  
2317 void d_delete(struct dentry * dentry)
2318 {
2319         struct inode *inode;
2320         int isdir = 0;
2321         /*
2322          * Are we the only user?
2323          */
2324 again:
2325         spin_lock(&dentry->d_lock);
2326         inode = dentry->d_inode;
2327         isdir = S_ISDIR(inode->i_mode);
2328         if (dentry->d_lockref.count == 1) {
2329                 if (!spin_trylock(&inode->i_lock)) {
2330                         spin_unlock(&dentry->d_lock);
2331                         cpu_relax();
2332                         goto again;
2333                 }
2334                 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2335                 dentry_unlink_inode(dentry);
2336                 fsnotify_nameremove(dentry, isdir);
2337                 return;
2338         }
2339 
2340         if (!d_unhashed(dentry))
2341                 __d_drop(dentry);
2342 
2343         spin_unlock(&dentry->d_lock);
2344 
2345         fsnotify_nameremove(dentry, isdir);
2346 }
2347 EXPORT_SYMBOL(d_delete);
2348 
2349 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2350 {
2351         BUG_ON(!d_unhashed(entry));
2352         hlist_bl_lock(b);
2353         entry->d_flags |= DCACHE_RCUACCESS;
2354         hlist_bl_add_head_rcu(&entry->d_hash, b);
2355         hlist_bl_unlock(b);
2356 }
2357 
2358 static void _d_rehash(struct dentry * entry)
2359 {
2360         __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2361 }
2362 
2363 /**
2364  * d_rehash     - add an entry back to the hash
2365  * @entry: dentry to add to the hash
2366  *
2367  * Adds a dentry to the hash according to its name.
2368  */
2369  
2370 void d_rehash(struct dentry * entry)
2371 {
2372         spin_lock(&entry->d_lock);
2373         _d_rehash(entry);
2374         spin_unlock(&entry->d_lock);
2375 }
2376 EXPORT_SYMBOL(d_rehash);
2377 
2378 /**
2379  * dentry_update_name_case - update case insensitive dentry with a new name
2380  * @dentry: dentry to be updated
2381  * @name: new name
2382  *
2383  * Update a case insensitive dentry with new case of name.
2384  *
2385  * dentry must have been returned by d_lookup with name @name. Old and new
2386  * name lengths must match (i.e. no d_compare which allows mismatched name
2387  * lengths).
2388  *
2389  * Parent inode i_mutex must be held over d_lookup and into this call (to
2390  * keep renames and concurrent inserts, and readdir(2) away).
2391  */
2392 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2393 {
2394         BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2395         BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2396 
2397         spin_lock(&dentry->d_lock);
2398         write_seqcount_begin(&dentry->d_seq);
2399         memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2400         write_seqcount_end(&dentry->d_seq);
2401         spin_unlock(&dentry->d_lock);
2402 }
2403 EXPORT_SYMBOL(dentry_update_name_case);
2404 
2405 static void switch_names(struct dentry *dentry, struct dentry *target)
2406 {
2407         if (dname_external(target)) {
2408                 if (dname_external(dentry)) {
2409                         /*
2410                          * Both external: swap the pointers
2411                          */
2412                         swap(target->d_name.name, dentry->d_name.name);
2413                 } else {
2414                         /*
2415                          * dentry:internal, target:external.  Steal target's
2416                          * storage and make target internal.
2417                          */
2418                         memcpy(target->d_iname, dentry->d_name.name,
2419                                         dentry->d_name.len + 1);
2420                         dentry->d_name.name = target->d_name.name;
2421                         target->d_name.name = target->d_iname;
2422                 }
2423         } else {
2424                 if (dname_external(dentry)) {
2425                         /*
2426                          * dentry:external, target:internal.  Give dentry's
2427                          * storage to target and make dentry internal
2428                          */
2429                         memcpy(dentry->d_iname, target->d_name.name,
2430                                         target->d_name.len + 1);
2431                         target->d_name.name = dentry->d_name.name;
2432                         dentry->d_name.name = dentry->d_iname;
2433                 } else {
2434                         /*
2435                          * Both are internal.
2436                          */
2437                         unsigned int i;
2438                         BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2439                         for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2440                                 swap(((long *) &dentry->d_iname)[i],
2441                                      ((long *) &target->d_iname)[i]);
2442                         }
2443                 }
2444         }
2445         swap(dentry->d_name.len, target->d_name.len);
2446 }
2447 
2448 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2449 {
2450         /*
2451          * XXXX: do we really need to take target->d_lock?
2452          */
2453         if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2454                 spin_lock(&target->d_parent->d_lock);
2455         else {
2456                 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2457                         spin_lock(&dentry->d_parent->d_lock);
2458                         spin_lock_nested(&target->d_parent->d_lock,
2459                                                 DENTRY_D_LOCK_NESTED);
2460                 } else {
2461                         spin_lock(&target->d_parent->d_lock);
2462                         spin_lock_nested(&dentry->d_parent->d_lock,
2463                                                 DENTRY_D_LOCK_NESTED);
2464                 }
2465         }
2466         if (target < dentry) {
2467                 spin_lock_nested(&target->d_lock, 2);
2468                 spin_lock_nested(&dentry->d_lock, 3);
2469         } else {
2470                 spin_lock_nested(&dentry->d_lock, 2);
2471                 spin_lock_nested(&target->d_lock, 3);
2472         }
2473 }
2474 
2475 static void dentry_unlock_parents_for_move(struct dentry *dentry,
2476                                         struct dentry *target)
2477 {
2478         if (target->d_parent != dentry->d_parent)
2479                 spin_unlock(&dentry->d_parent->d_lock);
2480         if (target->d_parent != target)
2481                 spin_unlock(&target->d_parent->d_lock);
2482 }
2483 
2484 /*
2485  * When switching names, the actual string doesn't strictly have to
2486  * be preserved in the target - because we're dropping the target
2487  * anyway. As such, we can just do a simple memcpy() to copy over
2488  * the new name before we switch.
2489  *
2490  * Note that we have to be a lot more careful about getting the hash
2491  * switched - we have to switch the hash value properly even if it
2492  * then no longer matches the actual (corrupted) string of the target.
2493  * The hash value has to match the hash queue that the dentry is on..
2494  */
2495 /*
2496  * __d_move - move a dentry
2497  * @dentry: entry to move
2498  * @target: new dentry
2499  * @exchange: exchange the two dentries
2500  *
2501  * Update the dcache to reflect the move of a file name. Negative
2502  * dcache entries should not be moved in this way. Caller must hold
2503  * rename_lock, the i_mutex of the source and target directories,
2504  * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2505  */
2506 static void __d_move(struct dentry *dentry, struct dentry *target,
2507                      bool exchange)
2508 {
2509         if (!dentry->d_inode)
2510                 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2511 
2512         BUG_ON(d_ancestor(dentry, target));
2513         BUG_ON(d_ancestor(target, dentry));
2514 
2515         dentry_lock_for_move(dentry, target);
2516 
2517         write_seqcount_begin(&dentry->d_seq);
2518         write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2519 
2520         /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2521 
2522         /*
2523          * Move the dentry to the target hash queue. Don't bother checking
2524          * for the same hash queue because of how unlikely it is.
2525          */
2526         __d_drop(dentry);
2527         __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2528 
2529         /*
2530          * Unhash the target (d_delete() is not usable here).  If exchanging
2531          * the two dentries, then rehash onto the other's hash queue.
2532          */
2533         __d_drop(target);
2534         if (exchange) {
2535                 __d_rehash(target,
2536                            d_hash(dentry->d_parent, dentry->d_name.hash));
2537         }
2538 
2539         list_del(&dentry->d_u.d_child);
2540         list_del(&target->d_u.d_child);
2541 
2542         /* Switch the names.. */
2543         switch_names(dentry, target);
2544         swap(dentry->d_name.hash, target->d_name.hash);
2545 
2546         /* ... and switch the parents */
2547         if (IS_ROOT(dentry)) {
2548                 dentry->d_parent = target->d_parent;
2549                 target->d_parent = target;
2550                 INIT_LIST_HEAD(&target->d_u.d_child);
2551         } else {
2552                 swap(dentry->d_parent, target->d_parent);
2553 
2554                 /* And add them back to the (new) parent lists */
2555                 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2556         }
2557 
2558         list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2559 
2560         write_seqcount_end(&target->d_seq);
2561         write_seqcount_end(&dentry->d_seq);
2562 
2563         dentry_unlock_parents_for_move(dentry, target);
2564         if (exchange)
2565                 fsnotify_d_move(target);
2566         spin_unlock(&target->d_lock);
2567         fsnotify_d_move(dentry);
2568         spin_unlock(&dentry->d_lock);
2569 }
2570 
2571 /*
2572  * d_move - move a dentry
2573  * @dentry: entry to move
2574  * @target: new dentry
2575  *
2576  * Update the dcache to reflect the move of a file name. Negative
2577  * dcache entries should not be moved in this way. See the locking
2578  * requirements for __d_move.
2579  */
2580 void d_move(struct dentry *dentry, struct dentry *target)
2581 {
2582         write_seqlock(&rename_lock);
2583         __d_move(dentry, target, false);
2584         write_sequnlock(&rename_lock);
2585 }
2586 EXPORT_SYMBOL(d_move);
2587 
2588 /*
2589  * d_exchange - exchange two dentries
2590  * @dentry1: first dentry
2591  * @dentry2: second dentry
2592  */
2593 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2594 {
2595         write_seqlock(&rename_lock);
2596 
2597         WARN_ON(!dentry1->d_inode);
2598         WARN_ON(!dentry2->d_inode);
2599         WARN_ON(IS_ROOT(dentry1));
2600         WARN_ON(IS_ROOT(dentry2));
2601 
2602         __d_move(dentry1, dentry2, true);
2603 
2604         write_sequnlock(&rename_lock);
2605 }
2606 
2607 /**
2608  * d_ancestor - search for an ancestor
2609  * @p1: ancestor dentry
2610  * @p2: child dentry
2611  *
2612  * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2613  * an ancestor of p2, else NULL.
2614  */
2615 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2616 {
2617         struct dentry *p;
2618 
2619         for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2620                 if (p->d_parent == p1)
2621                         return p;
2622         }
2623         return NULL;
2624 }
2625 
2626 /*
2627  * This helper attempts to cope with remotely renamed directories
2628  *
2629  * It assumes that the caller is already holding
2630  * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2631  *
2632  * Note: If ever the locking in lock_rename() changes, then please
2633  * remember to update this too...
2634  */
2635 static struct dentry *__d_unalias(struct inode *inode,
2636                 struct dentry *dentry, struct dentry *alias)
2637 {
2638         struct mutex *m1 = NULL, *m2 = NULL;
2639         struct dentry *ret = ERR_PTR(-EBUSY);
2640 
2641         /* If alias and dentry share a parent, then no extra locks required */
2642         if (alias->d_parent == dentry->d_parent)
2643                 goto out_unalias;
2644 
2645         /* See lock_rename() */
2646         if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2647                 goto out_err;
2648         m1 = &dentry->d_sb->s_vfs_rename_mutex;
2649         if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2650                 goto out_err;
2651         m2 = &alias->d_parent->d_inode->i_mutex;
2652 out_unalias:
2653         if (likely(!d_mountpoint(alias))) {
2654                 __d_move(alias, dentry, false);
2655                 ret = alias;
2656         }
2657 out_err:
2658         spin_unlock(&inode->i_lock);
2659         if (m2)
2660                 mutex_unlock(m2);
2661         if (m1)
2662                 mutex_unlock(m1);
2663         return ret;
2664 }
2665 
2666 /*
2667  * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2668  * named dentry in place of the dentry to be replaced.
2669  * returns with anon->d_lock held!
2670  */
2671 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2672 {
2673         struct dentry *dparent;
2674 
2675         dentry_lock_for_move(anon, dentry);
2676 
2677         write_seqcount_begin(&dentry->d_seq);
2678         write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
2679 
2680         dparent = dentry->d_parent;
2681 
2682         switch_names(dentry, anon);
2683         swap(dentry->d_name.hash, anon->d_name.hash);
2684 
2685         dentry->d_parent = dentry;
2686         list_del_init(&dentry->d_u.d_child);
2687         anon->d_parent = dparent;
2688         list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2689 
2690         write_seqcount_end(&dentry->d_seq);
2691         write_seqcount_end(&anon->d_seq);
2692 
2693         dentry_unlock_parents_for_move(anon, dentry);
2694         spin_unlock(&dentry->d_lock);
2695 
2696         /* anon->d_lock still locked, returns locked */
2697 }
2698 
2699 /**
2700  * d_materialise_unique - introduce an inode into the tree
2701  * @dentry: candidate dentry
2702  * @inode: inode to bind to the dentry, to which aliases may be attached
2703  *
2704  * Introduces a dentry into the tree, substituting an extant disconnected
2705  * root directory alias in its place if there is one. Caller must hold the
2706  * i_mutex of the parent directory.
2707  */
2708 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2709 {
2710         struct dentry *actual;
2711 
2712         BUG_ON(!d_unhashed(dentry));
2713 
2714         if (!inode) {
2715                 actual = dentry;
2716                 __d_instantiate(dentry, NULL);
2717                 d_rehash(actual);
2718                 goto out_nolock;
2719         }
2720 
2721         spin_lock(&inode->i_lock);
2722 
2723         if (S_ISDIR(inode->i_mode)) {
2724                 struct dentry *alias;
2725 
2726                 /* Does an aliased dentry already exist? */
2727                 alias = __d_find_alias(inode, 0);
2728                 if (alias) {
2729                         actual = alias;
2730                         write_seqlock(&rename_lock);
2731 
2732                         if (d_ancestor(alias, dentry)) {
2733                                 /* Check for loops */
2734                                 actual = ERR_PTR(-ELOOP);
2735                                 spin_unlock(&inode->i_lock);
2736                         } else if (IS_ROOT(alias)) {
2737                                 /* Is this an anonymous mountpoint that we
2738                                  * could splice into our tree? */
2739                                 __d_materialise_dentry(dentry, alias);
2740                                 write_sequnlock(&rename_lock);
2741                                 __d_drop(alias);
2742                                 goto found;
2743                         } else {
2744                                 /* Nope, but we must(!) avoid directory
2745                                  * aliasing. This drops inode->i_lock */
2746                                 actual = __d_unalias(inode, dentry, alias);
2747                         }
2748                         write_sequnlock(&rename_lock);
2749                         if (IS_ERR(actual)) {
2750                                 if (PTR_ERR(actual) == -ELOOP)
2751                                         pr_warn_ratelimited(
2752                                                 "VFS: Lookup of '%s' in %s %s"
2753                                                 " would have caused loop\n",
2754                                                 dentry->d_name.name,
2755                                                 inode->i_sb->s_type->name,
2756                                                 inode->i_sb->s_id);
2757                                 dput(alias);
2758                         }
2759                         goto out_nolock;
2760                 }
2761         }
2762 
2763         /* Add a unique reference */
2764         actual = __d_instantiate_unique(dentry, inode);
2765         if (!actual)
2766                 actual = dentry;
2767         else
2768                 BUG_ON(!d_unhashed(actual));
2769 
2770         spin_lock(&actual->d_lock);
2771 found:
2772         _d_rehash(actual);
2773         spin_unlock(&actual->d_lock);
2774         spin_unlock(&inode->i_lock);
2775 out_nolock:
2776         if (actual == dentry) {
2777                 security_d_instantiate(dentry, inode);
2778                 return NULL;
2779         }
2780 
2781         iput(inode);
2782         return actual;
2783 }
2784 EXPORT_SYMBOL_GPL(d_materialise_unique);
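
/*
 * Editorial example, not part of this file: a network filesystem's
 * ->lookup() using d_materialise_unique(), so that a directory renamed on
 * the server is spliced to its new location rather than growing a second
 * alias.  example_remote_iget() is a hypothetical helper that may return
 * an inode, NULL, or an ERR_PTR.
 */
static struct inode *example_remote_iget(struct inode *dir,
                                         const struct qstr *name);

static struct dentry *example_remote_lookup(struct inode *dir,
                                            struct dentry *dentry,
                                            unsigned int flags)
{
        struct inode *inode = example_remote_iget(dir, &dentry->d_name);

        if (IS_ERR(inode))
                return ERR_CAST(inode);
        /* a NULL inode simply leaves a hashed negative dentry behind */
        return d_materialise_unique(dentry, inode);
}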
2785 
2786 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2787 {
2788         *buflen -= namelen;
2789         if (*buflen < 0)
2790                 return -ENAMETOOLONG;
2791         *buffer -= namelen;
2792         memcpy(*buffer, str, namelen);
2793         return 0;
2794 }
2795 
2796 /**
2797  * prepend_name - prepend a pathname in front of current buffer pointer
2798  * @buffer: buffer pointer
2799  * @buflen: allocated length of the buffer
2800  * @name:   name string and length qstr structure
2801  *
2802  * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2803  * make sure that either the old or the new name pointer and length are
2804  * fetched. However, there may be a mismatch between length and pointer.
2805  * The length cannot be trusted; we need to copy it byte-by-byte until
2806  * the length is reached or a null byte is found. It also prepends "/" at
2807  * the beginning of the name. The sequence number check at the caller will
2808  * retry it again when a d_move() does happen. So any garbage in the buffer
2809  * due to mismatched pointer and length will be discarded.
2810  */
2811 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2812 {
2813         const char *dname = ACCESS_ONCE(name->name);
2814         u32 dlen = ACCESS_ONCE(name->len);
2815         char *p;
2816 
2817         *buflen -= dlen + 1;
2818         if (*buflen < 0)
2819                 return -ENAMETOOLONG;
2820         p = *buffer -= dlen + 1;
2821         *p++ = '/';
2822         while (dlen--) {
2823                 char c = *dname++;
2824                 if (!c)
2825                         break;
2826                 *p++ = c;
2827         }
2828         return 0;
2829 }
2830 
2831 /**
2832  * prepend_path - Prepend path string to a buffer
2833  * @path: the dentry/vfsmount to report
2834  * @root: root vfsmnt/dentry
2835  * @buffer: pointer to the end of the buffer
2836  * @buflen: pointer to buffer length
2837  *
2838  * The function will first try to write out the pathname without taking any
2839  * lock other than the RCU read lock to make sure that dentries won't go away.
2840  * It only checks the sequence number of the global rename_lock as any change
2841  * in the dentry's d_seq will be preceded by changes in the rename_lock
2842  * sequence number. If the sequence number has changed, it will restart
2843  * the whole pathname back-tracing sequence again by taking the rename_lock.
2844  * In this case, there is no need to take the RCU read lock as the recursive
2845  * parent pointer references will keep the dentry chain alive as long as no
2846  * rename operation is performed.
2847  */
2848 static int prepend_path(const struct path *path,
2849                         const struct path *root,
2850                         char **buffer, int *buflen)
2851 {
2852         struct dentry *dentry;
2853         struct vfsmount *vfsmnt;
2854         struct mount *mnt;
2855         int error = 0;
2856         unsigned seq, m_seq = 0;
2857         char *bptr;
2858         int blen;
2859 
2860         rcu_read_lock();
2861 restart_mnt:
2862         read_seqbegin_or_lock(&mount_lock, &m_seq);
2863         seq = 0;
2864         rcu_read_lock();
2865 restart:
2866         bptr = *buffer;
2867         blen = *buflen;
2868         error = 0;
2869         dentry = path->dentry;
2870         vfsmnt = path->mnt;
2871         mnt = real_mount(vfsmnt);
2872         read_seqbegin_or_lock(&rename_lock, &seq);
2873         while (dentry != root->dentry || vfsmnt != root->mnt) {
2874                 struct dentry * parent;
2875 
2876                 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2877                         struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2878                         /* Global root? */
2879                         if (mnt != parent) {
2880                                 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2881                                 mnt = parent;
2882                                 vfsmnt = &mnt->mnt;
2883                                 continue;
2884                         }
2885                         /*
2886                          * Filesystems needing to implement special "root names"
2887                          * should do so with ->d_dname()
2888                          */
2889                         if (IS_ROOT(dentry) &&
2890                            (dentry->d_name.len != 1 ||
2891                             dentry->d_name.name[0] != '/')) {
2892                                 WARN(1, "Root dentry has weird name <%.*s>\n",
2893                                      (int) dentry->d_name.len,
2894                                      dentry->d_name.name);
2895                         }
2896                         if (!error)
2897                                 error = is_mounted(vfsmnt) ? 1 : 2;
2898                         break;
2899                 }
2900                 parent = dentry->d_parent;
2901                 prefetch(parent);
2902                 error = prepend_name(&bptr, &blen, &dentry->d_name);
2903                 if (error)
2904                         break;
2905 
2906                 dentry = parent;
2907         }
2908         if (!(seq & 1))
2909                 rcu_read_unlock();
2910         if (need_seqretry(&rename_lock, seq)) {
2911                 seq = 1;
2912                 goto restart;
2913         }
2914         done_seqretry(&rename_lock, seq);
2915 
2916         if (!(m_seq & 1))
2917                 rcu_read_unlock();
2918         if (need_seqretry(&mount_lock, m_seq)) {
2919                 m_seq = 1;
2920                 goto restart_mnt;
2921         }
2922         done_seqretry(&mount_lock, m_seq);
2923 
2924         if (error >= 0 && bptr == *buffer) {
2925                 if (--blen < 0)
2926                         error = -ENAMETOOLONG;
2927                 else
2928                         *--bptr = '/';
2929         }
2930         *buffer = bptr;
2931         *buflen = blen;
2932         return error;
2933 }
2934 
2935 /**
2936  * __d_path - return the path of a dentry
2937  * @path: the dentry/vfsmount to report
2938  * @root: root vfsmnt/dentry
2939  * @buf: buffer to return value in
2940  * @buflen: buffer length
2941  *
2942  * Convert a dentry into an ASCII path name.
2943  *
2944  * Returns a pointer into the buffer or an error code if the
2945  * path was too long.
2946  *
2947  * "buflen" should be positive.
2948  *
2949  * If the path is not reachable from the supplied root, return %NULL.
2950  */
2951 char *__d_path(const struct path *path,
2952                const struct path *root,
2953                char *buf, int buflen)
2954 {
2955         char *res = buf + buflen;
2956         int error;
2957 
2958         prepend(&res, &buflen, "\0", 1);
2959         error = prepend_path(path, root, &res, &buflen);
2960 
2961         if (error < 0)
2962                 return ERR_PTR(error);
2963         if (error > 0)
2964                 return NULL;
2965         return res;
2966 }
2967 
2968 char *d_absolute_path(const struct path *path,
2969                char *buf, int buflen)
2970 {
2971         struct path root = {};
2972         char *res = buf + buflen;
2973         int error;
2974 
2975         prepend(&res, &buflen, "\0", 1);
2976         error = prepend_path(path, &root, &res, &buflen);
2977 
2978         if (error > 1)
2979                 error = -EINVAL;
2980         if (error < 0)
2981                 return ERR_PTR(error);
2982         return res;
2983 }
2984 
2985 /*
2986  * same as __d_path but appends "(deleted)" for unlinked files.
2987  */
2988 static int path_with_deleted(const struct path *path,
2989                              const struct path *root,
2990                              char **buf, int *buflen)
2991 {
2992         prepend(buf, buflen, "\0", 1);
2993         if (d_unlinked(path->dentry)) {
2994                 int error = prepend(buf, buflen, " (deleted)", 10);
2995                 if (error)
2996                         return error;
2997         }
2998 
2999         return prepend_path(path, root, buf, buflen);
3000 }
3001 
3002 static int prepend_unreachable(char **buffer, int *buflen)
3003 {
3004         return prepend(buffer, buflen, "(unreachable)", 13);
3005 }
3006 
3007 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3008 {
3009         unsigned seq;
3010 
3011         do {
3012                 seq = read_seqcount_begin(&fs->seq);
3013                 *root = fs->root;
3014         } while (read_seqcount_retry(&fs->seq, seq));
3015 }
3016 
3017 /**
3018  * d_path - return the path of a dentry
3019  * @path: path to report
3020  * @buf: buffer to return value in
3021  * @buflen: buffer length
3022  *
3023  * Convert a dentry into an ASCII path name. If the entry has been deleted
3024  * the string " (deleted)" is appended. Note that this is ambiguous.
3025  *
3026  * Returns a pointer into the buffer or an error code if the path was
3027  * too long. Note: Callers should use the returned pointer, not the passed
3028  * in buffer, to use the name! The implementation often starts at an offset
3029  * into the buffer, and may leave 0 bytes at the start.
3030  *
3031  * "buflen" should be positive.
3032  */
3033 char *d_path(const struct path *path, char *buf, int buflen)
3034 {
3035         char *res = buf + buflen;
3036         struct path root;
3037         int error;
3038 
3039         /*
3040          * We have various synthetic filesystems that never get mounted.  On
3041          * these filesystems dentries are never used for lookup purposes, and
3042          * thus don't need to be hashed.  They also don't need a name until a
3043          * user wants to identify the object in /proc/pid/fd/.  The little hack
3044          * below allows us to generate a name for these objects on demand:
3045          *
3046          * Some pseudo inodes are mountable.  When they are mounted
3047          * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3048          * and instead have d_path return the mounted path.
3049          */
3050         if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3051             (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3052                 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3053 
3054         rcu_read_lock();
3055         get_fs_root_rcu(current->fs, &root);
3056         error = path_with_deleted(path, &root, &res, &buflen);
3057         rcu_read_unlock();
3058 
3059         if (error < 0)
3060                 res = ERR_PTR(error);
3061         return res;
3062 }
3063 EXPORT_SYMBOL(d_path);
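
/*
 * Illustrative sketch (not part of dcache.c): a typical d_path() user
 * printing the name of an open file.  Note that the returned pointer,
 * not 'buf', is used - the name is built from the end of the buffer.
 * print_file_path() is a hypothetical helper.
 */
static void print_file_path(struct file *file)
{
        char *buf = (char *)__get_free_page(GFP_KERNEL);
        char *name;

        if (!buf)
                return;
        name = d_path(&file->f_path, buf, PAGE_SIZE);
        if (!IS_ERR(name))
                pr_info("file: %s\n", name);
        free_page((unsigned long)buf);
}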
3064 
3065 /*
3066  * Helper function for dentry_operations.d_dname() members
3067  */
3068 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3069                         const char *fmt, ...)
3070 {
3071         va_list args;
3072         char temp[64];
3073         int sz;
3074 
3075         va_start(args, fmt);
3076         sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3077         va_end(args);
3078 
3079         if (sz > sizeof(temp) || sz > buflen)
3080                 return ERR_PTR(-ENAMETOOLONG);
3081 
3082         buffer += buflen - sz;
3083         return memcpy(buffer, temp, sz);
3084 }
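
/*
 * Example of a ->d_dname() implementation built on dynamic_dname(); this
 * mirrors what pseudo filesystems such as pipefs do for their anonymous
 * dentries (compare pipefs_dname() in fs/pipe.c).  The "examplefs" name
 * is hypothetical.
 */
static char *examplefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
                                dentry->d_inode->i_ino);
}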
3085 
3086 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3087 {
3088         char *end = buffer + buflen;
3089         /* these dentries are never renamed, so d_lock is not needed */
3090         if (prepend(&end, &buflen, " (deleted)", 11) ||
3091             prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3092             prepend(&end, &buflen, "/", 1))  
3093                 end = ERR_PTR(-ENAMETOOLONG);
3094         return end;
3095 }
3096 EXPORT_SYMBOL(simple_dname);
3097 
3098 /*
3099  * Write full pathname from the root of the filesystem into the buffer.
3100  */
3101 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3102 {
3103         struct dentry *dentry;
3104         char *end, *retval;
3105         int len, seq = 0;
3106         int error = 0;
3107 
3108         if (buflen < 2)
3109                 goto Elong;
3110 
3111         rcu_read_lock();
3112 restart:
3113         dentry = d;
3114         end = buf + buflen;
3115         len = buflen;
3116         prepend(&end, &len, "\0", 1);
3117         /* Get '/' right */
3118         retval = end-1;
3119         *retval = '/';
3120         read_seqbegin_or_lock(&rename_lock, &seq);
3121         while (!IS_ROOT(dentry)) {
3122                 struct dentry *parent = dentry->d_parent;
3123 
3124                 prefetch(parent);
3125                 error = prepend_name(&end, &len, &dentry->d_name);
3126                 if (error)
3127                         break;
3128 
3129                 retval = end;
3130                 dentry = parent;
3131         }
3132         if (!(seq & 1))
3133                 rcu_read_unlock();
3134         if (need_seqretry(&rename_lock, seq)) {
3135                 seq = 1;
3136                 goto restart;
3137         }
3138         done_seqretry(&rename_lock, seq);
3139         if (error)
3140                 goto Elong;
3141         return retval;
3142 Elong:
3143         return ERR_PTR(-ENAMETOOLONG);
3144 }
3145 
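
/*
 * Like dentry_path(), but does not append "//deleted" for unlinked
 * dentries; the result is simply the dentry's path from the root of its
 * filesystem.
 */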
3146 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3147 {
3148         return __dentry_path(dentry, buf, buflen);
3149 }
3150 EXPORT_SYMBOL(dentry_path_raw);
3151 
3152 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3153 {
3154         char *p = NULL;
3155         char *retval;
3156 
3157         if (d_unlinked(dentry)) {
3158                 p = buf + buflen;
3159                 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3160                         goto Elong;
3161                 buflen++;
3162         }
3163         retval = __dentry_path(dentry, buf, buflen);
3164         if (!IS_ERR(retval) && p)
3165                 *p = '/';       /* restore '/' overridden with '\0' */
3166         return retval;
3167 Elong:
3168         return ERR_PTR(-ENAMETOOLONG);
3169 }
3170 
3171 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3172                                     struct path *pwd)
3173 {
3174         unsigned seq;
3175 
3176         do {
3177                 seq = read_seqcount_begin(&fs->seq);
3178                 *root = fs->root;
3179                 *pwd = fs->pwd;
3180         } while (read_seqcount_retry(&fs->seq, seq));
3181 }
3182 
3183 /*
3184  * NOTE! The user-level library version returns a
3185  * character pointer. The kernel system call just
3186  * returns the length of the buffer filled (which
3187  * includes the ending '\0' character), or a negative
3188  * error value. So libc would do something like
3189  *
3190  *      char *getcwd(char * buf, size_t size)
3191  *      {
3192  *              int retval;
3193  *
3194  *              retval = sys_getcwd(buf, size);
3195  *              if (retval >= 0)
3196  *                      return buf;
3197  *              errno = -retval;
3198  *              return NULL;
3199  *      }
3200  */
3201 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3202 {
3203         int error;
3204         struct path pwd, root;
3205         char *page = __getname();
3206 
3207         if (!page)
3208                 return -ENOMEM;
3209 
3210         rcu_read_lock();
3211         get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3212 
3213         error = -ENOENT;
3214         if (!d_unlinked(pwd.dentry)) {
3215                 unsigned long len;
3216                 char *cwd = page + PATH_MAX;
3217                 int buflen = PATH_MAX;
3218 
3219                 prepend(&cwd, &buflen, "\0", 1);
3220                 error = prepend_path(&pwd, &root, &cwd, &buflen);
3221                 rcu_read_unlock();
3222 
3223                 if (error < 0)
3224                         goto out;
3225 
3226                 /* Unreachable from current root */
3227                 if (error > 0) {
3228                         error = prepend_unreachable(&cwd, &buflen);
3229                         if (error)
3230                                 goto out;
3231                 }
3232 
3233                 error = -ERANGE;
3234                 len = PATH_MAX + page - cwd;
3235                 if (len <= size) {
3236                         error = len;
3237                         if (copy_to_user(buf, cwd, len))
3238                                 error = -EFAULT;
3239                 }
3240         } else {
3241                 rcu_read_unlock();
3242         }
3243 
3244 out:
3245         __putname(page);
3246         return error;
3247 }
3248 
3249 /*
3250  * Test whether new_dentry is a subdirectory of old_dentry.
3251  *
3252  * Trivially implemented using the dcache structure
3253  */
3254 
3255 /**
3256  * is_subdir - is new dentry a subdirectory of old_dentry
3257  * @new_dentry: dentry to test
3258  * @old_dentry: possible ancestor of @new_dentry
3259  *
3260  * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3261  * Returns 0 otherwise.
3262  * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3263  */
3264   
3265 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3266 {
3267         int result;
3268         unsigned seq;
3269 
3270         if (new_dentry == old_dentry)
3271                 return 1;
3272 
3273         do {
3274                 /* for restarting inner loop in case of seq retry */
3275                 seq = read_seqbegin(&rename_lock);
3276                 /*
3277                  * Need rcu_read_lock() so the dentries in the d_parent
3278                  * chain cannot be freed under us while d_move() reshuffles
3279                  * the tree
3279                  */
3280                 rcu_read_lock();
3281                 if (d_ancestor(old_dentry, new_dentry))
3282                         result = 1;
3283                 else
3284                         result = 0;
3285                 rcu_read_unlock();
3286         } while (read_seqretry(&rename_lock, seq));
3287 
3288         return result;
3289 }
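
/*
 * Illustrative sketch (not part of dcache.c): a caller might use
 * is_subdir() to refuse to act on a dentry that has been moved out of a
 * subtree it cares about.  The names and the error code are hypothetical.
 */
static int check_still_below(struct dentry *victim, struct dentry *subtree_root)
{
        if (!is_subdir(victim, subtree_root))
                return -EXDEV;
        return 0;
}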
3290 
3291 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3292 {
3293         struct dentry *root = data;
3294         if (dentry != root) {
3295                 if (d_unhashed(dentry) || !dentry->d_inode)
3296                         return D_WALK_SKIP;
3297 
3298                 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3299                         dentry->d_flags |= DCACHE_GENOCIDE;
3300                         dentry->d_lockref.count--;
3301                 }
3302         }
3303         return D_WALK_CONTINUE;
3304 }
3305 
3306 void d_genocide(struct dentry *parent)
3307 {
3308         d_walk(parent, parent, d_genocide_kill, NULL);
3309 }
3310 
3311 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3312 {
3313         inode_dec_link_count(inode);
3314         BUG_ON(dentry->d_name.name != dentry->d_iname ||
3315                 !hlist_unhashed(&dentry->d_alias) ||
3316                 !d_unlinked(dentry));
3317         spin_lock(&dentry->d_parent->d_lock);
3318         spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3319         dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3320                                 (unsigned long long)inode->i_ino);
3321         spin_unlock(&dentry->d_lock);
3322         spin_unlock(&dentry->d_parent->d_lock);
3323         d_instantiate(dentry, inode);
3324 }
3325 EXPORT_SYMBOL(d_tmpfile);
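
/*
 * Illustrative sketch (not part of dcache.c): a filesystem's ->tmpfile()
 * method typically allocates an already-unlinked inode and hands it to
 * d_tmpfile(), which then names the dentry after the inode number.
 * examplefs_new_inode() is a hypothetical helper that returns a new inode
 * or an ERR_PTR.
 */
static int examplefs_tmpfile(struct inode *dir, struct dentry *dentry,
                             umode_t mode)
{
        struct inode *inode = examplefs_new_inode(dir, mode);

        if (IS_ERR(inode))
                return PTR_ERR(inode);
        d_tmpfile(dentry, inode);
        return 0;
}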
3326 
3327 static __initdata unsigned long dhash_entries;
3328 static int __init set_dhash_entries(char *str)
3329 {
3330         if (!str)
3331                 return 0;
3332         dhash_entries = simple_strtoul(str, &str, 0);
3333         return 1;
3334 }
3335 __setup("dhash_entries=", set_dhash_entries);
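
/*
 * "dhash_entries=" is a boot-time parameter: for example, booting with
 * dhash_entries=1048576 on the kernel command line requests roughly 2^20
 * hash buckets instead of the memory-based default (the value is rounded
 * by alloc_large_system_hash()).
 */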
3336 
3337 static void __init dcache_init_early(void)
3338 {
3339         unsigned int loop;
3340 
3341         /* If hashes are distributed across NUMA nodes, defer
3342          * hash allocation until vmalloc space is available.
3343          */
3344         if (hashdist)
3345                 return;
3346 
3347         dentry_hashtable =
3348                 alloc_large_system_hash("Dentry cache",
3349                                         sizeof(struct hlist_bl_head),
3350                                         dhash_entries,
3351                                         13,
3352                                         HASH_EARLY,
3353                                         &d_hash_shift,
3354                                         &d_hash_mask,
3355                                         0,
3356                                         0);
3357 
3358         for (loop = 0; loop < (1U << d_hash_shift); loop++)
3359                 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3360 }
3361 
3362 static void __init dcache_init(void)
3363 {
3364         unsigned int loop;
3365 
3366         /* 
3367          * A constructor could be added for stable state like the lists,
3368          * but it is probably not worth it because of the cache nature
3369          * of the dcache. 
3370          */
3371         dentry_cache = KMEM_CACHE(dentry,
3372                 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3373 
3374         /* Hash may have been set up in dcache_init_early */
3375         if (!hashdist)
3376                 return;
3377 
3378         dentry_hashtable =
3379                 alloc_large_system_hash("Dentry cache",
3380                                         sizeof(struct hlist_bl_head),
3381                                         dhash_entries,
3382                                         13,
3383                                         0,
3384                                         &d_hash_shift,
3385                                         &d_hash_mask,
3386                                         0,
3387                                         0);
3388 
3389         for (loop = 0; loop < (1U << d_hash_shift); loop++)
3390                 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3391 }
3392 
3393 /* SLAB cache for __getname() consumers */
3394 struct kmem_cache *names_cachep __read_mostly;
3395 EXPORT_SYMBOL(names_cachep);
3396 
3397 EXPORT_SYMBOL(d_genocide);
3398 
3399 void __init vfs_caches_init_early(void)
3400 {
3401         dcache_init_early();
3402         inode_init_early();
3403 }
3404 
3405 void __init vfs_caches_init(unsigned long mempages)
3406 {
3407         unsigned long reserve;
3408 
3409         /* Base hash sizes on available memory, with a reserve equal to
3410            150% of current kernel size */
3411 
3412         reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3413         mempages -= reserve;
3414 
3415         names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3416                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3417 
3418         dcache_init();
3419         inode_init();
3420         files_init(mempages);
3421         mnt_init();
3422         bdev_cache_init();
3423         chrdev_init();
3424 }
3425 
