
Linux/fs/dcache.c

  1 /*
  2  * fs/dcache.c
  3  *
  4  * Complete reimplementation
  5  * (C) 1997 Thomas Schoebel-Theuer,
  6  * with heavy changes by Linus Torvalds
  7  */
  8 
  9 /*
 10  * Notes on the allocation strategy:
 11  *
 12  * The dcache is a master of the icache - whenever a dcache entry
 13  * exists, the inode will always exist. "iput()" is done either when
 14  * the dcache entry is deleted or garbage collected.
 15  */
 16 
 17 #include <linux/syscalls.h>
 18 #include <linux/string.h>
 19 #include <linux/mm.h>
 20 #include <linux/fs.h>
 21 #include <linux/fsnotify.h>
 22 #include <linux/slab.h>
 23 #include <linux/init.h>
 24 #include <linux/hash.h>
 25 #include <linux/cache.h>
 26 #include <linux/export.h>
 27 #include <linux/mount.h>
 28 #include <linux/file.h>
 29 #include <asm/uaccess.h>
 30 #include <linux/security.h>
 31 #include <linux/seqlock.h>
 32 #include <linux/swap.h>
 33 #include <linux/bootmem.h>
 34 #include <linux/fs_struct.h>
 35 #include <linux/hardirq.h>
 36 #include <linux/bit_spinlock.h>
 37 #include <linux/rculist_bl.h>
 38 #include <linux/prefetch.h>
 39 #include <linux/ratelimit.h>
 40 #include <linux/list_lru.h>
 41 #include <linux/kasan.h>
 42 
 43 #include "internal.h"
 44 #include "mount.h"
 45 
 46 /*
 47  * Usage:
 48  * dcache->d_inode->i_lock protects:
 49  *   - i_dentry, d_u.d_alias, d_inode of aliases
 50  * dcache_hash_bucket lock protects:
 51  *   - the dcache hash table
 52  * s_anon bl list spinlock protects:
 53  *   - the s_anon list (see __d_drop)
 54  * dentry->d_sb->s_dentry_lru_lock protects:
 55  *   - the dcache lru lists and counters
 56  * d_lock protects:
 57  *   - d_flags
 58  *   - d_name
 59  *   - d_lru
 60  *   - d_count
 61  *   - d_unhashed()
 62  *   - d_parent and d_subdirs
  63  *   - children's d_child and d_parent
 64  *   - d_u.d_alias, d_inode
 65  *
 66  * Ordering:
 67  * dentry->d_inode->i_lock
 68  *   dentry->d_lock
 69  *     dentry->d_sb->s_dentry_lru_lock
 70  *     dcache_hash_bucket lock
 71  *     s_anon lock
 72  *
 73  * If there is an ancestor relationship:
 74  * dentry->d_parent->...->d_parent->d_lock
 75  *   ...
 76  *     dentry->d_parent->d_lock
 77  *       dentry->d_lock
 78  *
 79  * If no ancestor relationship:
 80  * if (dentry1 < dentry2)
 81  *   dentry1->d_lock
 82  *     dentry2->d_lock
 83  */
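
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): one way
 * a caller could take d_lock on two dentries with no ancestor relationship
 * while respecting the address ordering described above.  The helper name
 * is made up for illustration only.
 */
static inline void lock_two_unrelated_dentries(struct dentry *d1,
                                               struct dentry *d2)
{
        if (d1 > d2)
                swap(d1, d2);           /* always lock the lower address first */
        spin_lock(&d1->d_lock);
        spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
}
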
 84 int sysctl_vfs_cache_pressure __read_mostly = 100;
 85 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 86 
 87 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 88 
 89 EXPORT_SYMBOL(rename_lock);
 90 
 91 static struct kmem_cache *dentry_cache __read_mostly;
 92 
 93 /*
 94  * This is the single most critical data structure when it comes
 95  * to the dcache: the hashtable for lookups. Somebody should try
 96  * to make this good - I've just made it work.
 97  *
 98  * This hash-function tries to avoid losing too many bits of hash
 99  * information, yet avoid using a prime hash-size or similar.
100  */
101 
102 static unsigned int d_hash_mask __read_mostly;
103 static unsigned int d_hash_shift __read_mostly;
104 
105 static struct hlist_bl_head *dentry_hashtable __read_mostly;
106 
107 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
108                                         unsigned int hash)
109 {
110         hash += (unsigned long) parent / L1_CACHE_BYTES;
111         return dentry_hashtable + hash_32(hash, d_hash_shift);
112 }
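
/*
 * Worked example (editor's addition): with 64-byte cache lines, a child
 * named "a" of parent dentry P is placed in the bucket
 *
 *         dentry_hashtable + hash_32(hash("a") + (unsigned long)P / 64,
 *                                    d_hash_shift)
 *
 * so the parent pointer (scaled down by L1_CACHE_BYTES) perturbs the name
 * hash, and the same name under different parents normally lands in a
 * different bucket.
 */
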
113 
114 #define IN_LOOKUP_SHIFT 10
115 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
116 
117 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
118                                         unsigned int hash)
119 {
120         hash += (unsigned long) parent / L1_CACHE_BYTES;
121         return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
122 }
123 
124 
125 /* Statistics gathering. */
126 struct dentry_stat_t dentry_stat = {
127         .age_limit = 45,
128 };
129 
130 static DEFINE_PER_CPU(long, nr_dentry);
131 static DEFINE_PER_CPU(long, nr_dentry_unused);
132 
133 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
134 
135 /*
136  * Here we resort to our own counters instead of using generic per-cpu counters
 137  * for consistency with what the vfs inode code does. We expect to get better
 138  * code and performance by having our own specialized counters.
139  *
140  * Please note that the loop is done over all possible CPUs, not over all online
141  * CPUs. The reason for this is that we don't want to play games with CPUs going
142  * on and off. If one of them goes off, we will just keep their counters.
143  *
144  * glommer: See cffbc8a for details, and if you ever intend to change this,
145  * please update all vfs counters to match.
146  */
147 static long get_nr_dentry(void)
148 {
149         int i;
150         long sum = 0;
151         for_each_possible_cpu(i)
152                 sum += per_cpu(nr_dentry, i);
153         return sum < 0 ? 0 : sum;
154 }
155 
156 static long get_nr_dentry_unused(void)
157 {
158         int i;
159         long sum = 0;
160         for_each_possible_cpu(i)
161                 sum += per_cpu(nr_dentry_unused, i);
162         return sum < 0 ? 0 : sum;
163 }
164 
165 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
166                    size_t *lenp, loff_t *ppos)
167 {
168         dentry_stat.nr_dentry = get_nr_dentry();
169         dentry_stat.nr_unused = get_nr_dentry_unused();
170         return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
171 }
172 #endif
173 
174 /*
175  * Compare 2 name strings, return 0 if they match, otherwise non-zero.
176  * The strings are both count bytes long, and count is non-zero.
177  */
178 #ifdef CONFIG_DCACHE_WORD_ACCESS
179 
180 #include <asm/word-at-a-time.h>
181 /*
 182  * NOTE! 'cs' and 'scount' come from a dentry, so it has an
183  * aligned allocation for this particular component. We don't
184  * strictly need the load_unaligned_zeropad() safety, but it
185  * doesn't hurt either.
186  *
187  * In contrast, 'ct' and 'tcount' can be from a pathname, and do
188  * need the careful unaligned handling.
189  */
190 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
191 {
192         unsigned long a,b,mask;
193 
194         for (;;) {
195                 a = *(unsigned long *)cs;
196                 b = load_unaligned_zeropad(ct);
197                 if (tcount < sizeof(unsigned long))
198                         break;
199                 if (unlikely(a != b))
200                         return 1;
201                 cs += sizeof(unsigned long);
202                 ct += sizeof(unsigned long);
203                 tcount -= sizeof(unsigned long);
204                 if (!tcount)
205                         return 0;
206         }
207         mask = bytemask_from_count(tcount);
208         return unlikely(!!((a ^ b) & mask));
209 }
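
/*
 * Worked example (editor's addition): on a little-endian 64-bit build,
 * bytemask_from_count(3) yields a mask covering the three low-order bytes
 * (0x0000000000ffffff), so when three name bytes remain only those bytes
 * of (a ^ b) can report a mismatch; the bytes read beyond the end of the
 * name are masked off before the final comparison.
 */
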
210 
211 #else
212 
213 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
214 {
215         do {
216                 if (*cs != *ct)
217                         return 1;
218                 cs++;
219                 ct++;
220                 tcount--;
221         } while (tcount);
222         return 0;
223 }
224 
225 #endif
226 
227 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
228 {
229         const unsigned char *cs;
230         /*
231          * Be careful about RCU walk racing with rename:
232          * use ACCESS_ONCE to fetch the name pointer.
233          *
234          * NOTE! Even if a rename will mean that the length
235          * was not loaded atomically, we don't care. The
236          * RCU walk will check the sequence count eventually,
237          * and catch it. And we won't overrun the buffer,
238          * because we're reading the name pointer atomically,
239          * and a dentry name is guaranteed to be properly
240          * terminated with a NUL byte.
241          *
242          * End result: even if 'len' is wrong, we'll exit
243          * early because the data cannot match (there can
244          * be no NUL in the ct/tcount data)
245          */
246         cs = ACCESS_ONCE(dentry->d_name.name);
247         smp_read_barrier_depends();
248         return dentry_string_cmp(cs, ct, tcount);
249 }
250 
251 struct external_name {
252         union {
253                 atomic_t count;
254                 struct rcu_head head;
255         } u;
256         unsigned char name[];
257 };
258 
259 static inline struct external_name *external_name(struct dentry *dentry)
260 {
261         return container_of(dentry->d_name.name, struct external_name, name[0]);
262 }
263 
264 static void __d_free(struct rcu_head *head)
265 {
266         struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
267 
268         kmem_cache_free(dentry_cache, dentry); 
269 }
270 
271 static void __d_free_external(struct rcu_head *head)
272 {
273         struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
274         kfree(external_name(dentry));
275         kmem_cache_free(dentry_cache, dentry); 
276 }
277 
278 static inline int dname_external(const struct dentry *dentry)
279 {
280         return dentry->d_name.name != dentry->d_iname;
281 }
282 
283 static inline void __d_set_inode_and_type(struct dentry *dentry,
284                                           struct inode *inode,
285                                           unsigned type_flags)
286 {
287         unsigned flags;
288 
289         dentry->d_inode = inode;
290         flags = READ_ONCE(dentry->d_flags);
291         flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
292         flags |= type_flags;
293         WRITE_ONCE(dentry->d_flags, flags);
294 }
295 
296 static inline void __d_clear_type_and_inode(struct dentry *dentry)
297 {
298         unsigned flags = READ_ONCE(dentry->d_flags);
299 
300         flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
301         WRITE_ONCE(dentry->d_flags, flags);
302         dentry->d_inode = NULL;
303 }
304 
305 static void dentry_free(struct dentry *dentry)
306 {
307         WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
308         if (unlikely(dname_external(dentry))) {
309                 struct external_name *p = external_name(dentry);
310                 if (likely(atomic_dec_and_test(&p->u.count))) {
311                         call_rcu(&dentry->d_u.d_rcu, __d_free_external);
312                         return;
313                 }
314         }
315         /* if dentry was never visible to RCU, immediate free is OK */
316         if (!(dentry->d_flags & DCACHE_RCUACCESS))
317                 __d_free(&dentry->d_u.d_rcu);
318         else
319                 call_rcu(&dentry->d_u.d_rcu, __d_free);
320 }
321 
322 /**
323  * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
324  * @dentry: the target dentry
325  * After this call, in-progress rcu-walk path lookup will fail. This
326  * should be called after unhashing, and after changing d_inode (if
327  * the dentry has not already been unhashed).
328  */
329 static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
330 {
331         lockdep_assert_held(&dentry->d_lock);
 332         /* Go through an invalidation barrier */
333         write_seqcount_invalidate(&dentry->d_seq);
334 }
335 
336 /*
337  * Release the dentry's inode, using the filesystem
338  * d_iput() operation if defined. Dentry has no refcount
339  * and is unhashed.
340  */
341 static void dentry_iput(struct dentry * dentry)
342         __releases(dentry->d_lock)
343         __releases(dentry->d_inode->i_lock)
344 {
345         struct inode *inode = dentry->d_inode;
346         if (inode) {
347                 __d_clear_type_and_inode(dentry);
348                 hlist_del_init(&dentry->d_u.d_alias);
349                 spin_unlock(&dentry->d_lock);
350                 spin_unlock(&inode->i_lock);
351                 if (!inode->i_nlink)
352                         fsnotify_inoderemove(inode);
353                 if (dentry->d_op && dentry->d_op->d_iput)
354                         dentry->d_op->d_iput(dentry, inode);
355                 else
356                         iput(inode);
357         } else {
358                 spin_unlock(&dentry->d_lock);
359         }
360 }
361 
362 /*
363  * Release the dentry's inode, using the filesystem
364  * d_iput() operation if defined. dentry remains in-use.
365  */
366 static void dentry_unlink_inode(struct dentry * dentry)
367         __releases(dentry->d_lock)
368         __releases(dentry->d_inode->i_lock)
369 {
370         struct inode *inode = dentry->d_inode;
371 
372         raw_write_seqcount_begin(&dentry->d_seq);
373         __d_clear_type_and_inode(dentry);
374         hlist_del_init(&dentry->d_u.d_alias);
375         raw_write_seqcount_end(&dentry->d_seq);
376         spin_unlock(&dentry->d_lock);
377         spin_unlock(&inode->i_lock);
378         if (!inode->i_nlink)
379                 fsnotify_inoderemove(inode);
380         if (dentry->d_op && dentry->d_op->d_iput)
381                 dentry->d_op->d_iput(dentry, inode);
382         else
383                 iput(inode);
384 }
385 
386 /*
387  * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
388  * is in use - which includes both the "real" per-superblock
389  * LRU list _and_ the DCACHE_SHRINK_LIST use.
390  *
391  * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
392  * on the shrink list (ie not on the superblock LRU list).
393  *
394  * The per-cpu "nr_dentry_unused" counters are updated with
395  * the DCACHE_LRU_LIST bit.
396  *
397  * These helper functions make sure we always follow the
398  * rules. d_lock must be held by the caller.
399  */
400 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
401 static void d_lru_add(struct dentry *dentry)
402 {
403         D_FLAG_VERIFY(dentry, 0);
404         dentry->d_flags |= DCACHE_LRU_LIST;
405         this_cpu_inc(nr_dentry_unused);
406         WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
407 }
408 
409 static void d_lru_del(struct dentry *dentry)
410 {
411         D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
412         dentry->d_flags &= ~DCACHE_LRU_LIST;
413         this_cpu_dec(nr_dentry_unused);
414         WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
415 }
416 
417 static void d_shrink_del(struct dentry *dentry)
418 {
419         D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
420         list_del_init(&dentry->d_lru);
421         dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
422         this_cpu_dec(nr_dentry_unused);
423 }
424 
425 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
426 {
427         D_FLAG_VERIFY(dentry, 0);
428         list_add(&dentry->d_lru, list);
429         dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
430         this_cpu_inc(nr_dentry_unused);
431 }
432 
433 /*
434  * These can only be called under the global LRU lock, ie during the
435  * callback for freeing the LRU list. "isolate" removes it from the
436  * LRU lists entirely, while shrink_move moves it to the indicated
437  * private list.
438  */
439 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
440 {
441         D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
442         dentry->d_flags &= ~DCACHE_LRU_LIST;
443         this_cpu_dec(nr_dentry_unused);
444         list_lru_isolate(lru, &dentry->d_lru);
445 }
446 
447 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
448                               struct list_head *list)
449 {
450         D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
451         dentry->d_flags |= DCACHE_SHRINK_LIST;
452         list_lru_isolate_move(lru, &dentry->d_lru, list);
453 }
454 
455 /*
 456  * dentry_lru_(add|del) must be called with d_lock held.
457  */
458 static void dentry_lru_add(struct dentry *dentry)
459 {
460         if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
461                 d_lru_add(dentry);
462 }
463 
464 /**
465  * d_drop - drop a dentry
466  * @dentry: dentry to drop
467  *
468  * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
469  * be found through a VFS lookup any more. Note that this is different from
470  * deleting the dentry - d_delete will try to mark the dentry negative if
471  * possible, giving a successful _negative_ lookup, while d_drop will
472  * just make the cache lookup fail.
473  *
474  * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
475  * reason (NFS timeouts or autofs deletes).
476  *
477  * __d_drop requires dentry->d_lock.
478  */
479 void __d_drop(struct dentry *dentry)
480 {
481         if (!d_unhashed(dentry)) {
482                 struct hlist_bl_head *b;
483                 /*
484                  * Hashed dentries are normally on the dentry hashtable,
485                  * with the exception of those newly allocated by
486                  * d_obtain_alias, which are always IS_ROOT:
487                  */
488                 if (unlikely(IS_ROOT(dentry)))
489                         b = &dentry->d_sb->s_anon;
490                 else
491                         b = d_hash(dentry->d_parent, dentry->d_name.hash);
492 
493                 hlist_bl_lock(b);
494                 __hlist_bl_del(&dentry->d_hash);
495                 dentry->d_hash.pprev = NULL;
496                 hlist_bl_unlock(b);
497                 dentry_rcuwalk_invalidate(dentry);
498         }
499 }
500 EXPORT_SYMBOL(__d_drop);
501 
502 void d_drop(struct dentry *dentry)
503 {
504         spin_lock(&dentry->d_lock);
505         __d_drop(dentry);
506         spin_unlock(&dentry->d_lock);
507 }
508 EXPORT_SYMBOL(d_drop);
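
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): a
 * filesystem that has learned a cached name is stale can simply drop it;
 * the "stale" flag here stands in for a filesystem-specific check.
 */
static void example_forget_if_stale(struct dentry *dentry, bool stale)
{
        if (stale)
                d_drop(dentry);         /* future lookups will miss the dcache */
}
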
509 
510 static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
511 {
512         struct dentry *next;
513         /*
514          * Inform d_walk() and shrink_dentry_list() that we are no longer
515          * attached to the dentry tree
516          */
517         dentry->d_flags |= DCACHE_DENTRY_KILLED;
518         if (unlikely(list_empty(&dentry->d_child)))
519                 return;
520         __list_del_entry(&dentry->d_child);
521         /*
522          * Cursors can move around the list of children.  While we'd been
523          * a normal list member, it didn't matter - ->d_child.next would've
524          * been updated.  However, from now on it won't be and for the
525          * things like d_walk() it might end up with a nasty surprise.
526          * Normally d_walk() doesn't care about cursors moving around -
527          * ->d_lock on parent prevents that and since a cursor has no children
528          * of its own, we get through it without ever unlocking the parent.
529          * There is one exception, though - if we ascend from a child that
530          * gets killed as soon as we unlock it, the next sibling is found
531          * using the value left in its ->d_child.next.  And if _that_
532          * pointed to a cursor, and cursor got moved (e.g. by lseek())
533          * before d_walk() regains parent->d_lock, we'll end up skipping
534          * everything the cursor had been moved past.
535          *
536          * Solution: make sure that the pointer left behind in ->d_child.next
537          * points to something that won't be moving around.  I.e. skip the
538          * cursors.
539          */
540         while (dentry->d_child.next != &parent->d_subdirs) {
541                 next = list_entry(dentry->d_child.next, struct dentry, d_child);
542                 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
543                         break;
544                 dentry->d_child.next = next->d_child.next;
545         }
546 }
547 
548 static void __dentry_kill(struct dentry *dentry)
549 {
550         struct dentry *parent = NULL;
551         bool can_free = true;
552         if (!IS_ROOT(dentry))
553                 parent = dentry->d_parent;
554 
555         /*
556          * The dentry is now unrecoverably dead to the world.
557          */
558         lockref_mark_dead(&dentry->d_lockref);
559 
560         /*
561          * inform the fs via d_prune that this dentry is about to be
562          * unhashed and destroyed.
563          */
564         if (dentry->d_flags & DCACHE_OP_PRUNE)
565                 dentry->d_op->d_prune(dentry);
566 
567         if (dentry->d_flags & DCACHE_LRU_LIST) {
568                 if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
569                         d_lru_del(dentry);
570         }
571         /* if it was on the hash then remove it */
572         __d_drop(dentry);
573         dentry_unlist(dentry, parent);
574         if (parent)
575                 spin_unlock(&parent->d_lock);
576         dentry_iput(dentry);
577         /*
578          * dentry_iput drops the locks, at which point nobody (except
579          * transient RCU lookups) can reach this dentry.
580          */
581         BUG_ON(dentry->d_lockref.count > 0);
582         this_cpu_dec(nr_dentry);
583         if (dentry->d_op && dentry->d_op->d_release)
584                 dentry->d_op->d_release(dentry);
585 
586         spin_lock(&dentry->d_lock);
587         if (dentry->d_flags & DCACHE_SHRINK_LIST) {
588                 dentry->d_flags |= DCACHE_MAY_FREE;
589                 can_free = false;
590         }
591         spin_unlock(&dentry->d_lock);
592         if (likely(can_free))
593                 dentry_free(dentry);
594 }
595 
596 /*
597  * Finish off a dentry we've decided to kill.
598  * dentry->d_lock must be held, returns with it unlocked.
 599  * Returns a dentry still requiring a refcount drop (the parent, or the
 600  * same dentry to retry), or NULL if we're done.
601  */
602 static struct dentry *dentry_kill(struct dentry *dentry)
603         __releases(dentry->d_lock)
604 {
605         struct inode *inode = dentry->d_inode;
606         struct dentry *parent = NULL;
607 
608         if (inode && unlikely(!spin_trylock(&inode->i_lock)))
609                 goto failed;
610 
611         if (!IS_ROOT(dentry)) {
612                 parent = dentry->d_parent;
613                 if (unlikely(!spin_trylock(&parent->d_lock))) {
614                         if (inode)
615                                 spin_unlock(&inode->i_lock);
616                         goto failed;
617                 }
618         }
619 
620         __dentry_kill(dentry);
621         return parent;
622 
623 failed:
624         spin_unlock(&dentry->d_lock);
625         cpu_relax();
626         return dentry; /* try again with same dentry */
627 }
628 
629 static inline struct dentry *lock_parent(struct dentry *dentry)
630 {
631         struct dentry *parent = dentry->d_parent;
632         if (IS_ROOT(dentry))
633                 return NULL;
634         if (unlikely(dentry->d_lockref.count < 0))
635                 return NULL;
636         if (likely(spin_trylock(&parent->d_lock)))
637                 return parent;
638         rcu_read_lock();
639         spin_unlock(&dentry->d_lock);
640 again:
641         parent = ACCESS_ONCE(dentry->d_parent);
642         spin_lock(&parent->d_lock);
643         /*
644          * We can't blindly lock dentry until we are sure
645          * that we won't violate the locking order.
646          * Any changes of dentry->d_parent must have
647          * been done with parent->d_lock held, so
648          * spin_lock() above is enough of a barrier
649          * for checking if it's still our child.
650          */
651         if (unlikely(parent != dentry->d_parent)) {
652                 spin_unlock(&parent->d_lock);
653                 goto again;
654         }
655         rcu_read_unlock();
656         if (parent != dentry)
657                 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
658         else
659                 parent = NULL;
660         return parent;
661 }
662 
663 /*
664  * Try to do a lockless dput(), and return whether that was successful.
665  *
666  * If unsuccessful, we return false, having already taken the dentry lock.
667  *
668  * The caller needs to hold the RCU read lock, so that the dentry is
669  * guaranteed to stay around even if the refcount goes down to zero!
670  */
671 static inline bool fast_dput(struct dentry *dentry)
672 {
673         int ret;
674         unsigned int d_flags;
675 
676         /*
 677          * If we have a d_op->d_delete() operation, we should not
678          * let the dentry count go to zero, so use "put_or_lock".
679          */
680         if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
681                 return lockref_put_or_lock(&dentry->d_lockref);
682 
683         /*
684          * .. otherwise, we can try to just decrement the
685          * lockref optimistically.
686          */
687         ret = lockref_put_return(&dentry->d_lockref);
688 
689         /*
690          * If the lockref_put_return() failed due to the lock being held
691          * by somebody else, the fast path has failed. We will need to
692          * get the lock, and then check the count again.
693          */
694         if (unlikely(ret < 0)) {
695                 spin_lock(&dentry->d_lock);
696                 if (dentry->d_lockref.count > 1) {
697                         dentry->d_lockref.count--;
698                         spin_unlock(&dentry->d_lock);
699                         return 1;
700                 }
701                 return 0;
702         }
703 
704         /*
705          * If we weren't the last ref, we're done.
706          */
707         if (ret)
708                 return 1;
709 
710         /*
711          * Careful, careful. The reference count went down
712          * to zero, but we don't hold the dentry lock, so
713          * somebody else could get it again, and do another
714          * dput(), and we need to not race with that.
715          *
716          * However, there is a very special and common case
717          * where we don't care, because there is nothing to
718          * do: the dentry is still hashed, it does not have
719          * a 'delete' op, and it's referenced and already on
720          * the LRU list.
721          *
722          * NOTE! Since we aren't locked, these values are
723          * not "stable". However, it is sufficient that at
724          * some point after we dropped the reference the
725          * dentry was hashed and the flags had the proper
726          * value. Other dentry users may have re-gotten
727          * a reference to the dentry and change that, but
728          * our work is done - we can leave the dentry
729          * around with a zero refcount.
730          */
731         smp_rmb();
732         d_flags = ACCESS_ONCE(dentry->d_flags);
733         d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
734 
735         /* Nothing to do? Dropping the reference was all we needed? */
736         if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
737                 return 1;
738 
739         /*
740          * Not the fast normal case? Get the lock. We've already decremented
741          * the refcount, but we'll need to re-check the situation after
742          * getting the lock.
743          */
744         spin_lock(&dentry->d_lock);
745 
746         /*
747          * Did somebody else grab a reference to it in the meantime, and
748          * we're no longer the last user after all? Alternatively, somebody
749          * else could have killed it and marked it dead. Either way, we
750          * don't need to do anything else.
751          */
752         if (dentry->d_lockref.count) {
753                 spin_unlock(&dentry->d_lock);
754                 return 1;
755         }
756 
757         /*
758          * Re-get the reference we optimistically dropped. We hold the
759          * lock, and we just tested that it was zero, so we can just
760          * set it to 1.
761          */
762         dentry->d_lockref.count = 1;
763         return 0;
764 }
765 
766 
767 /* 
768  * This is dput
769  *
770  * This is complicated by the fact that we do not want to put
771  * dentries that are no longer on any hash chain on the unused
772  * list: we'd much rather just get rid of them immediately.
773  *
774  * However, that implies that we have to traverse the dentry
775  * tree upwards to the parents which might _also_ now be
776  * scheduled for deletion (it may have been only waiting for
777  * its last child to go away).
778  *
779  * This tail recursion is done by hand as we don't want to depend
780  * on the compiler to always get this right (gcc generally doesn't).
781  * Real recursion would eat up our stack space.
782  */
783 
784 /*
785  * dput - release a dentry
786  * @dentry: dentry to release 
787  *
788  * Release a dentry. This will drop the usage count and if appropriate
789  * call the dentry unlink method as well as removing it from the queues and
790  * releasing its resources. If the parent dentries were scheduled for release
791  * they too may now get deleted.
792  */
793 void dput(struct dentry *dentry)
794 {
795         if (unlikely(!dentry))
796                 return;
797 
798 repeat:
799         rcu_read_lock();
800         if (likely(fast_dput(dentry))) {
801                 rcu_read_unlock();
802                 return;
803         }
804 
805         /* Slow case: now with the dentry lock held */
806         rcu_read_unlock();
807 
808         WARN_ON(d_in_lookup(dentry));
809 
810         /* Unreachable? Get rid of it */
811         if (unlikely(d_unhashed(dentry)))
812                 goto kill_it;
813 
814         if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
815                 goto kill_it;
816 
817         if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
818                 if (dentry->d_op->d_delete(dentry))
819                         goto kill_it;
820         }
821 
822         if (!(dentry->d_flags & DCACHE_REFERENCED))
823                 dentry->d_flags |= DCACHE_REFERENCED;
824         dentry_lru_add(dentry);
825 
826         dentry->d_lockref.count--;
827         spin_unlock(&dentry->d_lock);
828         return;
829 
830 kill_it:
831         dentry = dentry_kill(dentry);
832         if (dentry)
833                 goto repeat;
834 }
835 EXPORT_SYMBOL(dput);
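
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): every
 * reference handed out by a lookup must be balanced by dput().  d_lookup()
 * returns a referenced child dentry of @parent, or NULL.
 */
static bool example_name_is_cached(struct dentry *parent,
                                   const struct qstr *name)
{
        struct dentry *child = d_lookup(parent, name);

        if (!child)
                return false;
        dput(child);            /* drop the reference d_lookup() took */
        return true;
}
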
836 
837 
838 /* This must be called with d_lock held */
839 static inline void __dget_dlock(struct dentry *dentry)
840 {
841         dentry->d_lockref.count++;
842 }
843 
844 static inline void __dget(struct dentry *dentry)
845 {
846         lockref_get(&dentry->d_lockref);
847 }
848 
849 struct dentry *dget_parent(struct dentry *dentry)
850 {
851         int gotref;
852         struct dentry *ret;
853 
854         /*
855          * Do optimistic parent lookup without any
856          * locking.
857          */
858         rcu_read_lock();
859         ret = ACCESS_ONCE(dentry->d_parent);
860         gotref = lockref_get_not_zero(&ret->d_lockref);
861         rcu_read_unlock();
862         if (likely(gotref)) {
863                 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
864                         return ret;
865                 dput(ret);
866         }
867 
868 repeat:
869         /*
870          * Don't need rcu_dereference because we re-check it was correct under
871          * the lock.
872          */
873         rcu_read_lock();
874         ret = dentry->d_parent;
875         spin_lock(&ret->d_lock);
876         if (unlikely(ret != dentry->d_parent)) {
877                 spin_unlock(&ret->d_lock);
878                 rcu_read_unlock();
879                 goto repeat;
880         }
881         rcu_read_unlock();
882         BUG_ON(!ret->d_lockref.count);
883         ret->d_lockref.count++;
884         spin_unlock(&ret->d_lock);
885         return ret;
886 }
887 EXPORT_SYMBOL(dget_parent);
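
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): walking
 * from a dentry up to its filesystem root while always holding a reference
 * on the dentry currently being examined.
 */
static struct dentry *example_walk_to_root(struct dentry *dentry)
{
        struct dentry *d = dget(dentry);

        while (!IS_ROOT(d)) {
                struct dentry *parent = dget_parent(d);

                dput(d);
                d = parent;
        }
        return d;               /* caller must dput() the result */
}
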
888 
889 /**
890  * d_find_alias - grab a hashed alias of inode
891  * @inode: inode in question
892  *
893  * If inode has a hashed alias, or is a directory and has any alias,
894  * acquire the reference to alias and return it. Otherwise return NULL.
895  * Notice that if inode is a directory there can be only one alias and
896  * it can be unhashed only if it has no children, or if it is the root
897  * of a filesystem, or if the directory was renamed and d_revalidate
898  * was the first vfs operation to notice.
899  *
900  * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
901  * any other hashed alias over that one.
902  */
903 static struct dentry *__d_find_alias(struct inode *inode)
904 {
905         struct dentry *alias, *discon_alias;
906 
907 again:
908         discon_alias = NULL;
909         hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
910                 spin_lock(&alias->d_lock);
911                 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
912                         if (IS_ROOT(alias) &&
913                             (alias->d_flags & DCACHE_DISCONNECTED)) {
914                                 discon_alias = alias;
915                         } else {
916                                 __dget_dlock(alias);
917                                 spin_unlock(&alias->d_lock);
918                                 return alias;
919                         }
920                 }
921                 spin_unlock(&alias->d_lock);
922         }
923         if (discon_alias) {
924                 alias = discon_alias;
925                 spin_lock(&alias->d_lock);
926                 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
927                         __dget_dlock(alias);
928                         spin_unlock(&alias->d_lock);
929                         return alias;
930                 }
931                 spin_unlock(&alias->d_lock);
932                 goto again;
933         }
934         return NULL;
935 }
936 
937 struct dentry *d_find_alias(struct inode *inode)
938 {
939         struct dentry *de = NULL;
940 
941         if (!hlist_empty(&inode->i_dentry)) {
942                 spin_lock(&inode->i_lock);
943                 de = __d_find_alias(inode);
944                 spin_unlock(&inode->i_lock);
945         }
946         return de;
947 }
948 EXPORT_SYMBOL(d_find_alias);
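
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): using
 * d_find_alias() to report some cached name of an inode; the reference it
 * returns must be dropped with dput().
 */
static void example_report_alias(struct inode *inode)
{
        struct dentry *alias = d_find_alias(inode);

        if (alias) {
                pr_info("inode %lu is cached as %pd\n", inode->i_ino, alias);
                dput(alias);
        }
}
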
949 
950 /*
951  *      Try to kill dentries associated with this inode.
952  * WARNING: you must own a reference to inode.
953  */
954 void d_prune_aliases(struct inode *inode)
955 {
956         struct dentry *dentry;
957 restart:
958         spin_lock(&inode->i_lock);
959         hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
960                 spin_lock(&dentry->d_lock);
961                 if (!dentry->d_lockref.count) {
962                         struct dentry *parent = lock_parent(dentry);
963                         if (likely(!dentry->d_lockref.count)) {
964                                 __dentry_kill(dentry);
965                                 dput(parent);
966                                 goto restart;
967                         }
968                         if (parent)
969                                 spin_unlock(&parent->d_lock);
970                 }
971                 spin_unlock(&dentry->d_lock);
972         }
973         spin_unlock(&inode->i_lock);
974 }
975 EXPORT_SYMBOL(d_prune_aliases);
976 
977 static void shrink_dentry_list(struct list_head *list)
978 {
979         struct dentry *dentry, *parent;
980 
981         while (!list_empty(list)) {
982                 struct inode *inode;
983                 dentry = list_entry(list->prev, struct dentry, d_lru);
984                 spin_lock(&dentry->d_lock);
985                 parent = lock_parent(dentry);
986 
987                 /*
988                  * The dispose list is isolated and dentries are not accounted
989                  * to the LRU here, so we can simply remove it from the list
990                  * here regardless of whether it is referenced or not.
991                  */
992                 d_shrink_del(dentry);
993 
994                 /*
995                  * We found an inuse dentry which was not removed from
996                  * the LRU because of laziness during lookup. Do not free it.
997                  */
998                 if (dentry->d_lockref.count > 0) {
999                         spin_unlock(&dentry->d_lock);
1000                         if (parent)
1001                                 spin_unlock(&parent->d_lock);
1002                         continue;
1003                 }
1004 
1005 
1006                 if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
1007                         bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
1008                         spin_unlock(&dentry->d_lock);
1009                         if (parent)
1010                                 spin_unlock(&parent->d_lock);
1011                         if (can_free)
1012                                 dentry_free(dentry);
1013                         continue;
1014                 }
1015 
1016                 inode = dentry->d_inode;
1017                 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1018                         d_shrink_add(dentry, list);
1019                         spin_unlock(&dentry->d_lock);
1020                         if (parent)
1021                                 spin_unlock(&parent->d_lock);
1022                         continue;
1023                 }
1024 
1025                 __dentry_kill(dentry);
1026 
1027                 /*
1028                  * We need to prune ancestors too. This is necessary to prevent
1029                  * quadratic behavior of shrink_dcache_parent(), but is also
1030                  * expected to be beneficial in reducing dentry cache
1031                  * fragmentation.
1032                  */
1033                 dentry = parent;
1034                 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
1035                         parent = lock_parent(dentry);
1036                         if (dentry->d_lockref.count != 1) {
1037                                 dentry->d_lockref.count--;
1038                                 spin_unlock(&dentry->d_lock);
1039                                 if (parent)
1040                                         spin_unlock(&parent->d_lock);
1041                                 break;
1042                         }
1043                         inode = dentry->d_inode;        /* can't be NULL */
1044                         if (unlikely(!spin_trylock(&inode->i_lock))) {
1045                                 spin_unlock(&dentry->d_lock);
1046                                 if (parent)
1047                                         spin_unlock(&parent->d_lock);
1048                                 cpu_relax();
1049                                 continue;
1050                         }
1051                         __dentry_kill(dentry);
1052                         dentry = parent;
1053                 }
1054         }
1055 }
1056 
1057 static enum lru_status dentry_lru_isolate(struct list_head *item,
1058                 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1059 {
1060         struct list_head *freeable = arg;
1061         struct dentry   *dentry = container_of(item, struct dentry, d_lru);
1062 
1063 
1064         /*
1065          * we are inverting the lru lock/dentry->d_lock here,
1066          * so use a trylock. If we fail to get the lock, just skip
1067          * it
1068          */
1069         if (!spin_trylock(&dentry->d_lock))
1070                 return LRU_SKIP;
1071 
1072         /*
1073          * Referenced dentries are still in use. If they have active
1074          * counts, just remove them from the LRU. Otherwise give them
1075          * another pass through the LRU.
1076          */
1077         if (dentry->d_lockref.count) {
1078                 d_lru_isolate(lru, dentry);
1079                 spin_unlock(&dentry->d_lock);
1080                 return LRU_REMOVED;
1081         }
1082 
1083         if (dentry->d_flags & DCACHE_REFERENCED) {
1084                 dentry->d_flags &= ~DCACHE_REFERENCED;
1085                 spin_unlock(&dentry->d_lock);
1086 
1087                 /*
1088                  * The list move itself will be made by the common LRU code. At
1089                  * this point, we've dropped the dentry->d_lock but keep the
1090                  * lru lock. This is safe to do, since every list movement is
1091                  * protected by the lru lock even if both locks are held.
1092                  *
1093                  * This is guaranteed by the fact that all LRU management
1094                  * functions are intermediated by the LRU API calls like
1095                  * list_lru_add and list_lru_del. List movement in this file
1096          * only ever occurs through these functions or through callbacks
1097                  * like this one, that are called from the LRU API.
1098                  *
1099                  * The only exceptions to this are functions like
1100                  * shrink_dentry_list, and code that first checks for the
1101                  * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
1102                  * operating only with stack provided lists after they are
1103          * properly isolated from the main list.  It is thus always a
1104                  * local access.
1105                  */
1106                 return LRU_ROTATE;
1107         }
1108 
1109         d_lru_shrink_move(lru, dentry, freeable);
1110         spin_unlock(&dentry->d_lock);
1111 
1112         return LRU_REMOVED;
1113 }
1114 
1115 /**
1116  * prune_dcache_sb - shrink the dcache
1117  * @sb: superblock
1118  * @sc: shrink control, passed to list_lru_shrink_walk()
1119  *
1120  * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1121  * is done when we need more memory, and is called from the superblock shrinker
1122  * function.
1123  *
1124  * This function may fail to free any resources if all the dentries are in
1125  * use.
1126  */
1127 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1128 {
1129         LIST_HEAD(dispose);
1130         long freed;
1131 
1132         freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1133                                      dentry_lru_isolate, &dispose);
1134         shrink_dentry_list(&dispose);
1135         return freed;
1136 }
1137 
1138 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1139                 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1140 {
1141         struct list_head *freeable = arg;
1142         struct dentry   *dentry = container_of(item, struct dentry, d_lru);
1143 
1144         /*
1145          * we are inverting the lru lock/dentry->d_lock here,
1146          * so use a trylock. If we fail to get the lock, just skip
1147          * it
1148          */
1149         if (!spin_trylock(&dentry->d_lock))
1150                 return LRU_SKIP;
1151 
1152         d_lru_shrink_move(lru, dentry, freeable);
1153         spin_unlock(&dentry->d_lock);
1154 
1155         return LRU_REMOVED;
1156 }
1157 
1158 
1159 /**
1160  * shrink_dcache_sb - shrink dcache for a superblock
1161  * @sb: superblock
1162  *
1163  * Shrink the dcache for the specified super block. This is used to free
1164  * the dcache before unmounting a file system.
1165  */
1166 void shrink_dcache_sb(struct super_block *sb)
1167 {
1168         long freed;
1169 
1170         do {
1171                 LIST_HEAD(dispose);
1172 
1173                 freed = list_lru_walk(&sb->s_dentry_lru,
1174                         dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1175 
1176                 this_cpu_sub(nr_dentry_unused, freed);
1177                 shrink_dentry_list(&dispose);
1178         } while (freed > 0);
1179 }
1180 EXPORT_SYMBOL(shrink_dcache_sb);
1181 
1182 /**
1183  * enum d_walk_ret - action to take during tree walk
1184  * @D_WALK_CONTINUE:    continue walk
1185  * @D_WALK_QUIT:        quit walk
1186  * @D_WALK_NORETRY:     quit when retry is needed
1187  * @D_WALK_SKIP:        skip this dentry and its children
1188  */
1189 enum d_walk_ret {
1190         D_WALK_CONTINUE,
1191         D_WALK_QUIT,
1192         D_WALK_NORETRY,
1193         D_WALK_SKIP,
1194 };
1195 
1196 /**
1197  * d_walk - walk the dentry tree
1198  * @parent:     start of walk
1199  * @data:       data passed to @enter() and @finish()
1200  * @enter:      callback when first entering the dentry
1201  * @finish:     callback when successfully finished the walk
1202  *
1203  * The @enter() and @finish() callbacks are called with d_lock held.
1204  */
1205 static void d_walk(struct dentry *parent, void *data,
1206                    enum d_walk_ret (*enter)(void *, struct dentry *),
1207                    void (*finish)(void *))
1208 {
1209         struct dentry *this_parent;
1210         struct list_head *next;
1211         unsigned seq = 0;
1212         enum d_walk_ret ret;
1213         bool retry = true;
1214 
1215 again:
1216         read_seqbegin_or_lock(&rename_lock, &seq);
1217         this_parent = parent;
1218         spin_lock(&this_parent->d_lock);
1219 
1220         ret = enter(data, this_parent);
1221         switch (ret) {
1222         case D_WALK_CONTINUE:
1223                 break;
1224         case D_WALK_QUIT:
1225         case D_WALK_SKIP:
1226                 goto out_unlock;
1227         case D_WALK_NORETRY:
1228                 retry = false;
1229                 break;
1230         }
1231 repeat:
1232         next = this_parent->d_subdirs.next;
1233 resume:
1234         while (next != &this_parent->d_subdirs) {
1235                 struct list_head *tmp = next;
1236                 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1237                 next = tmp->next;
1238 
1239                 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1240                         continue;
1241 
1242                 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1243 
1244                 ret = enter(data, dentry);
1245                 switch (ret) {
1246                 case D_WALK_CONTINUE:
1247                         break;
1248                 case D_WALK_QUIT:
1249                         spin_unlock(&dentry->d_lock);
1250                         goto out_unlock;
1251                 case D_WALK_NORETRY:
1252                         retry = false;
1253                         break;
1254                 case D_WALK_SKIP:
1255                         spin_unlock(&dentry->d_lock);
1256                         continue;
1257                 }
1258 
1259                 if (!list_empty(&dentry->d_subdirs)) {
1260                         spin_unlock(&this_parent->d_lock);
1261                         spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1262                         this_parent = dentry;
1263                         spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1264                         goto repeat;
1265                 }
1266                 spin_unlock(&dentry->d_lock);
1267         }
1268         /*
1269          * All done at this level ... ascend and resume the search.
1270          */
1271         rcu_read_lock();
1272 ascend:
1273         if (this_parent != parent) {
1274                 struct dentry *child = this_parent;
1275                 this_parent = child->d_parent;
1276 
1277                 spin_unlock(&child->d_lock);
1278                 spin_lock(&this_parent->d_lock);
1279 
1280                 /* might go back up the wrong parent if we have had a rename. */
1281                 if (need_seqretry(&rename_lock, seq))
1282                         goto rename_retry;
1283                 /* go into the first sibling still alive */
1284                 do {
1285                         next = child->d_child.next;
1286                         if (next == &this_parent->d_subdirs)
1287                                 goto ascend;
1288                         child = list_entry(next, struct dentry, d_child);
1289                 } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1290                 rcu_read_unlock();
1291                 goto resume;
1292         }
1293         if (need_seqretry(&rename_lock, seq))
1294                 goto rename_retry;
1295         rcu_read_unlock();
1296         if (finish)
1297                 finish(data);
1298 
1299 out_unlock:
1300         spin_unlock(&this_parent->d_lock);
1301         done_seqretry(&rename_lock, seq);
1302         return;
1303 
1304 rename_retry:
1305         spin_unlock(&this_parent->d_lock);
1306         rcu_read_unlock();
1307         BUG_ON(seq & 1);
1308         if (!retry)
1309                 return;
1310         seq = 1;
1311         goto again;
1312 }
1313 
1314 /*
1315  * Search for at least 1 mount point in the dentry's subdirs.
1316  * We descend to the next level whenever the d_subdirs
1317  * list is non-empty and continue searching.
1318  */
1319 
1320 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1321 {
1322         int *ret = data;
1323         if (d_mountpoint(dentry)) {
1324                 *ret = 1;
1325                 return D_WALK_QUIT;
1326         }
1327         return D_WALK_CONTINUE;
1328 }
1329 
1330 /**
1331  * have_submounts - check for mounts over a dentry
1332  * @parent: dentry to check.
1333  *
1334  * Return true if the parent or its subdirectories contain
1335  * a mount point
1336  */
1337 int have_submounts(struct dentry *parent)
1338 {
1339         int ret = 0;
1340 
1341         d_walk(parent, &ret, check_mount, NULL);
1342 
1343         return ret;
1344 }
1345 EXPORT_SYMBOL(have_submounts);
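
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): an
 * automounter-style filesystem refusing to expire a directory while
 * anything is still mounted beneath it.
 */
static int example_may_expire(struct dentry *dentry)
{
        if (have_submounts(dentry))
                return -EBUSY;
        return 0;
}
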
1346 
1347 /*
1348  * Called by mount code to set a mountpoint and check if the mountpoint is
1349  * reachable (e.g. NFS can unhash a directory dentry and then the complete
1350  * subtree can become unreachable).
1351  *
1352  * Only one of d_invalidate() and d_set_mounted() must succeed.  For
1353  * this reason take rename_lock and d_lock on dentry and ancestors.
1354  */
1355 int d_set_mounted(struct dentry *dentry)
1356 {
1357         struct dentry *p;
1358         int ret = -ENOENT;
1359         write_seqlock(&rename_lock);
1360         for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1361                 /* Need exclusion wrt. d_invalidate() */
1362                 spin_lock(&p->d_lock);
1363                 if (unlikely(d_unhashed(p))) {
1364                         spin_unlock(&p->d_lock);
1365                         goto out;
1366                 }
1367                 spin_unlock(&p->d_lock);
1368         }
1369         spin_lock(&dentry->d_lock);
1370         if (!d_unlinked(dentry)) {
1371                 dentry->d_flags |= DCACHE_MOUNTED;
1372                 ret = 0;
1373         }
1374         spin_unlock(&dentry->d_lock);
1375 out:
1376         write_sequnlock(&rename_lock);
1377         return ret;
1378 }
1379 
1380 /*
1381  * Search the dentry child list of the specified parent,
1382  * and move any unused dentries to the end of the unused
1383  * list for prune_dcache(). We descend to the next level
1384  * whenever the d_subdirs list is non-empty and continue
1385  * searching.
1386  *
1387  * It returns zero iff there are no unused children,
1388  * otherwise it returns the number of children moved to
1389  * the end of the unused list. This may not be the total
1390  * number of unused children, because select_parent can
1391  * drop the lock and return early due to latency
1392  * constraints.
1393  */
1394 
1395 struct select_data {
1396         struct dentry *start;
1397         struct list_head dispose;
1398         int found;
1399 };
1400 
1401 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1402 {
1403         struct select_data *data = _data;
1404         enum d_walk_ret ret = D_WALK_CONTINUE;
1405 
1406         if (data->start == dentry)
1407                 goto out;
1408 
1409         if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1410                 data->found++;
1411         } else {
1412                 if (dentry->d_flags & DCACHE_LRU_LIST)
1413                         d_lru_del(dentry);
1414                 if (!dentry->d_lockref.count) {
1415                         d_shrink_add(dentry, &data->dispose);
1416                         data->found++;
1417                 }
1418         }
1419         /*
1420          * We can return to the caller if we have found some (this
1421          * ensures forward progress). We'll be coming back to find
1422          * the rest.
1423          */
1424         if (!list_empty(&data->dispose))
1425                 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1426 out:
1427         return ret;
1428 }
1429 
1430 /**
1431  * shrink_dcache_parent - prune dcache
1432  * @parent: parent of entries to prune
1433  *
1434  * Prune the dcache to remove unused children of the parent dentry.
1435  */
1436 void shrink_dcache_parent(struct dentry *parent)
1437 {
1438         for (;;) {
1439                 struct select_data data;
1440 
1441                 INIT_LIST_HEAD(&data.dispose);
1442                 data.start = parent;
1443                 data.found = 0;
1444 
1445                 d_walk(parent, &data, select_collect, NULL);
1446                 if (!data.found)
1447                         break;
1448 
1449                 shrink_dentry_list(&data.dispose);
1450                 cond_resched();
1451         }
1452 }
1453 EXPORT_SYMBOL(shrink_dcache_parent);
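
/*
 * Illustrative sketch (editor's addition, not part of fs/dcache.c): before
 * tearing down a directory tree, a filesystem can prune the unused dentries
 * cached below it so they stop pinning memory and inodes.
 */
static void example_prepare_tree_removal(struct dentry *victim)
{
        shrink_dcache_parent(victim);
        /* ...filesystem-specific removal of the on-disk tree follows... */
}
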
1454 
1455 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1456 {
1457         /* it has busy descendants; complain about those instead */
1458         if (!list_empty(&dentry->d_subdirs))
1459                 return D_WALK_CONTINUE;
1460 
1461         /* root with refcount 1 is fine */
1462         if (dentry == _data && dentry->d_lockref.count == 1)
1463                 return D_WALK_CONTINUE;
1464 
1465         printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1466                         " still in use (%d) [unmount of %s %s]\n",
1467                        dentry,
1468                        dentry->d_inode ?
1469                        dentry->d_inode->i_ino : 0UL,
1470                        dentry,
1471                        dentry->d_lockref.count,
1472                        dentry->d_sb->s_type->name,
1473                        dentry->d_sb->s_id);
1474         WARN_ON(1);
1475         return D_WALK_CONTINUE;
1476 }
1477 
1478 static void do_one_tree(struct dentry *dentry)
1479 {
1480         shrink_dcache_parent(dentry);
1481         d_walk(dentry, dentry, umount_check, NULL);
1482         d_drop(dentry);
1483         dput(dentry);
1484 }
1485 
1486 /*
1487  * destroy the dentries attached to a superblock on unmounting
1488  */
1489 void shrink_dcache_for_umount(struct super_block *sb)
1490 {
1491         struct dentry *dentry;
1492 
1493         WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1494 
1495         dentry = sb->s_root;
1496         sb->s_root = NULL;
1497         do_one_tree(dentry);
1498 
1499         while (!hlist_bl_empty(&sb->s_anon)) {
1500                 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
1501                 do_one_tree(dentry);
1502         }
1503 }
1504 
1505 struct detach_data {
1506         struct select_data select;
1507         struct dentry *mountpoint;
1508 };
1509 static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
1510 {
1511         struct detach_data *data = _data;
1512 
1513         if (d_mountpoint(dentry)) {
1514                 __dget_dlock(dentry);
1515                 data->mountpoint = dentry;
1516                 return D_WALK_QUIT;
1517         }
1518 
1519         return select_collect(&data->select, dentry);
1520 }
1521 
1522 static void check_and_drop(void *_data)
1523 {
1524         struct detach_data *data = _data;
1525 
1526         if (!data->mountpoint && !data->select.found)
1527                 __d_drop(data->select.start);
1528 }
1529 
1530 /**
1531  * d_invalidate - detach submounts, prune dcache, and drop
1532  * @dentry: dentry to invalidate (aka detach, prune and drop)
1533  *
1534  * no dcache lock.
1535  *
1536  * The final d_drop is done as an atomic operation relative to
1537  * rename_lock ensuring there are no races with d_set_mounted.  This
1538  * ensures there are no unhashed dentries on the path to a mountpoint.
1539  */
1540 void d_invalidate(struct dentry *dentry)
1541 {
1542         /*
1543          * If it's already been dropped, return OK.
1544          */
1545         spin_lock(&dentry->d_lock);
1546         if (d_unhashed(dentry)) {
1547                 spin_unlock(&dentry->d_lock);
1548                 return;
1549         }
1550         spin_unlock(&dentry->d_lock);
1551 
1552         /* Negative dentries can be dropped without further checks */
1553         if (!dentry->d_inode) {
1554                 d_drop(dentry);
1555                 return;
1556         }
1557 
1558         for (;;) {
1559                 struct detach_data data;
1560 
1561                 data.mountpoint = NULL;
1562                 INIT_LIST_HEAD(&data.select.dispose);
1563                 data.select.start = dentry;
1564                 data.select.found = 0;
1565 
1566                 d_walk(dentry, &data, detach_and_collect, check_and_drop);
1567 
1568                 if (data.select.found)
1569                         shrink_dentry_list(&data.select.dispose);
1570 
1571                 if (data.mountpoint) {
1572                         detach_mounts(data.mountpoint);
1573                         dput(data.mountpoint);
1574                 }
1575 
1576                 if (!data.mountpoint && !data.select.found)
1577                         break;
1578 
1579                 cond_resched();
1580         }
1581 }
1582 EXPORT_SYMBOL(d_invalidate);
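
/*
 * Usage sketch (illustrative): the VFS calls d_invalidate() itself when
 * ->d_revalidate() fails, but a filesystem may also invoke it directly
 * when it learns out of band that a cached name is stale.  The helper
 * name below is hypothetical.
 */
static void examplefs_name_went_away(struct dentry *dentry)
{
        /* prunes unused children, detaches submounts, unhashes the dentry */
        d_invalidate(dentry);
}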
1583 
1584 /**
1585  * __d_alloc    -       allocate a dcache entry
1586  * @sb: filesystem it will belong to
1587  * @name: qstr of the name
1588  *
1589  * Allocates a dentry. It returns %NULL if there is insufficient memory
1590  * available. On a success the dentry is returned. The name passed in is
1591  * copied and the copy passed in may be reused after this call.
1592  */
1593  
1594 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1595 {
1596         struct dentry *dentry;
1597         char *dname;
1598 
1599         dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1600         if (!dentry)
1601                 return NULL;
1602 
1603         /*
1604          * We guarantee that the inline name is always NUL-terminated.
1605          * This way the memcpy() done by the name switching in rename
1606          * will still always have a NUL at the end, even if we might
1607          * be overwriting an internal NUL character
1608          */
1609         dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1610         if (unlikely(!name)) {
1611                 static const struct qstr anon = QSTR_INIT("/", 1);
1612                 name = &anon;
1613                 dname = dentry->d_iname;
1614         } else if (name->len > DNAME_INLINE_LEN-1) {
1615                 size_t size = offsetof(struct external_name, name[1]);
1616                 struct external_name *p = kmalloc(size + name->len,
1617                                                   GFP_KERNEL_ACCOUNT);
1618                 if (!p) {
1619                         kmem_cache_free(dentry_cache, dentry); 
1620                         return NULL;
1621                 }
1622                 atomic_set(&p->u.count, 1);
1623                 dname = p->name;
1624                 if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
1625                         kasan_unpoison_shadow(dname,
1626                                 round_up(name->len + 1, sizeof(unsigned long)));
1627         } else  {
1628                 dname = dentry->d_iname;
1629         }       
1630 
1631         dentry->d_name.len = name->len;
1632         dentry->d_name.hash = name->hash;
1633         memcpy(dname, name->name, name->len);
1634         dname[name->len] = 0;
1635 
1636         /* Make sure we always see the terminating NUL character */
1637         smp_wmb();
1638         dentry->d_name.name = dname;
1639 
1640         dentry->d_lockref.count = 1;
1641         dentry->d_flags = 0;
1642         spin_lock_init(&dentry->d_lock);
1643         seqcount_init(&dentry->d_seq);
1644         dentry->d_inode = NULL;
1645         dentry->d_parent = dentry;
1646         dentry->d_sb = sb;
1647         dentry->d_op = NULL;
1648         dentry->d_fsdata = NULL;
1649         INIT_HLIST_BL_NODE(&dentry->d_hash);
1650         INIT_LIST_HEAD(&dentry->d_lru);
1651         INIT_LIST_HEAD(&dentry->d_subdirs);
1652         INIT_HLIST_NODE(&dentry->d_u.d_alias);
1653         INIT_LIST_HEAD(&dentry->d_child);
1654         d_set_d_op(dentry, dentry->d_sb->s_d_op);
1655 
1656         this_cpu_inc(nr_dentry);
1657 
1658         return dentry;
1659 }
1660 
1661 /**
1662  * d_alloc      -       allocate a dcache entry
1663  * @parent: parent of entry to allocate
1664  * @name: qstr of the name
1665  *
1666  * Allocates a dentry. It returns %NULL if there is insufficient memory
1667  * available. On a success the dentry is returned. The name passed in is
1668  * copied and the copy passed in may be reused after this call.
1669  */
1670 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1671 {
1672         struct dentry *dentry = __d_alloc(parent->d_sb, name);
1673         if (!dentry)
1674                 return NULL;
1675         dentry->d_flags |= DCACHE_RCUACCESS;
1676         spin_lock(&parent->d_lock);
1677         /*
1678          * don't need child lock because it is not subject
1679          * to concurrency here
1680          */
1681         __dget_dlock(parent);
1682         dentry->d_parent = parent;
1683         list_add(&dentry->d_child, &parent->d_subdirs);
1684         spin_unlock(&parent->d_lock);
1685 
1686         return dentry;
1687 }
1688 EXPORT_SYMBOL(d_alloc);
1689 
1690 struct dentry *d_alloc_cursor(struct dentry * parent)
1691 {
1692         struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
1693         if (dentry) {
1694                 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1695                 dentry->d_parent = dget(parent);
1696         }
1697         return dentry;
1698 }
1699 
1700 /**
1701  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1702  * @sb: the superblock
1703  * @name: qstr of the name
1704  *
1705  * For a filesystem that just pins its dentries in memory and never
1706  * performs lookups at all, return an unhashed IS_ROOT dentry.
1707  */
1708 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1709 {
1710         return __d_alloc(sb, name);
1711 }
1712 EXPORT_SYMBOL(d_alloc_pseudo);
1713 
1714 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1715 {
1716         struct qstr q;
1717 
1718         q.name = name;
1719         q.hash_len = hashlen_string(name);
1720         return d_alloc(parent, &q);
1721 }
1722 EXPORT_SYMBOL(d_alloc_name);
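
/*
 * Usage sketch (illustrative): pseudo filesystems typically pair
 * d_alloc_name() with d_add() to populate directories they own, much like
 * debugfs does.  Everything below (the helper name, mode and missing error
 * handling) is hypothetical, and the parent directory's i_mutex is assumed
 * to be held.
 */
static struct dentry *examplefs_mkfile(struct dentry *parent, const char *name,
                                       const struct file_operations *fops)
{
        struct dentry *dentry;
        struct inode *inode;

        dentry = d_alloc_name(parent, name);    /* hashes the name for us */
        if (!dentry)
                return NULL;

        inode = new_inode(parent->d_sb);
        if (!inode) {
                dput(dentry);
                return NULL;
        }
        inode->i_mode = S_IFREG | 0444;
        inode->i_fop = fops;

        d_add(dentry, inode);                   /* instantiate and hash */
        return dentry;
}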
1723 
1724 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1725 {
1726         WARN_ON_ONCE(dentry->d_op);
1727         WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH  |
1728                                 DCACHE_OP_COMPARE       |
1729                                 DCACHE_OP_REVALIDATE    |
1730                                 DCACHE_OP_WEAK_REVALIDATE       |
1731                                 DCACHE_OP_DELETE        |
1732                                 DCACHE_OP_SELECT_INODE  |
1733                                 DCACHE_OP_REAL));
1734         dentry->d_op = op;
1735         if (!op)
1736                 return;
1737         if (op->d_hash)
1738                 dentry->d_flags |= DCACHE_OP_HASH;
1739         if (op->d_compare)
1740                 dentry->d_flags |= DCACHE_OP_COMPARE;
1741         if (op->d_revalidate)
1742                 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1743         if (op->d_weak_revalidate)
1744                 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1745         if (op->d_delete)
1746                 dentry->d_flags |= DCACHE_OP_DELETE;
1747         if (op->d_prune)
1748                 dentry->d_flags |= DCACHE_OP_PRUNE;
1749         if (op->d_select_inode)
1750                 dentry->d_flags |= DCACHE_OP_SELECT_INODE;
1751         if (op->d_real)
1752                 dentry->d_flags |= DCACHE_OP_REAL;
1753 
1754 }
1755 EXPORT_SYMBOL(d_set_d_op);
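
/*
 * Usage sketch (illustrative): most filesystems never call d_set_d_op()
 * directly.  They set sb->s_d_op once at mount time and let __d_alloc()
 * apply it to every dentry (see the d_set_d_op() call there).  The ops
 * table and helper below are hypothetical; always_delete_dentry() is the
 * real helper from fs/libfs.c.
 */
static const struct dentry_operations examplefs_dentry_ops = {
        .d_delete       = always_delete_dentry,
};

static void examplefs_init_sb(struct super_block *sb)
{
        sb->s_d_op = &examplefs_dentry_ops;     /* picked up by __d_alloc() */
}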
1756 
1757 
1758 /**
1759  * d_set_fallthru - Mark a dentry as falling through to a lower layer
1760  * @dentry: The dentry to mark
1761  *
1762  * Mark a dentry as falling through to the lower layer (as set with
1763  * d_pin_lower()).  This flag may be recorded on the medium.
1764  */
1765 void d_set_fallthru(struct dentry *dentry)
1766 {
1767         spin_lock(&dentry->d_lock);
1768         dentry->d_flags |= DCACHE_FALLTHRU;
1769         spin_unlock(&dentry->d_lock);
1770 }
1771 EXPORT_SYMBOL(d_set_fallthru);
1772 
1773 static unsigned d_flags_for_inode(struct inode *inode)
1774 {
1775         unsigned add_flags = DCACHE_REGULAR_TYPE;
1776 
1777         if (!inode)
1778                 return DCACHE_MISS_TYPE;
1779 
1780         if (S_ISDIR(inode->i_mode)) {
1781                 add_flags = DCACHE_DIRECTORY_TYPE;
1782                 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1783                         if (unlikely(!inode->i_op->lookup))
1784                                 add_flags = DCACHE_AUTODIR_TYPE;
1785                         else
1786                                 inode->i_opflags |= IOP_LOOKUP;
1787                 }
1788                 goto type_determined;
1789         }
1790 
1791         if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1792                 if (unlikely(inode->i_op->get_link)) {
1793                         add_flags = DCACHE_SYMLINK_TYPE;
1794                         goto type_determined;
1795                 }
1796                 inode->i_opflags |= IOP_NOFOLLOW;
1797         }
1798 
1799         if (unlikely(!S_ISREG(inode->i_mode)))
1800                 add_flags = DCACHE_SPECIAL_TYPE;
1801 
1802 type_determined:
1803         if (unlikely(IS_AUTOMOUNT(inode)))
1804                 add_flags |= DCACHE_NEED_AUTOMOUNT;
1805         return add_flags;
1806 }
1807 
1808 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1809 {
1810         unsigned add_flags = d_flags_for_inode(inode);
1811         WARN_ON(d_in_lookup(dentry));
1812 
1813         spin_lock(&dentry->d_lock);
1814         hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1815         raw_write_seqcount_begin(&dentry->d_seq);
1816         __d_set_inode_and_type(dentry, inode, add_flags);
1817         raw_write_seqcount_end(&dentry->d_seq);
1818         __fsnotify_d_instantiate(dentry);
1819         spin_unlock(&dentry->d_lock);
1820 }
1821 
1822 /**
1823  * d_instantiate - fill in inode information for a dentry
1824  * @entry: dentry to complete
1825  * @inode: inode to attach to this dentry
1826  *
1827  * Fill in inode information in the entry.
1828  *
1829  * This turns negative dentries into productive full members
1830  * of society.
1831  *
1832  * NOTE! This assumes that the inode count has been incremented
1833  * (or otherwise set) by the caller to indicate that it is now
1834  * in use by the dcache.
1835  */
1836  
1837 void d_instantiate(struct dentry *entry, struct inode * inode)
1838 {
1839         BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1840         if (inode) {
1841                 security_d_instantiate(entry, inode);
1842                 spin_lock(&inode->i_lock);
1843                 __d_instantiate(entry, inode);
1844                 spin_unlock(&inode->i_lock);
1845         }
1846 }
1847 EXPORT_SYMBOL(d_instantiate);
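
/*
 * Usage sketch (illustrative): d_instantiate() usually appears at the end
 * of ->create/->mkdir/->symlink, once the new inode exists and carries the
 * reference the dcache will own.  The method below is hypothetical and
 * omits persisting anything to a backing store.
 */
static int examplefs_create(struct inode *dir, struct dentry *dentry,
                            umode_t mode, bool excl)
{
        struct inode *inode = new_inode(dir->i_sb);

        if (!inode)
                return -ENOMEM;
        inode->i_mode = mode;
        /* ... set i_op/i_fop, timestamps, on-disk state ... */

        d_instantiate(dentry, inode);   /* dentry now owns the inode ref */
        dget(dentry);                   /* extra pin, as e.g. ramfs does */
        return 0;
}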
1848 
1849 /**
1850  * d_instantiate_no_diralias - instantiate a non-aliased dentry
1851  * @entry: dentry to complete
1852  * @inode: inode to attach to this dentry
1853  *
1854  * Fill in inode information in the entry.  If a directory alias is found, then
1855  * return an error (and drop inode).  Together with d_materialise_unique() this
1856  * guarantees that a directory inode may never have more than one alias.
1857  */
1858 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1859 {
1860         BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1861 
1862         security_d_instantiate(entry, inode);
1863         spin_lock(&inode->i_lock);
1864         if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1865                 spin_unlock(&inode->i_lock);
1866                 iput(inode);
1867                 return -EBUSY;
1868         }
1869         __d_instantiate(entry, inode);
1870         spin_unlock(&inode->i_lock);
1871 
1872         return 0;
1873 }
1874 EXPORT_SYMBOL(d_instantiate_no_diralias);
1875 
1876 struct dentry *d_make_root(struct inode *root_inode)
1877 {
1878         struct dentry *res = NULL;
1879 
1880         if (root_inode) {
1881                 res = __d_alloc(root_inode->i_sb, NULL);
1882                 if (res)
1883                         d_instantiate(res, root_inode);
1884                 else
1885                         iput(root_inode);
1886         }
1887         return res;
1888 }
1889 EXPORT_SYMBOL(d_make_root);
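
/*
 * Usage sketch (illustrative): the canonical d_make_root() caller is a
 * fill_super implementation.  On failure d_make_root() has already dropped
 * the inode reference, so the caller must not iput() it again.  The
 * fragment below is hypothetical and leaves out most of fill_super.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *root;

        /* ... set s_blocksize, s_magic, s_op, etc. ... */
        root = new_inode(sb);
        if (!root)
                return -ENOMEM;
        root->i_mode = S_IFDIR | 0755;
        /* ... set root->i_op and root->i_fop for a directory ... */

        sb->s_root = d_make_root(root);
        if (!sb->s_root)
                return -ENOMEM;         /* root inode already released */
        return 0;
}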
1890 
1891 static struct dentry * __d_find_any_alias(struct inode *inode)
1892 {
1893         struct dentry *alias;
1894 
1895         if (hlist_empty(&inode->i_dentry))
1896                 return NULL;
1897         alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1898         __dget(alias);
1899         return alias;
1900 }
1901 
1902 /**
1903  * d_find_any_alias - find any alias for a given inode
1904  * @inode: inode to find an alias for
1905  *
1906  * If any aliases exist for the given inode, take and return a
1907  * reference for one of them.  If no aliases exist, return %NULL.
1908  */
1909 struct dentry *d_find_any_alias(struct inode *inode)
1910 {
1911         struct dentry *de;
1912 
1913         spin_lock(&inode->i_lock);
1914         de = __d_find_any_alias(inode);
1915         spin_unlock(&inode->i_lock);
1916         return de;
1917 }
1918 EXPORT_SYMBOL(d_find_any_alias);
1919 
1920 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
1921 {
1922         struct dentry *tmp;
1923         struct dentry *res;
1924         unsigned add_flags;
1925 
1926         if (!inode)
1927                 return ERR_PTR(-ESTALE);
1928         if (IS_ERR(inode))
1929                 return ERR_CAST(inode);
1930 
1931         res = d_find_any_alias(inode);
1932         if (res)
1933                 goto out_iput;
1934 
1935         tmp = __d_alloc(inode->i_sb, NULL);
1936         if (!tmp) {
1937                 res = ERR_PTR(-ENOMEM);
1938                 goto out_iput;
1939         }
1940 
1941         security_d_instantiate(tmp, inode);
1942         spin_lock(&inode->i_lock);
1943         res = __d_find_any_alias(inode);
1944         if (res) {
1945                 spin_unlock(&inode->i_lock);
1946                 dput(tmp);
1947                 goto out_iput;
1948         }
1949 
1950         /* attach a disconnected dentry */
1951         add_flags = d_flags_for_inode(inode);
1952 
1953         if (disconnected)
1954                 add_flags |= DCACHE_DISCONNECTED;
1955 
1956         spin_lock(&tmp->d_lock);
1957         __d_set_inode_and_type(tmp, inode, add_flags);
1958         hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1959         hlist_bl_lock(&tmp->d_sb->s_anon);
1960         hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1961         hlist_bl_unlock(&tmp->d_sb->s_anon);
1962         spin_unlock(&tmp->d_lock);
1963         spin_unlock(&inode->i_lock);
1964 
1965         return tmp;
1966 
1967  out_iput:
1968         iput(inode);
1969         return res;
1970 }
1971 
1972 /**
1973  * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1974  * @inode: inode to allocate the dentry for
1975  *
1976  * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1977  * similar open by handle operations.  The returned dentry may be anonymous,
1978  * or may have a full name (if the inode was already in the cache).
1979  *
1980  * When called on a directory inode, we must ensure that the inode only ever
1981  * has one dentry.  If a dentry is found, that is returned instead of
1982  * allocating a new one.
1983  *
1984  * On successful return, the reference to the inode has been transferred
1985  * to the dentry.  In case of an error the reference on the inode is released.
1986  * To make it easier to use in export operations a %NULL or IS_ERR inode may
1987  * be passed in and the error will be propagated to the return value,
1988  * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1989  */
1990 struct dentry *d_obtain_alias(struct inode *inode)
1991 {
1992         return __d_obtain_alias(inode, 1);
1993 }
1994 EXPORT_SYMBOL(d_obtain_alias);
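
/*
 * Usage sketch (illustrative): exportable filesystems call d_obtain_alias()
 * from their export_operations, typically at the end of ->fh_to_dentry once
 * the file handle has been decoded into an inode.  examplefs_iget() is a
 * hypothetical decode helper; %NULL and ERR_PTR inodes need no special
 * casing since d_obtain_alias() handles both.
 */
struct inode *examplefs_iget(struct super_block *sb, unsigned long ino); /* hypothetical */

static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
                                             struct fid *fid,
                                             int fh_len, int fh_type)
{
        struct inode *inode = examplefs_iget(sb, fid->i32.ino);

        return d_obtain_alias(inode);
}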
1995 
1996 /**
1997  * d_obtain_root - find or allocate a dentry for a given inode
1998  * @inode: inode to allocate the dentry for
1999  *
2000  * Obtain an IS_ROOT dentry for the root of a filesystem.
2001  *
2002  * We must ensure that directory inodes only ever have one dentry.  If a
2003  * dentry is found, that is returned instead of allocating a new one.
2004  *
2005  * On successful return, the reference to the inode has been transferred
2006  * to the dentry.  In case of an error the reference on the inode is
2007  * released.  A %NULL or IS_ERR inode may be passed in and the error
2008  * will be propagated to the return value, with a %NULL @inode
2009  * replaced by ERR_PTR(-ESTALE).
2010  */
2011 struct dentry *d_obtain_root(struct inode *inode)
2012 {
2013         return __d_obtain_alias(inode, 0);
2014 }
2015 EXPORT_SYMBOL(d_obtain_root);
2016 
2017 /**
2018  * d_add_ci - lookup or allocate new dentry with case-exact name
2019  * @inode:  the inode case-insensitive lookup has found
2020  * @dentry: the negative dentry that was passed to the parent's lookup func
2021  * @name:   the case-exact name to be associated with the returned dentry
2022  *
2023  * This is to avoid filling the dcache with case-insensitive names to the
2024  * same inode; only the actual correct case is stored in the dcache for
2025  * case-insensitive filesystems.
2026  *
2027  * For a case-insensitive lookup match, if the case-exact dentry
2028  * already exists in the dcache, use it and return it.
2029  *
2030  * If no entry exists with the exact case name, allocate new dentry with
2031  * the exact case, and return the spliced entry.
2032  */
2033 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2034                         struct qstr *name)
2035 {
2036         struct dentry *found, *res;
2037 
2038         /*
2039          * First check if a dentry matching the name already exists,
2040          * if not go ahead and create it now.
2041          */
2042         found = d_hash_and_lookup(dentry->d_parent, name);
2043         if (found) {
2044                 iput(inode);
2045                 return found;
2046         }
2047         if (d_in_lookup(dentry)) {
2048                 found = d_alloc_parallel(dentry->d_parent, name,
2049                                         dentry->d_wait);
2050                 if (IS_ERR(found) || !d_in_lookup(found)) {
2051                         iput(inode);
2052                         return found;
2053                 }
2054         } else {
2055                 found = d_alloc(dentry->d_parent, name);
2056                 if (!found) {
2057                         iput(inode);
2058                         return ERR_PTR(-ENOMEM);
2059                 } 
2060         }
2061         res = d_splice_alias(inode, found);
2062         if (res) {
2063                 dput(found);
2064                 return res;
2065         }
2066         return found;
2067 }
2068 EXPORT_SYMBOL(d_add_ci);
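
/*
 * Usage sketch (illustrative): a case-insensitive filesystem's ->lookup
 * finds the inode under whatever case the directory stores, then calls
 * d_add_ci() so that only the exact on-disk spelling lands in the dcache.
 * examplefs_ci_find() is a hypothetical search helper returning the inode
 * and the on-disk name; note that d_add_ci() consumes the inode reference.
 */
struct inode *examplefs_ci_find(struct inode *dir, const struct qstr *name,
                                char *disk_name, int *disk_len); /* hypothetical */

static struct dentry *examplefs_ci_lookup(struct inode *dir,
                                          struct dentry *dentry,
                                          unsigned int flags)
{
        char disk_name[NAME_MAX + 1];
        struct qstr exact;
        struct inode *inode;
        int disk_len;

        inode = examplefs_ci_find(dir, &dentry->d_name, disk_name, &disk_len);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (!inode) {
                d_add(dentry, NULL);    /* cache the negative result */
                return NULL;
        }

        exact.name = disk_name;
        exact.len = disk_len;
        exact.hash = 0;                 /* d_add_ci() recomputes the hash */
        return d_add_ci(dentry, inode, &exact);
}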
2069 
2070 /*
2071  * Do the slow-case of the dentry name compare.
2072  *
2073  * Unlike the dentry_cmp() function, we need to atomically
2074  * load the name and length information, so that the
2075  * filesystem can rely on them, and can use the 'name' and
2076  * 'len' information without worrying about walking off the
2077  * end of memory etc.
2078  *
2079  * Thus the read_seqcount_retry() and the "duplicate" info
2080  * in arguments (the low-level filesystem should not look
2081  * at the dentry inode or name contents directly, since
2082  * rename can change them while we're in RCU mode).
2083  */
2084 enum slow_d_compare {
2085         D_COMP_OK,
2086         D_COMP_NOMATCH,
2087         D_COMP_SEQRETRY,
2088 };
2089 
2090 static noinline enum slow_d_compare slow_dentry_cmp(
2091                 const struct dentry *parent,
2092                 struct dentry *dentry,
2093                 unsigned int seq,
2094                 const struct qstr *name)
2095 {
2096         int tlen = dentry->d_name.len;
2097         const char *tname = dentry->d_name.name;
2098 
2099         if (read_seqcount_retry(&dentry->d_seq, seq)) {
2100                 cpu_relax();
2101                 return D_COMP_SEQRETRY;
2102         }
2103         if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2104                 return D_COMP_NOMATCH;
2105         return D_COMP_OK;
2106 }
2107 
2108 /**
2109  * __d_lookup_rcu - search for a dentry (racy, store-free)
2110  * @parent: parent dentry
2111  * @name: qstr of name we wish to find
2112  * @seqp: returns d_seq value at the point where the dentry was found
2113  * Returns: dentry, or NULL
2114  *
2115  * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2116  * resolution (store-free path walking) design described in
2117  * Documentation/filesystems/path-lookup.txt.
2118  *
2119  * This is not to be used outside core vfs.
2120  *
2121  * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2122  * held, and rcu_read_lock held. The returned dentry must not be stored
2123  * without taking d_lock and checking the d_seq sequence count against @seq
2124  * returned here.
2125  *
2126  * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2127  * function.
2128  *
2129  * Alternatively, __d_lookup_rcu may be called again to look up the child of
2130  * the returned dentry, so long as its parent's seqlock is checked after the
2131  * child is looked up. Thus, an interlocking stepping of sequence lock checks
2132  * is formed, giving integrity down the path walk.
2133  *
2134  * NOTE! The caller *has* to check the resulting dentry against the sequence
2135  * number we've returned before using any of the resulting dentry state!
2136  */
2137 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2138                                 const struct qstr *name,
2139                                 unsigned *seqp)
2140 {
2141         u64 hashlen = name->hash_len;
2142         const unsigned char *str = name->name;
2143         struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2144         struct hlist_bl_node *node;
2145         struct dentry *dentry;
2146 
2147         /*
2148          * Note: There is significant duplication with __d_lookup which is
2149          * required to prevent single threaded performance regressions
2150          * especially on architectures where smp_rmb (in seqcounts) are costly.
2151          * Keep the two functions in sync.
2152          */
2153 
2154         /*
2155          * The hash list is protected using RCU.
2156          *
2157          * Carefully use d_seq when comparing a candidate dentry, to avoid
2158          * races with d_move().
2159          *
2160          * It is possible that concurrent renames can mess up our list
2161          * walk here and result in missing our dentry, resulting in the
2162          * false-negative result. d_lookup() protects against concurrent
2163          * renames using rename_lock seqlock.
2164          *
2165          * See Documentation/filesystems/path-lookup.txt for more details.
2166          */
2167         hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2168                 unsigned seq;
2169 
2170 seqretry:
2171                 /*
2172                  * The dentry sequence count protects us from concurrent
2173                  * renames, and thus protects parent and name fields.
2174                  *
2175                  * The caller must perform a seqcount check in order
2176                  * to do anything useful with the returned dentry.
2177                  *
2178                  * NOTE! We do a "raw" seqcount_begin here. That means that
2179                  * we don't wait for the sequence count to stabilize if it
2180                  * is in the middle of a sequence change. If we do the slow
2181                  * dentry compare, we will do seqretries until it is stable,
2182                  * and if we end up with a successful lookup, we actually
2183                  * want to exit RCU lookup anyway.
2184                  */
2185                 seq = raw_seqcount_begin(&dentry->d_seq);
2186                 if (dentry->d_parent != parent)
2187                         continue;
2188                 if (d_unhashed(dentry))
2189                         continue;
2190 
2191                 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2192                         if (dentry->d_name.hash != hashlen_hash(hashlen))
2193                                 continue;
2194                         *seqp = seq;
2195                         switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2196                         case D_COMP_OK:
2197                                 return dentry;
2198                         case D_COMP_NOMATCH:
2199                                 continue;
2200                         default:
2201                                 goto seqretry;
2202                         }
2203                 }
2204 
2205                 if (dentry->d_name.hash_len != hashlen)
2206                         continue;
2207                 *seqp = seq;
2208                 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2209                         return dentry;
2210         }
2211         return NULL;
2212 }
2213 
2214 /**
2215  * d_lookup - search for a dentry
2216  * @parent: parent dentry
2217  * @name: qstr of name we wish to find
2218  * Returns: dentry, or NULL
2219  *
2220  * d_lookup searches the children of the parent dentry for the name in
2221  * question. If the dentry is found its reference count is incremented and the
2222  * dentry is returned. The caller must use dput to free the entry when it has
2223  * finished using it. %NULL is returned if the dentry does not exist.
2224  */
2225 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2226 {
2227         struct dentry *dentry;
2228         unsigned seq;
2229 
2230         do {
2231                 seq = read_seqbegin(&rename_lock);
2232                 dentry = __d_lookup(parent, name);
2233                 if (dentry)
2234                         break;
2235         } while (read_seqretry(&rename_lock, seq));
2236         return dentry;
2237 }
2238 EXPORT_SYMBOL(d_lookup);
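
/*
 * Usage sketch (illustrative): d_lookup() returns a counted reference, so
 * every hit must be balanced with dput().  The qstr must already carry the
 * correct hash; see d_hash_and_lookup() below for the variant that computes
 * it.  The helper name is hypothetical.
 */
static bool examplefs_child_is_cached(struct dentry *parent,
                                      const struct qstr *name)
{
        struct dentry *child = d_lookup(parent, name);

        if (!child)
                return false;
        dput(child);                    /* drop the reference d_lookup() took */
        return true;
}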
2239 
2240 /**
2241  * __d_lookup - search for a dentry (racy)
2242  * @parent: parent dentry
2243  * @name: qstr of name we wish to find
2244  * Returns: dentry, or NULL
2245  *
2246  * __d_lookup is like d_lookup, however it may (rarely) return a
2247  * false-negative result due to unrelated rename activity.
2248  *
2249  * __d_lookup is slightly faster as it avoids the rename_lock read seqlock;
2250  * however, it must be used carefully, e.g. with a following d_lookup in
2251  * the case of failure.
2252  *
2253  * __d_lookup callers must be commented.
2254  */
2255 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2256 {
2257         unsigned int len = name->len;
2258         unsigned int hash = name->hash;
2259         const unsigned char *str = name->name;
2260         struct hlist_bl_head *b = d_hash(parent, hash);
2261         struct hlist_bl_node *node;
2262         struct dentry *found = NULL;
2263         struct dentry *dentry;
2264 
2265         /*
2266          * Note: There is significant duplication with __d_lookup_rcu which is
2267          * required to prevent single threaded performance regressions
2268          * especially on architectures where smp_rmb (in seqcounts) are costly.
2269          * Keep the two functions in sync.
2270          */
2271 
2272         /*
2273          * The hash list is protected using RCU.
2274          *
2275          * Take d_lock when comparing a candidate dentry, to avoid races
2276          * with d_move().
2277          *
2278          * It is possible that concurrent renames can mess up our list
2279          * walk here and result in missing our dentry, resulting in the
2280          * false-negative result. d_lookup() protects against concurrent
2281          * renames using rename_lock seqlock.
2282          *
2283          * See Documentation/filesystems/path-lookup.txt for more details.
2284          */
2285         rcu_read_lock();
2286         
2287         hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2288 
2289                 if (dentry->d_name.hash != hash)
2290                         continue;
2291 
2292                 spin_lock(&dentry->d_lock);
2293                 if (dentry->d_parent != parent)
2294                         goto next;
2295                 if (d_unhashed(dentry))
2296                         goto next;
2297 
2298                 /*
2299                  * It is safe to compare names since d_move() cannot
2300                  * change the qstr (protected by d_lock).
2301                  */
2302                 if (parent->d_flags & DCACHE_OP_COMPARE) {
2303                         int tlen = dentry->d_name.len;
2304                         const char *tname = dentry->d_name.name;
2305                         if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2306                                 goto next;
2307                 } else {
2308                         if (dentry->d_name.len != len)
2309                                 goto next;
2310                         if (dentry_cmp(dentry, str, len))
2311                                 goto next;
2312                 }
2313 
2314                 dentry->d_lockref.count++;
2315                 found = dentry;
2316                 spin_unlock(&dentry->d_lock);
2317                 break;
2318 next:
2319                 spin_unlock(&dentry->d_lock);
2320         }
2321         rcu_read_unlock();
2322 
2323         return found;
2324 }
2325 
2326 /**
2327  * d_hash_and_lookup - hash the qstr then search for a dentry
2328  * @dir: Directory to search in
2329  * @name: qstr of name we wish to find
2330  *
2331  * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2332  */
2333 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2334 {
2335         /*
2336          * Check for a fs-specific hash function. Note that we must
2337          * calculate the standard hash first, as the d_op->d_hash()
2338          * routine may choose to leave the hash value unchanged.
2339          */
2340         name->hash = full_name_hash(name->name, name->len);
2341         if (dir->d_flags & DCACHE_OP_HASH) {
2342                 int err = dir->d_op->d_hash(dir, name);
2343                 if (unlikely(err < 0))
2344                         return ERR_PTR(err);
2345         }
2346         return d_lookup(dir, name);
2347 }
2348 EXPORT_SYMBOL(d_hash_and_lookup);
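
/*
 * Usage sketch (illustrative): callers starting from a plain C string
 * (procfs-style code is the classic case) let d_hash_and_lookup() compute
 * the hash, including any fs-specific ->d_hash().  The helper below is
 * hypothetical.
 */
static struct dentry *examplefs_lookup_child(struct dentry *dir, const char *name)
{
        struct qstr q = QSTR_INIT(name, strlen(name));
        struct dentry *res = d_hash_and_lookup(dir, &q);

        if (IS_ERR(res))                /* ->d_hash() rejected the name */
                return NULL;
        return res;                     /* counted reference, or NULL */
}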
2349 
2350 /*
2351  * When a file is deleted, we have two options:
2352  * - turn this dentry into a negative dentry
2353  * - unhash this dentry and free it.
2354  *
2355  * Usually, we want to just turn this into
2356  * a negative dentry, but if anybody else is
2357  * currently using the dentry or the inode
2358  * we can't do that and we fall back on removing
2359  * it from the hash queues and waiting for
2360  * it to be deleted later when it has no users
2361  */
2362  
2363 /**
2364  * d_delete - delete a dentry
2365  * @dentry: The dentry to delete
2366  *
2367  * Turn the dentry into a negative dentry if possible, otherwise
2368  * remove it from the hash queues so it can be deleted later
2369  */
2370  
2371 void d_delete(struct dentry * dentry)
2372 {
2373         struct inode *inode;
2374         int isdir = 0;
2375         /*
2376          * Are we the only user?
2377          */
2378 again:
2379         spin_lock(&dentry->d_lock);
2380         inode = dentry->d_inode;
2381         isdir = S_ISDIR(inode->i_mode);
2382         if (dentry->d_lockref.count == 1) {
2383                 if (!spin_trylock(&inode->i_lock)) {
2384                         spin_unlock(&dentry->d_lock);
2385                         cpu_relax();
2386                         goto again;
2387                 }
2388                 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2389                 dentry_unlink_inode(dentry);
2390                 fsnotify_nameremove(dentry, isdir);
2391                 return;
2392         }
2393 
2394         if (!d_unhashed(dentry))
2395                 __d_drop(dentry);
2396 
2397         spin_unlock(&dentry->d_lock);
2398 
2399         fsnotify_nameremove(dentry, isdir);
2400 }
2401 EXPORT_SYMBOL(d_delete);
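
/*
 * Usage sketch (illustrative): d_delete() is normally the VFS's job rather
 * than the filesystem's.  A much simplified version of what the unlink path
 * does after ->unlink succeeds looks roughly like this; locking and fsnotify
 * details are elided and the helper name is hypothetical.
 */
static int examplefs_unlink_common(struct inode *dir, struct dentry *dentry)
{
        int error = dir->i_op->unlink(dir, dentry);

        if (!error)
                d_delete(dentry);       /* go negative, or unhash if still in use */
        return error;
}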
2402 
2403 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2404 {
2405         BUG_ON(!d_unhashed(entry));
2406         hlist_bl_lock(b);
2407         hlist_bl_add_head_rcu(&entry->d_hash, b);
2408         hlist_bl_unlock(b);
2409 }
2410 
2411 static void _d_rehash(struct dentry * entry)
2412 {
2413         __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2414 }
2415 
2416 /**
2417  * d_rehash     - add an entry back to the hash
2418  * @entry: dentry to add to the hash
2419  *
2420  * Adds a dentry to the hash according to its name.
2421  */
2422  
2423 void d_rehash(struct dentry * entry)
2424 {
2425         spin_lock(&entry->d_lock);
2426         _d_rehash(entry);
2427         spin_unlock(&entry->d_lock);
2428 }
2429 EXPORT_SYMBOL(d_rehash);
2430 
2431 static inline unsigned start_dir_add(struct inode *dir)
2432 {
2433 
2434         for (;;) {
2435                 unsigned n = dir->i_dir_seq;
2436                 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2437                         return n;
2438                 cpu_relax();
2439         }
2440 }
2441 
2442 static inline void end_dir_add(struct inode *dir, unsigned n)
2443 {
2444         smp_store_release(&dir->i_dir_seq, n + 2);
2445 }
2446 
2447 static void d_wait_lookup(struct dentry *dentry)
2448 {
2449         if (d_in_lookup(dentry)) {
2450                 DECLARE_WAITQUEUE(wait, current);
2451                 add_wait_queue(dentry->d_wait, &wait);
2452                 do {
2453                         set_current_state(TASK_UNINTERRUPTIBLE);
2454                         spin_unlock(&dentry->d_lock);
2455                         schedule();
2456                         spin_lock(&dentry->d_lock);
2457                 } while (d_in_lookup(dentry));
2458         }
2459 }
2460 
2461 struct dentry *d_alloc_parallel(struct dentry *parent,
2462                                 const struct qstr *name,
2463                                 wait_queue_head_t *wq)
2464 {
2465         unsigned int len = name->len;
2466         unsigned int hash = name->hash;
2467         const unsigned char *str = name->name;
2468         struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2469         struct hlist_bl_node *node;
2470         struct dentry *new = d_alloc(parent, name);
2471         struct dentry *dentry;
2472         unsigned seq, r_seq, d_seq;
2473 
2474         if (unlikely(!new))
2475                 return ERR_PTR(-ENOMEM);
2476 
2477 retry:
2478         rcu_read_lock();
2479         seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
2480         r_seq = read_seqbegin(&rename_lock);
2481         dentry = __d_lookup_rcu(parent, name, &d_seq);
2482         if (unlikely(dentry)) {
2483                 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2484                         rcu_read_unlock();
2485                         goto retry;
2486                 }
2487                 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2488                         rcu_read_unlock();
2489                         dput(dentry);
2490                         goto retry;
2491                 }
2492                 rcu_read_unlock();
2493                 dput(new);
2494                 return dentry;
2495         }
2496         if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2497                 rcu_read_unlock();
2498                 goto retry;
2499         }
2500         hlist_bl_lock(b);
2501         if (unlikely(parent->d_inode->i_dir_seq != seq)) {
2502                 hlist_bl_unlock(b);
2503                 rcu_read_unlock();
2504                 goto retry;
2505         }
2506         /*
2507          * No changes for the parent since the beginning of d_lookup().
2508          * Since all removals from the chain happen with hlist_bl_lock(),
2509          * any potential in-lookup matches are going to stay here until
2510          * we unlock the chain.  All fields are stable in everything
2511          * we encounter.
2512          */
2513         hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2514                 if (dentry->d_name.hash != hash)
2515                         continue;
2516                 if (dentry->d_parent != parent)
2517                         continue;
2518                 if (parent->d_flags & DCACHE_OP_COMPARE) {
2519                         int tlen = dentry->d_name.len;
2520                         const char *tname = dentry->d_name.name;
2521                         if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2522                                 continue;
2523                 } else {
2524                         if (dentry->d_name.len != len)
2525                                 continue;
2526                         if (dentry_cmp(dentry, str, len))
2527                                 continue;
2528                 }
2529                 hlist_bl_unlock(b);
2530                 /* now we can try to grab a reference */
2531                 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2532                         rcu_read_unlock();
2533                         goto retry;
2534                 }
2535 
2536                 rcu_read_unlock();
2537                 /*
2538                  * somebody is likely to be still doing lookup for it;
2539                  * wait for them to finish
2540                  */
2541                 spin_lock(&dentry->d_lock);
2542                 d_wait_lookup(dentry);
2543                 /*
2544                  * it's not in-lookup anymore; in principle we should repeat
2545                  * everything from dcache lookup, but it's likely to be what
2546                  * d_lookup() would've found anyway.  If it is, just return it;
2547                  * otherwise we really have to repeat the whole thing.
2548                  */
2549                 if (unlikely(dentry->d_name.hash != hash))
2550                         goto mismatch;
2551                 if (unlikely(dentry->d_parent != parent))
2552                         goto mismatch;
2553                 if (unlikely(d_unhashed(dentry)))
2554                         goto mismatch;
2555                 if (parent->d_flags & DCACHE_OP_COMPARE) {
2556                         int tlen = dentry->d_name.len;
2557                         const char *tname = dentry->d_name.name;
2558                         if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2559                                 goto mismatch;
2560                 } else {
2561                         if (unlikely(dentry->d_name.len != len))
2562                                 goto mismatch;
2563                         if (unlikely(dentry_cmp(dentry, str, len)))
2564                                 goto mismatch;
2565                 }
2566                 /* OK, it *is* a hashed match; return it */
2567                 spin_unlock(&dentry->d_lock);
2568                 dput(new);
2569                 return dentry;
2570         }
2571         rcu_read_unlock();
2572         /* we can't take ->d_lock here; it's OK, though. */
2573         new->d_flags |= DCACHE_PAR_LOOKUP;
2574         new->d_wait = wq;
2575         hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2576         hlist_bl_unlock(b);
2577         return new;
2578 mismatch:
2579         spin_unlock(&dentry->d_lock);
2580         dput(dentry);
2581         goto retry;
2582 }
2583 EXPORT_SYMBOL(d_alloc_parallel);
2584 
2585 void __d_lookup_done(struct dentry *dentry)
2586 {
2587         struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2588                                                  dentry->d_name.hash);
2589         hlist_bl_lock(b);
2590         dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2591         __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2592         wake_up_all(dentry->d_wait);
2593         dentry->d_wait = NULL;
2594         hlist_bl_unlock(b);
2595         INIT_HLIST_NODE(&dentry->d_u.d_alias);
2596         INIT_LIST_HEAD(&dentry->d_lru);
2597 }
2598 EXPORT_SYMBOL(__d_lookup_done);
2599 
2600 /* inode->i_lock held if inode is non-NULL */
2601 
2602 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2603 {
2604         struct inode *dir = NULL;
2605         unsigned n;
2606         spin_lock(&dentry->d_lock);
2607         if (unlikely(d_in_lookup(dentry))) {
2608                 dir = dentry->d_parent->d_inode;
2609                 n = start_dir_add(dir);
2610                 __d_lookup_done(dentry);
2611         }
2612         if (inode) {
2613                 unsigned add_flags = d_flags_for_inode(inode);
2614                 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2615                 raw_write_seqcount_begin(&dentry->d_seq);
2616                 __d_set_inode_and_type(dentry, inode, add_flags);
2617                 raw_write_seqcount_end(&dentry->d_seq);
2618                 __fsnotify_d_instantiate(dentry);
2619         }
2620         _d_rehash(dentry);
2621         if (dir)
2622                 end_dir_add(dir, n);
2623         spin_unlock(&dentry->d_lock);
2624         if (inode)
2625                 spin_unlock(&inode->i_lock);
2626 }
2627 
2628 /**
2629  * d_add - add dentry to hash queues
2630  * @entry: dentry to add
2631  * @inode: The inode to attach to this dentry
2632  *
2633  * This adds the entry to the hash queues and initializes @inode.
2634  * The entry was actually filled in earlier during d_alloc().
2635  */
2636 
2637 void d_add(struct dentry *entry, struct inode *inode)
2638 {
2639         if (inode) {
2640                 security_d_instantiate(entry, inode);
2641                 spin_lock(&inode->i_lock);
2642         }
2643         __d_add(entry, inode);
2644 }
2645 EXPORT_SYMBOL(d_add);
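
/*
 * Usage sketch (illustrative): the smallest possible ->lookup just caches a
 * negative dentry, which is essentially what libfs' simple_lookup() does; a
 * real filesystem would pass the inode it found instead of NULL.  The method
 * below is hypothetical.
 */
static struct dentry *examplefs_lookup(struct inode *dir, struct dentry *dentry,
                                       unsigned int flags)
{
        struct inode *inode = NULL;     /* or the inode the search found */

        /* ... search the backing store for dentry->d_name ... */
        d_add(dentry, inode);           /* hash it; NULL keeps it negative */
        return NULL;
}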
2646 
2647 /**
2648  * d_exact_alias - find and hash an exact unhashed alias
2649  * @entry: dentry to add
2650  * @inode: The inode to go with this dentry
2651  *
2652  * If an unhashed dentry with the same name/parent and desired
2653  * inode already exists, hash and return it.  Otherwise, return
2654  * NULL.
2655  *
2656  * Parent directory should be locked.
2657  */
2658 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2659 {
2660         struct dentry *alias;
2661         int len = entry->d_name.len;
2662         const char *name = entry->d_name.name;
2663         unsigned int hash = entry->d_name.hash;
2664 
2665         spin_lock(&inode->i_lock);
2666         hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2667                 /*
2668                  * Don't need alias->d_lock here, because aliases with
2669                  * d_parent == entry->d_parent are not subject to name or
2670                  * parent changes, because the parent inode i_mutex is held.
2671                  */
2672                 if (alias->d_name.hash != hash)
2673                         continue;
2674                 if (alias->d_parent != entry->d_parent)
2675                         continue;
2676                 if (alias->d_name.len != len)
2677                         continue;
2678                 if (dentry_cmp(alias, name, len))
2679                         continue;
2680                 spin_lock(&alias->d_lock);
2681                 if (!d_unhashed(alias)) {
2682                         spin_unlock(&alias->d_lock);
2683                         alias = NULL;
2684                 } else {
2685                         __dget_dlock(alias);
2686                         _d_rehash(alias);
2687                         spin_unlock(&alias->d_lock);
2688                 }
2689                 spin_unlock(&inode->i_lock);
2690                 return alias;
2691         }
2692         spin_unlock(&inode->i_lock);
2693         return NULL;
2694 }
2695 EXPORT_SYMBOL(d_exact_alias);
2696 
2697 /**
2698  * dentry_update_name_case - update case insensitive dentry with a new name
2699  * @dentry: dentry to be updated
2700  * @name: new name
2701  *
2702  * Update a case insensitive dentry with new case of name.
2703  *
2704  * dentry must have been returned by d_lookup with name @name. Old and new
2705  * name lengths must match (ie. no d_compare which allows mismatched name
2706  * lengths).
2707  *
2708  * Parent inode i_mutex must be held over d_lookup and into this call (to
2709  * keep renames and concurrent inserts, and readdir(2) away).
2710  */
2711 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2712 {
2713         BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
2714         BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2715 
2716         spin_lock(&dentry->d_lock);
2717         write_seqcount_begin(&dentry->d_seq);
2718         memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2719         write_seqcount_end(&dentry->d_seq);
2720         spin_unlock(&dentry->d_lock);
2721 }
2722 EXPORT_SYMBOL(dentry_update_name_case);
2723 
2724 static void swap_names(struct dentry *dentry, struct dentry *target)
2725 {
2726         if (unlikely(dname_external(target))) {
2727                 if (unlikely(dname_external(dentry))) {
2728                         /*
2729                          * Both external: swap the pointers
2730                          */
2731                         swap(target->d_name.name, dentry->d_name.name);
2732                 } else {
2733                         /*
2734                          * dentry:internal, target:external.  Steal target's
2735                          * storage and make target internal.
2736                          */
2737                         memcpy(target->d_iname, dentry->d_name.name,
2738                                         dentry->d_name.len + 1);
2739                         dentry->d_name.name = target->d_name.name;
2740                         target->d_name.name = target->d_iname;
2741                 }
2742         } else {
2743                 if (unlikely(dname_external(dentry))) {
2744                         /*
2745                          * dentry:external, target:internal.  Give dentry's
2746                          * storage to target and make dentry internal
2747                          */
2748                         memcpy(dentry->d_iname, target->d_name.name,
2749                                         target->d_name.len + 1);
2750                         target->d_name.name = dentry->d_name.name;
2751                         dentry->d_name.name = dentry->d_iname;
2752                 } else {
2753                         /*
2754                          * Both are internal.
2755                          */
2756                         unsigned int i;
2757                         BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2758                         kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2759                         kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2760                         for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2761                                 swap(((long *) &dentry->d_iname)[i],
2762                                      ((long *) &target->d_iname)[i]);
2763                         }
2764                 }
2765         }
2766         swap(dentry->d_name.hash_len, target->d_name.hash_len);
2767 }
2768 
2769 static void copy_name(struct dentry *dentry, struct dentry *target)
2770 {
2771         struct external_name *old_name = NULL;
2772         if (unlikely(dname_external(dentry)))
2773                 old_name = external_name(dentry);
2774         if (unlikely(dname_external(target))) {
2775                 atomic_inc(&external_name(target)->u.count);
2776                 dentry->d_name = target->d_name;
2777         } else {
2778                 memcpy(dentry->d_iname, target->d_name.name,
2779                                 target->d_name.len + 1);
2780                 dentry->d_name.name = dentry->d_iname;
2781                 dentry->d_name.hash_len = target->d_name.hash_len;
2782         }
2783         if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2784                 kfree_rcu(old_name, u.head);
2785 }
2786 
2787 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2788 {
2789         /*
2790          * XXXX: do we really need to take target->d_lock?
2791          */
2792         if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2793                 spin_lock(&target->d_parent->d_lock);
2794         else {
2795                 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2796                         spin_lock(&dentry->d_parent->d_lock);
2797                         spin_lock_nested(&target->d_parent->d_lock,
2798                                                 DENTRY_D_LOCK_NESTED);
2799                 } else {
2800                         spin_lock(&target->d_parent->d_lock);
2801                         spin_lock_nested(&dentry->d_parent->d_lock,
2802                                                 DENTRY_D_LOCK_NESTED);
2803                 }
2804         }
2805         if (target < dentry) {
2806                 spin_lock_nested(&target->d_lock, 2);
2807                 spin_lock_nested(&dentry->d_lock, 3);
2808         } else {
2809                 spin_lock_nested(&dentry->d_lock, 2);
2810                 spin_lock_nested(&target->d_lock, 3);
2811         }
2812 }
2813 
2814 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2815 {
2816         if (target->d_parent != dentry->d_parent)
2817                 spin_unlock(&dentry->d_parent->d_lock);
2818         if (target->d_parent != target)
2819                 spin_unlock(&target->d_parent->d_lock);
2820         spin_unlock(&target->d_lock);
2821         spin_unlock(&dentry->d_lock);
2822 }
2823 
2824 /*
2825  * When switching names, the actual string doesn't strictly have to
2826  * be preserved in the target - because we're dropping the target
2827  * anyway. As such, we can just do a simple memcpy() to copy over
2828  * the new name before we switch, unless we are going to rehash
2829  * it.  Note that if we *do* unhash the target, we are not allowed
2830  * to rehash it without giving it a new name/hash key - whether
2831  * we swap or overwrite the names here, resulting name won't match
2832  * the reality in filesystem; it's only there for d_path() purposes.
2833  * Note that all of this is happening under rename_lock, so the
2834  * any hash lookup seeing it in the middle of manipulations will
2835  * be discarded anyway.  So we do not care what happens to the hash
2836  * key in that case.
2837  */
2838 /*
2839  * __d_move - move a dentry
2840  * @dentry: entry to move
2841  * @target: new dentry
2842  * @exchange: exchange the two dentries
2843  *
2844  * Update the dcache to reflect the move of a file name. Negative
2845  * dcache entries should not be moved in this way. Caller must hold
2846  * rename_lock, the i_mutex of the source and target directories,
2847  * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2848  */
2849 static void __d_move(struct dentry *dentry, struct dentry *target,
2850                      bool exchange)
2851 {
2852         struct inode *dir = NULL;
2853         unsigned n;
2854         if (!dentry->d_inode)
2855                 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2856 
2857         BUG_ON(d_ancestor(dentry, target));
2858         BUG_ON(d_ancestor(target, dentry));
2859 
2860         dentry_lock_for_move(dentry, target);
2861         if (unlikely(d_in_lookup(target))) {
2862                 dir = target->d_parent->d_inode;
2863                 n = start_dir_add(dir);
2864                 __d_lookup_done(target);
2865         }
2866 
2867         write_seqcount_begin(&dentry->d_seq);
2868         write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2869 
2870         /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2871 
2872         /*
2873          * Move the dentry to the target hash queue. Don't bother checking
2874          * for the same hash queue because of how unlikely it is.
2875          */
2876         __d_drop(dentry);
2877         __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2878 
2879         /*
2880          * Unhash the target (d_delete() is not usable here).  If exchanging
2881          * the two dentries, then rehash onto the other's hash queue.
2882          */
2883         __d_drop(target);
2884         if (exchange) {
2885                 __d_rehash(target,
2886                            d_hash(dentry->d_parent, dentry->d_name.hash));
2887         }
2888 
2889         /* Switch the names.. */
2890         if (exchange)
2891                 swap_names(dentry, target);
2892         else
2893                 copy_name(dentry, target);
2894 
2895         /* ... and switch them in the tree */
2896         if (IS_ROOT(dentry)) {
2897                 /* splicing a tree */
2898                 dentry->d_flags |= DCACHE_RCUACCESS;
2899                 dentry->d_parent = target->d_parent;
2900                 target->d_parent = target;
2901                 list_del_init(&target->d_child);
2902                 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2903         } else {
2904                 /* swapping two dentries */
2905                 swap(dentry->d_parent, target->d_parent);
2906                 list_move(&target->d_child, &target->d_parent->d_subdirs);
2907                 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2908                 if (exchange)
2909                         fsnotify_d_move(target);
2910                 fsnotify_d_move(dentry);
2911         }
2912 
2913         write_seqcount_end(&target->d_seq);
2914         write_seqcount_end(&dentry->d_seq);
2915 
2916         if (dir)
2917                 end_dir_add(dir, n);
2918         dentry_unlock_for_move(dentry, target);
2919 }
2920 
2921 /*
2922  * d_move - move a dentry
2923  * @dentry: entry to move
2924  * @target: new dentry
2925  *
2926  * Update the dcache to reflect the move of a file name. Negative
2927  * dcache entries should not be moved in this way. See the locking
2928  * requirements for __d_move.
2929  */
2930 void d_move(struct dentry *dentry, struct dentry *target)
2931 {
2932         write_seqlock(&rename_lock);
2933         __d_move(dentry, target, false);
2934         write_sequnlock(&rename_lock);
2935 }
2936 EXPORT_SYMBOL(d_move);
2937 
2938 /*
2939  * d_exchange - exchange two dentries
2940  * @dentry1: first dentry
2941  * @dentry2: second dentry
2942  */
2943 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2944 {
2945         write_seqlock(&rename_lock);
2946 
2947         WARN_ON(!dentry1->d_inode);
2948         WARN_ON(!dentry2->d_inode);
2949         WARN_ON(IS_ROOT(dentry1));
2950         WARN_ON(IS_ROOT(dentry2));
2951 
2952         __d_move(dentry1, dentry2, true);
2953 
2954         write_sequnlock(&rename_lock);
2955 }
2956 
2957 /**
2958  * d_ancestor - search for an ancestor
2959  * @p1: ancestor dentry
2960  * @p2: child dentry
2961  *
2962  * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2963  * an ancestor of p2, else NULL.
2964  */
2965 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2966 {
2967         struct dentry *p;
2968 
2969         for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2970                 if (p->d_parent == p1)
2971                         return p;
2972         }
2973         return NULL;
2974 }
2975 
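/*
 * Illustrative sketch (not part of dcache.c): a hypothetical caller using
 * d_ancestor() to refuse moving a directory underneath one of its own
 * descendants.  'old' and 'new_parent' are assumed to be valid, pinned
 * dentries.
 */
static inline bool example_would_create_loop(struct dentry *old,
					     struct dentry *new_parent)
{
	/* non-NULL means 'old' is an ancestor of 'new_parent' */
	return d_ancestor(old, new_parent) != NULL;
}
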
2976 /*
2977  * This helper attempts to cope with remotely renamed directories
2978  *
2979  * It assumes that the caller is already holding
2980  * dentry->d_parent->d_inode->i_rwsem, and rename_lock
2981  *
2982  * Note: If ever the locking in lock_rename() changes, then please
2983  * remember to update this too...
2984  */
2985 static int __d_unalias(struct inode *inode,
2986                 struct dentry *dentry, struct dentry *alias)
2987 {
2988         struct mutex *m1 = NULL;
2989         struct rw_semaphore *m2 = NULL;
2990         int ret = -ESTALE;
2991 
2992         /* If alias and dentry share a parent, then no extra locks required */
2993         if (alias->d_parent == dentry->d_parent)
2994                 goto out_unalias;
2995 
2996         /* See lock_rename() */
2997         if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2998                 goto out_err;
2999         m1 = &dentry->d_sb->s_vfs_rename_mutex;
3000         if (!inode_trylock_shared(alias->d_parent->d_inode))
3001                 goto out_err;
3002         m2 = &alias->d_parent->d_inode->i_rwsem;
3003 out_unalias:
3004         __d_move(alias, dentry, false);
3005         ret = 0;
3006 out_err:
3007         if (m2)
3008                 up_read(m2);
3009         if (m1)
3010                 mutex_unlock(m1);
3011         return ret;
3012 }
3013 
3014 /**
3015  * d_splice_alias - splice a disconnected dentry into the tree if one exists
3016  * @inode:  the inode which may have a disconnected dentry
3017  * @dentry: a negative dentry which we want to point to the inode.
3018  *
3019  * If inode is a directory and has an IS_ROOT alias, then d_move that in
3020  * place of the given dentry and return it, else simply d_add the inode
3021  * to the dentry and return NULL.
3022  *
3023  * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
3024  * we should error out: directories can't have multiple aliases.
3025  *
3026  * This is needed in the lookup routine of any filesystem that is exportable
3027  * (via knfsd) so that we can build dcache paths to directories effectively.
3028  *
3029  * If a dentry was found and moved, then it is returned.  Otherwise NULL
3030  * is returned.  This matches the expected return value of ->lookup.
3031  *
3032  * Cluster filesystems may call this function with a negative, hashed dentry.
3033  * In that case, we know that the inode will be a regular file, and also this
3034  * will only occur during atomic_open. So we need to check for the dentry
3035  * being already hashed only in the final case.
3036  */
3037 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
3038 {
3039         if (IS_ERR(inode))
3040                 return ERR_CAST(inode);
3041 
3042         BUG_ON(!d_unhashed(dentry));
3043 
3044         if (!inode)
3045                 goto out;
3046 
3047         security_d_instantiate(dentry, inode);
3048         spin_lock(&inode->i_lock);
3049         if (S_ISDIR(inode->i_mode)) {
3050                 struct dentry *new = __d_find_any_alias(inode);
3051                 if (unlikely(new)) {
3052                         /* The reference to new ensures it remains an alias */
3053                         spin_unlock(&inode->i_lock);
3054                         write_seqlock(&rename_lock);
3055                         if (unlikely(d_ancestor(new, dentry))) {
3056                                 write_sequnlock(&rename_lock);
3057                                 dput(new);
3058                                 new = ERR_PTR(-ELOOP);
3059                                 pr_warn_ratelimited(
3060                                         "VFS: Lookup of '%s' in %s %s"
3061                                         " would have caused loop\n",
3062                                         dentry->d_name.name,
3063                                         inode->i_sb->s_type->name,
3064                                         inode->i_sb->s_id);
3065                         } else if (!IS_ROOT(new)) {
3066                                 int err = __d_unalias(inode, dentry, new);
3067                                 write_sequnlock(&rename_lock);
3068                                 if (err) {
3069                                         dput(new);
3070                                         new = ERR_PTR(err);
3071                                 }
3072                         } else {
3073                                 __d_move(new, dentry, false);
3074                                 write_sequnlock(&rename_lock);
3075                         }
3076                         iput(inode);
3077                         return new;
3078                 }
3079         }
3080 out:
3081         __d_add(dentry, inode);
3082         return NULL;
3083 }
3084 EXPORT_SYMBOL(d_splice_alias);
3085 
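/*
 * Illustrative sketch (not part of dcache.c): the usual ->lookup() shape in
 * an exportable filesystem.  example_iget() is a hypothetical helper that
 * returns the looked-up inode, NULL if the name does not exist, or an
 * ERR_PTR() on failure; d_splice_alias() copes with all three cases.
 */
static struct inode *example_iget(struct inode *dir, const struct qstr *name);

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_iget(dir, &dentry->d_name);

	return d_splice_alias(inode, dentry);
}
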
3086 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
3087 {
3088         *buflen -= namelen;
3089         if (*buflen < 0)
3090                 return -ENAMETOOLONG;
3091         *buffer -= namelen;
3092         memcpy(*buffer, str, namelen);
3093         return 0;
3094 }
3095 
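/*
 * Illustrative sketch (not part of dcache.c): prepend() fills a buffer from
 * the end towards the front, so callers emit the leaf component first.
 * Building "/usr/bin" this way assumes a buffer of at least 9 bytes; the
 * returned pointer lies inside 'buf', not at its start.
 */
static inline char *example_build_usr_bin(char *buf, int buflen)
{
	char *p = buf + buflen;

	prepend(&p, &buflen, "\0", 1);	/* terminating NUL   */
	prepend(&p, &buflen, "bin", 3);	/* p -> "bin"        */
	prepend(&p, &buflen, "/", 1);	/* p -> "/bin"       */
	prepend(&p, &buflen, "usr", 3);	/* p -> "usr/bin"    */
	prepend(&p, &buflen, "/", 1);	/* p -> "/usr/bin"   */
	return p;
}
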
3096 /**
3097  * prepend_name - prepend a pathname in front of current buffer pointer
3098  * @buffer: buffer pointer
3099  * @buflen: allocated length of the buffer
3100  * @name:   name string and length qstr structure
3101  *
3102  * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
3103  * make sure that either the old or the new name pointer and length are
3104  * fetched. However, the length and the pointer may not match: the length
3105  * cannot be trusted, so we copy the name byte by byte until either the
3106  * length is reached or a NUL byte is found. It also prepends "/" at
3107  * the beginning of the name. The sequence number check at the caller will
3108  * retry when a d_move() does happen, so any garbage in the buffer
3109  * due to a mismatched pointer and length will be discarded.
3110  *
3111  * Data dependency barrier is needed to make sure that we see that terminating
3112  * NUL.  Alpha strikes again, film at 11...
3113  */
3114 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
3115 {
3116         const char *dname = ACCESS_ONCE(name->name);
3117         u32 dlen = ACCESS_ONCE(name->len);
3118         char *p;
3119 
3120         smp_read_barrier_depends();
3121 
3122         *buflen -= dlen + 1;
3123         if (*buflen < 0)
3124                 return -ENAMETOOLONG;
3125         p = *buffer -= dlen + 1;
3126         *p++ = '/';
3127         while (dlen--) {
3128                 char c = *dname++;
3129                 if (!c)
3130                         break;
3131                 *p++ = c;
3132         }
3133         return 0;
3134 }
3135 
3136 /**
3137  * prepend_path - Prepend path string to a buffer
3138  * @path: the dentry/vfsmount to report
3139  * @root: root vfsmnt/dentry
3140  * @buffer: pointer to the end of the buffer
3141  * @buflen: pointer to buffer length
3142  *
3143  * The function will first try to write out the pathname without taking any
3144  * lock other than the RCU read lock to make sure that dentries won't go away.
3145  * It only checks the sequence number of the global rename_lock as any change
3146  * in the dentry's d_seq will be preceded by changes in the rename_lock
3147  * sequence number. If the sequence number had been changed, it will restart
3148  * the whole pathname back-tracing sequence again by taking the rename_lock.
3149  * In this case, there is no need to take the RCU read lock as the recursive
3150  * parent pointer references will keep the dentry chain alive as long as no
3151  * rename operation is performed.
3152  */
3153 static int prepend_path(const struct path *path,
3154                         const struct path *root,
3155                         char **buffer, int *buflen)
3156 {
3157         struct dentry *dentry;
3158         struct vfsmount *vfsmnt;
3159         struct mount *mnt;
3160         int error = 0;
3161         unsigned seq, m_seq = 0;
3162         char *bptr;
3163         int blen;
3164 
3165         rcu_read_lock();
3166 restart_mnt:
3167         read_seqbegin_or_lock(&mount_lock, &m_seq);
3168         seq = 0;
3169         rcu_read_lock();
3170 restart:
3171         bptr = *buffer;
3172         blen = *buflen;
3173         error = 0;
3174         dentry = path->dentry;
3175         vfsmnt = path->mnt;
3176         mnt = real_mount(vfsmnt);
3177         read_seqbegin_or_lock(&rename_lock, &seq);
3178         while (dentry != root->dentry || vfsmnt != root->mnt) {
3179                 struct dentry * parent;
3180 
3181                 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
3182                         struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
3183                         /* Escaped? */
3184                         if (dentry != vfsmnt->mnt_root) {
3185                                 bptr = *buffer;
3186                                 blen = *buflen;
3187                                 error = 3;
3188                                 break;
3189                         }
3190                         /* Global root? */
3191                         if (mnt != parent) {
3192                                 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
3193                                 mnt = parent;
3194                                 vfsmnt = &mnt->mnt;
3195                                 continue;
3196                         }
3197                         if (!error)
3198                                 error = is_mounted(vfsmnt) ? 1 : 2;
3199                         break;
3200                 }
3201                 parent = dentry->d_parent;
3202                 prefetch(parent);
3203                 error = prepend_name(&bptr, &blen, &dentry->d_name);
3204                 if (error)
3205                         break;
3206 
3207                 dentry = parent;
3208         }
3209         if (!(seq & 1))
3210                 rcu_read_unlock();
3211         if (need_seqretry(&rename_lock, seq)) {
3212                 seq = 1;
3213                 goto restart;
3214         }
3215         done_seqretry(&rename_lock, seq);
3216 
3217         if (!(m_seq & 1))
3218                 rcu_read_unlock();
3219         if (need_seqretry(&mount_lock, m_seq)) {
3220                 m_seq = 1;
3221                 goto restart_mnt;
3222         }
3223         done_seqretry(&mount_lock, m_seq);
3224 
3225         if (error >= 0 && bptr == *buffer) {
3226                 if (--blen < 0)
3227                         error = -ENAMETOOLONG;
3228                 else
3229                         *--bptr = '/';
3230         }
3231         *buffer = bptr;
3232         *buflen = blen;
3233         return error;
3234 }
3235 
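/*
 * Editorial note on the return value of prepend_path(): 0 means the path
 * was resolved all the way down to the supplied root; 1 or 2 mean the walk
 * hit the global root instead (2 if the vfsmount is no longer attached);
 * 3 means the dentry escaped its mount (e.g. was renamed out from under a
 * bind mount); a negative value is -ENAMETOOLONG from prepend_name().
 */
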
3236 /**
3237  * __d_path - return the path of a dentry
3238  * @path: the dentry/vfsmount to report
3239  * @root: root vfsmnt/dentry
3240  * @buf: buffer to return value in
3241  * @buflen: buffer length
3242  *
3243  * Convert a dentry into an ASCII path name.
3244  *
3245  * Returns a pointer into the buffer or an error code if the
3246  * path was too long.
3247  *
3248  * "buflen" should be positive.
3249  *
3250  * If the path is not reachable from the supplied root, return %NULL.
3251  */
3252 char *__d_path(const struct path *path,
3253                const struct path *root,
3254                char *buf, int buflen)
3255 {
3256         char *res = buf + buflen;
3257         int error;
3258 
3259         prepend(&res, &buflen, "\0", 1);
3260         error = prepend_path(path, root, &res, &buflen);
3261 
3262         if (error < 0)
3263                 return ERR_PTR(error);
3264         if (error > 0)
3265                 return NULL;
3266         return res;
3267 }
3268 
3269 char *d_absolute_path(const struct path *path,
3270                char *buf, int buflen)
3271 {
3272         struct path root = {};
3273         char *res = buf + buflen;
3274         int error;
3275 
3276         prepend(&res, &buflen, "\0", 1);
3277         error = prepend_path(path, &root, &res, &buflen);
3278 
3279         if (error > 1)
3280                 error = -EINVAL;
3281         if (error < 0)
3282                 return ERR_PTR(error);
3283         return res;
3284 }
3285 
3286 /*
3287  * same as __d_path but appends "(deleted)" for unlinked files.
3288  */
3289 static int path_with_deleted(const struct path *path,
3290                              const struct path *root,
3291                              char **buf, int *buflen)
3292 {
3293         prepend(buf, buflen, "\0", 1);
3294         if (d_unlinked(path->dentry)) {
3295                 int error = prepend(buf, buflen, " (deleted)", 10);
3296                 if (error)
3297                         return error;
3298         }
3299 
3300         return prepend_path(path, root, buf, buflen);
3301 }
3302 
3303 static int prepend_unreachable(char **buffer, int *buflen)
3304 {
3305         return prepend(buffer, buflen, "(unreachable)", 13);
3306 }
3307 
3308 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3309 {
3310         unsigned seq;
3311 
3312         do {
3313                 seq = read_seqcount_begin(&fs->seq);
3314                 *root = fs->root;
3315         } while (read_seqcount_retry(&fs->seq, seq));
3316 }
3317 
3318 /**
3319  * d_path - return the path of a dentry
3320  * @path: path to report
3321  * @buf: buffer to return value in
3322  * @buflen: buffer length
3323  *
3324  * Convert a dentry into an ASCII path name. If the entry has been deleted
3325  * the string " (deleted)" is appended. Note that this is ambiguous.
3326  *
3327  * Returns a pointer into the buffer or an error code if the path was
3328  * too long. Note: Callers should use the returned pointer, not the passed
3329  * in buffer, to use the name! The implementation often starts at an offset
3330  * into the buffer, and may leave 0 bytes at the start.
3331  *
3332  * "buflen" should be positive.
3333  */
3334 char *d_path(const struct path *path, char *buf, int buflen)
3335 {
3336         char *res = buf + buflen;
3337         struct path root;
3338         int error;
3339 
3340         /*
3341          * We have various synthetic filesystems that never get mounted.  On
3342          * these filesystems dentries are never used for lookup purposes, and
3343          * thus don't need to be hashed.  They also don't need a name until a
3344          * user wants to identify the object in /proc/pid/fd/.  The little hack
3345          * below allows us to generate a name for these objects on demand:
3346          *
3347          * Some pseudo inodes are mountable.  When they are mounted
3348          * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
3349          * and instead have d_path return the mounted path.
3350          */
3351         if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3352             (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3353                 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3354 
3355         rcu_read_lock();
3356         get_fs_root_rcu(current->fs, &root);
3357         error = path_with_deleted(path, &root, &res, &buflen);
3358         rcu_read_unlock();
3359 
3360         if (error < 0)
3361                 res = ERR_PTR(error);
3362         return res;
3363 }
3364 EXPORT_SYMBOL(d_path);
3365 
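/*
 * Illustrative sketch (not part of dcache.c): a hypothetical caller using
 * d_path() to print the path of an open file.  Note that the result is a
 * pointer into 'buf', not 'buf' itself.
 */
static void example_print_file_path(struct file *file)
{
	char buf[256];
	char *p = d_path(&file->f_path, buf, sizeof(buf));

	if (IS_ERR(p))
		pr_warn("example: path too long\n");
	else
		pr_info("example: %s\n", p);
}
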
3366 /*
3367  * Helper function for dentry_operations.d_dname() members
3368  */
3369 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3370                         const char *fmt, ...)
3371 {
3372         va_list args;
3373         char temp[64];
3374         int sz;
3375 
3376         va_start(args, fmt);
3377         sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3378         va_end(args);
3379 
3380         if (sz > sizeof(temp) || sz > buflen)
3381                 return ERR_PTR(-ENAMETOOLONG);
3382 
3383         buffer += buflen - sz;
3384         return memcpy(buffer, temp, sz);
3385 }
3386 
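/*
 * Illustrative sketch (not part of dcache.c): how a pseudo filesystem's
 * ->d_dname() method might use dynamic_dname() to synthesize a name such
 * as "example:[1234]" from the inode number (modelled on pipefs and
 * similar filesystems).
 */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
			     dentry->d_inode->i_ino);
}
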
3387 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3388 {
3389         char *end = buffer + buflen;
3390         /* these dentries are never renamed, so d_lock is not needed */
3391         if (prepend(&end, &buflen, " (deleted)", 11) ||
3392             prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3393             prepend(&end, &buflen, "/", 1))  
3394                 end = ERR_PTR(-ENAMETOOLONG);
3395         return end;
3396 }
3397 EXPORT_SYMBOL(simple_dname);
3398 
3399 /*
3400  * Write full pathname from the root of the filesystem into the buffer.
3401  */
3402 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3403 {
3404         struct dentry *dentry;
3405         char *end, *retval;
3406         int len, seq = 0;
3407         int error = 0;
3408 
3409         if (buflen < 2)
3410                 goto Elong;
3411 
3412         rcu_read_lock();
3413 restart:
3414         dentry = d;
3415         end = buf + buflen;
3416         len = buflen;
3417         prepend(&end, &len, "\0", 1);
3418         /* Get '/' right */
3419         retval = end-1;
3420         *retval = '/';
3421         read_seqbegin_or_lock(&rename_lock, &seq);
3422         while (!IS_ROOT(dentry)) {
3423                 struct dentry *parent = dentry->d_parent;
3424 
3425                 prefetch(parent);
3426                 error = prepend_name(&end, &len, &dentry->d_name);
3427                 if (error)
3428                         break;
3429 
3430                 retval = end;
3431                 dentry = parent;
3432         }
3433         if (!(seq & 1))
3434                 rcu_read_unlock();
3435         if (need_seqretry(&rename_lock, seq)) {
3436                 seq = 1;
3437                 goto restart;
3438         }
3439         done_seqretry(&rename_lock, seq);
3440         if (error)
3441                 goto Elong;
3442         return retval;
3443 Elong:
3444         return ERR_PTR(-ENAMETOOLONG);
3445 }
3446 
3447 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3448 {
3449         return __dentry_path(dentry, buf, buflen);
3450 }
3451 EXPORT_SYMBOL(dentry_path_raw);
3452 
3453 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3454 {
3455         char *p = NULL;
3456         char *retval;
3457 
3458         if (d_unlinked(dentry)) {
3459                 p = buf + buflen;
3460                 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3461                         goto Elong;
3462                 buflen++;
3463         }
3464         retval = __dentry_path(dentry, buf, buflen);
3465         if (!IS_ERR(retval) && p)
3466                 *p = '/';       /* restore '/' overridden with '\0' */
3467         return retval;
3468 Elong:
3469         return ERR_PTR(-ENAMETOOLONG);
3470 }
3471 
3472 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3473                                     struct path *pwd)
3474 {
3475         unsigned seq;
3476 
3477         do {
3478                 seq = read_seqcount_begin(&fs->seq);
3479                 *root = fs->root;
3480                 *pwd = fs->pwd;
3481         } while (read_seqcount_retry(&fs->seq, seq));
3482 }
3483 
3484 /*
3485  * NOTE! The user-level library version returns a
3486  * character pointer. The kernel system call just
3487  * returns the length of the buffer filled (which
3488  * includes the ending '\0' character), or a negative
3489  * error value. So libc would do something like
3490  *
3491  *      char *getcwd(char * buf, size_t size)
3492  *      {
3493  *              int retval;
3494  *
3495  *              retval = sys_getcwd(buf, size);
3496  *              if (retval >= 0)
3497  *                      return buf;
3498  *              errno = -retval;
3499  *              return NULL;
3500  *      }
3501  */
3502 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3503 {
3504         int error;
3505         struct path pwd, root;
3506         char *page = __getname();
3507 
3508         if (!page)
3509                 return -ENOMEM;
3510 
3511         rcu_read_lock();
3512         get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3513 
3514         error = -ENOENT;
3515         if (!d_unlinked(pwd.dentry)) {
3516                 unsigned long len;
3517                 char *cwd = page + PATH_MAX;
3518                 int buflen = PATH_MAX;
3519 
3520                 prepend(&cwd, &buflen, "\0", 1);
3521                 error = prepend_path(&pwd, &root, &cwd, &buflen);
3522                 rcu_read_unlock();
3523 
3524                 if (error < 0)
3525                         goto out;
3526 
3527                 /* Unreachable from current root */
3528                 if (error > 0) {
3529                         error = prepend_unreachable(&cwd, &buflen);
3530                         if (error)
3531                                 goto out;
3532                 }
3533 
3534                 error = -ERANGE;
3535                 len = PATH_MAX + page - cwd;
3536                 if (len <= size) {
3537                         error = len;
3538                         if (copy_to_user(buf, cwd, len))
3539                                 error = -EFAULT;
3540                 }
3541         } else {
3542                 rcu_read_unlock();
3543         }
3544 
3545 out:
3546         __putname(page);
3547         return error;
3548 }
3549 
3550 /*
3551  * Test whether new_dentry is a subdirectory of old_dentry.
3552  *
3553  * Trivially implemented using the dcache structure
3554  */
3555 
3556 /**
3557  * is_subdir - is new dentry a subdirectory of old_dentry
3558  * @new_dentry: new dentry
3559  * @old_dentry: old dentry
3560  *
3561  * Returns true if new_dentry is a subdirectory of old_dentry (at any depth).
3562  * Returns false otherwise.
3563  * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
3564  */
3565   
3566 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3567 {
3568         bool result;
3569         unsigned seq;
3570 
3571         if (new_dentry == old_dentry)
3572                 return true;
3573 
3574         do {
3575                 /* for restarting inner loop in case of seq retry */
3576                 seq = read_seqbegin(&rename_lock);
3577                 /*
3578                  * Need rcu_read_lock() to protect against d_parent changing
3579                  * under us due to d_move()
3580                  */
3581                 rcu_read_lock();
3582                 if (d_ancestor(old_dentry, new_dentry))
3583                         result = true;
3584                 else
3585                         result = false;
3586                 rcu_read_unlock();
3587         } while (read_seqretry(&rename_lock, seq));
3588 
3589         return result;
3590 }
3591 
3592 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3593 {
3594         struct dentry *root = data;
3595         if (dentry != root) {
3596                 if (d_unhashed(dentry) || !dentry->d_inode)
3597                         return D_WALK_SKIP;
3598 
3599                 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3600                         dentry->d_flags |= DCACHE_GENOCIDE;
3601                         dentry->d_lockref.count--;
3602                 }
3603         }
3604         return D_WALK_CONTINUE;
3605 }
3606 
3607 void d_genocide(struct dentry *parent)
3608 {
3609         d_walk(parent, parent, d_genocide_kill, NULL);
3610 }
3611 
3612 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3613 {
3614         inode_dec_link_count(inode);
3615         BUG_ON(dentry->d_name.name != dentry->d_iname ||
3616                 !hlist_unhashed(&dentry->d_u.d_alias) ||
3617                 !d_unlinked(dentry));
3618         spin_lock(&dentry->d_parent->d_lock);
3619         spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3620         dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3621                                 (unsigned long long)inode->i_ino);
3622         spin_unlock(&dentry->d_lock);
3623         spin_unlock(&dentry->d_parent->d_lock);
3624         d_instantiate(dentry, inode);
3625 }
3626 EXPORT_SYMBOL(d_tmpfile);
3627 
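/*
 * Illustrative sketch (not part of dcache.c): the shape of a filesystem's
 * ->tmpfile() method.  example_new_inode() is a hypothetical helper that
 * returns a freshly allocated inode with i_nlink == 1 (or an ERR_PTR());
 * d_tmpfile() drops that link count and attaches the inode to the
 * still-unlinked dentry.
 */
static struct inode *example_new_inode(struct inode *dir, umode_t mode);

static int example_tmpfile(struct inode *dir, struct dentry *dentry,
			   umode_t mode)
{
	struct inode *inode = example_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_tmpfile(dentry, inode);
	return 0;
}
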
3628 static __initdata unsigned long dhash_entries;
3629 static int __init set_dhash_entries(char *str)
3630 {
3631         if (!str)
3632                 return 0;
3633         dhash_entries = simple_strtoul(str, &str, 0);
3634         return 1;
3635 }
3636 __setup("dhash_entries=", set_dhash_entries);
3637 
3638 static void __init dcache_init_early(void)
3639 {
3640         unsigned int loop;
3641 
3642         /* If hashes are distributed across NUMA nodes, defer
3643          * hash allocation until vmalloc space is available.
3644          */
3645         if (hashdist)
3646                 return;
3647 
3648         dentry_hashtable =
3649                 alloc_large_system_hash("Dentry cache",
3650                                         sizeof(struct hlist_bl_head),
3651                                         dhash_entries,
3652                                         13,
3653                                         HASH_EARLY,
3654                                         &d_hash_shift,
3655                                         &d_hash_mask,
3656                                         0,
3657                                         0);
3658 
3659         for (loop = 0; loop < (1U << d_hash_shift); loop++)
3660                 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3661 }
3662 
3663 static void __init dcache_init(void)
3664 {
3665         unsigned int loop;
3666 
3667         /* 
3668          * A constructor could be added for stable state like the lists,
3669          * but it is probably not worth it because of the cache nature
3670          * of the dcache. 
3671          */
3672         dentry_cache = KMEM_CACHE(dentry,
3673                 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
3674 
3675         /* Hash may have been set up in dcache_init_early */
3676         if (!hashdist)
3677                 return;
3678 
3679         dentry_hashtable =
3680                 alloc_large_system_hash("Dentry cache",
3681                                         sizeof(struct hlist_bl_head),
3682                                         dhash_entries,
3683                                         13,
3684                                         0,
3685                                         &d_hash_shift,
3686                                         &d_hash_mask,
3687                                         0,
3688                                         0);
3689 
3690         for (loop = 0; loop < (1U << d_hash_shift); loop++)
3691                 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3692 }
3693 
3694 /* SLAB cache for __getname() consumers */
3695 struct kmem_cache *names_cachep __read_mostly;
3696 EXPORT_SYMBOL(names_cachep);
3697 
3698 EXPORT_SYMBOL(d_genocide);
3699 
3700 void __init vfs_caches_init_early(void)
3701 {
3702         dcache_init_early();
3703         inode_init_early();
3704 }
3705 
3706 void __init vfs_caches_init(void)
3707 {
3708         names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3709                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3710 
3711         dcache_init();
3712         inode_init();
3713         files_init();
3714         files_maxfiles_init();
3715         mnt_init();
3716         bdev_cache_init();
3717         chrdev_init();
3718 }
3719 
