Linux/lib/percpu-refcount.c

#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
 * percpu counters will all sum to the correct value
 *
 * (More precisely: because modular arithmetic is commutative the sum of all the
 * percpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
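
/*
 * A worked example of the claim above (illustrative numbers, using a
 * hypothetical 8-bit percpu counter): 300 gets on cpu0 leave that counter
 * at 300 % 256 = 44, and 100 puts on cpu1 leave its counter at
 * -100 % 256 = 156.  Summing modulo 256 gives (44 + 156) % 256 = 200,
 * exactly the net 300 - 100 = 200 a single shared counter would hold.
 */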

#define PERCPU_COUNT_BIAS       (1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
        return (unsigned long __percpu *)
                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1).  See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                    unsigned int flags, gfp_t gfp)
{
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));
        unsigned long start_count = 0;

        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;

        ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

        if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
        else
                start_count += PERCPU_COUNT_BIAS;

        if (flags & PERCPU_REF_INIT_DEAD)
                ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        else
                start_count++;

        atomic_long_set(&ref->count, start_count);

        ref->release = release;
        ref->confirm_switch = NULL;
        return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
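
/*
 * Minimal usage sketch (illustrative only; my_obj and my_obj_release are
 * hypothetical names, not taken from this file):
 *
 *	struct my_obj {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		percpu_ref_exit(&obj->ref);
 *		kfree(obj);		(neither call sleeps)
 *	}
 *
 *	int err = percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *	if (err)
 *		return err;		(@ref starts in percpu mode, count 1)
 */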

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

        if (percpu_count) {
                /* non-NULL confirm_switch indicates switching in progress */
                WARN_ON_ONCE(ref->confirm_switch);
                free_percpu(percpu_count);
                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        }
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
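
/*
 * Sketch of the init-failure path mentioned above (illustrative;
 * setup_rest_of_obj() is a hypothetical helper):
 *
 *	err = percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *	if (err)
 *		goto free_obj;
 *	err = setup_rest_of_obj(obj);
 *	if (err) {
 *		percpu_ref_exit(&obj->ref);
 *		goto free_obj;
 *	}
 */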

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

        ref->confirm_switch(ref);
        ref->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);

        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(percpu_count, cpu);

        pr_debug("global %ld percpu %ld",
                 atomic_long_read(&ref->count), (long)count);

        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
         * to &ref->count; since gets could be happening on one cpu while puts
         * happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - but the
         * sum of all the counts will be consistent and correct.
         *
         * Subtracting the bias value then has to happen _after_ adding count to
         * &ref->count; we need the bias value to prevent &ref->count from
         * reaching 0 before we add the percpu counts. But doing it at the same
         * time is equivalent and saves us atomic operations:
         */
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

        WARN_ONCE(atomic_long_read(&ref->count) <= 0,
                  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
                  ref->release, atomic_long_read(&ref->count));

        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
}
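
/*
 * Worked example of the arithmetic above (illustrative numbers): with three
 * live references, the percpu counters might read cpu0 = -5 and cpu1 = +7
 * (sum +2) while &ref->count still holds PERCPU_COUNT_BIAS + 1 from
 * percpu_ref_init().  The single atomic_long_add(2 - PERCPU_COUNT_BIAS)
 * above lands directly on 3; the bias keeps &ref->count well above zero for
 * any atomic-mode puts that slip in before this RCU callback runs.
 */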

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                          percpu_ref_func_t *confirm_switch)
{
        if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
                if (confirm_switch)
                        confirm_switch(ref);
                return;
        }

        /* switching from percpu to atomic */
        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

        /*
         * Non-NULL ->confirm_switch is used to indicate that switching is
         * in progress.  Use noop one if unspecified.
         */
        ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

        percpu_ref_get(ref);    /* put after confirmation */
        call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        int cpu;

        BUG_ON(!percpu_count);

        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;

        atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

        /*
         * Restore per-cpu operation.  smp_store_release() is paired with
         * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
         * that the zeroing is visible to all percpu accesses which can see
         * the following __PERCPU_REF_ATOMIC clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;

        smp_store_release(&ref->percpu_count_ptr,
                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                                     percpu_ref_func_t *confirm_switch)
{
        lockdep_assert_held(&percpu_ref_switch_lock);

        /*
         * If the previous ATOMIC switching hasn't finished yet, wait for
         * its completion.  If the caller ensures that ATOMIC switching
         * isn't in progress, this function can be called from any context.
         */
        wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
                            percpu_ref_switch_lock);

        if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->force_atomic = true;
        __percpu_ref_switch_mode(ref, confirm_switch);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
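
/*
 * Usage sketch (illustrative; continues the hypothetical my_obj example and
 * uses a made-up callback name):
 *
 *	static void my_obj_atomic_confirmed(struct percpu_ref *ref)
 *	{
 *		(called once no CPU does percpu gets/puts; must not block)
 *	}
 *
 *	percpu_ref_switch_to_atomic(&obj->ref, my_obj_atomic_confirmed);
 */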

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->force_atomic = false;
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
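
/*
 * Continuing the sketch: because the atomic state set above is sticky,
 * returning to percpu operation has to be requested explicitly:
 *
 *	percpu_ref_switch_to_percpu(&obj->ref);
 */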

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
                  "%s called more than once on %pf!", __func__, ref->release);

        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_mode(ref, confirm_kill);
        percpu_ref_put(ref);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
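
/*
 * Usage sketch (illustrative; assumes the hypothetical my_obj also embeds a
 * struct completion kill_done set up with init_completion()):
 *
 *	static void my_obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		complete(&obj->kill_done);	(must not block)
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 *	wait_for_completion(&obj->kill_done);
 *	(all further percpu_ref_tryget_live() calls now fail)
 */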

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ON_ONCE(!percpu_ref_is_zero(ref));

        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
        percpu_ref_get(ref);
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
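
/*
 * Usage sketch (illustrative): a killed ref that has reached zero, e.g. one
 * whose release callback does not free the embedding object, can be recycled
 * instead of exited:
 *
 *	percpu_ref_kill(&obj->ref);
 *	... wait until percpu_ref_is_zero(&obj->ref) ...
 *	percpu_ref_reinit(&obj->ref);	(same state as right after percpu_ref_init())
 */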
