Linux/lib/percpu_counter.c

/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return true;
        default:
                return false;
        }
}

static struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else   /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif  /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                unsigned long flags;
                raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);

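/*
 * Annotation (not part of the upstream file): __percpu_counter_add() is the
 * batched fast path.  Each CPU accumulates deltas in its own s32 counter and
 * only takes fbc->lock once the local value would reach +/-batch; at that
 * point the whole local delta is folded into fbc->count.  Rewinding the
 * per-cpu counter with __this_cpu_sub(count - amount), rather than writing 0,
 * avoids discarding an update that interrupt context may have made on the
 * same CPU between the read and the fold.  Most callers use the
 * percpu_counter_add() wrapper from <linux/percpu_counter.h>, which passes
 * the global percpu_counter_batch defined further down.
 */
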
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

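/*
 * Annotation (not part of the upstream file): percpu_counter_read() only
 * looks at fbc->count, so it can be stale by up to batch * num_online_cpus()
 * worth of unfolded per-cpu deltas.  __percpu_counter_sum() pays for a lock
 * round trip and a walk over all online CPUs to add those deltas to the
 * returned value (without modifying them); the percpu_counter_sum() and
 * percpu_counter_sum_positive() wrappers in the header are the usual entry
 * points.
 */
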
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key)
{
        unsigned long flags __maybe_unused;

        raw_spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu_gfp(s32, gfp);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        spin_lock_irqsave(&percpu_counters_lock, flags);
        list_add(&fbc->list, &percpu_counters);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        unsigned long flags __maybe_unused;

        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        spin_lock_irqsave(&percpu_counters_lock, flags);
        list_del(&fbc->list);
        spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
        return 0;
}

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        struct percpu_counter *fbc;

        compute_batch_value(cpu);

        spin_lock_irq(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                raw_spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        }
        spin_unlock_irq(&percpu_counters_lock);
#endif
        return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        s64     count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);

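/*
 * Annotation (not part of the upstream file): the compare helper first uses
 * the cheap approximate count and only falls back to the expensive
 * __percpu_counter_sum() when rhs lies within the worst-case error margin
 * (batch * num_online_cpus()) of that approximation.  The
 * percpu_counter_compare() wrapper in the header passes percpu_counter_batch
 * as the batch argument.
 */
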
static int __init percpu_counter_startup(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
                                compute_batch_value, NULL);
        WARN_ON(ret < 0);
        ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
                                        "lib/percpu_cnt:dead", NULL,
                                        percpu_counter_cpu_dead);
        WARN_ON(ret < 0);
        return 0;
}
module_init(percpu_counter_startup);
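For orientation, the sketch below shows how a caller would typically drive this API. It is a minimal, illustrative kernel-module example, not part of the upstream file: the module name and the demo_events counter are made up, while the calls themselves (percpu_counter_init/add/inc/read/sum/destroy) are the wrappers declared in <linux/percpu_counter.h> around the functions above.

#include <linux/module.h>
#include <linux/percpu_counter.h>

static struct percpu_counter demo_events;       /* hypothetical example counter */

static int __init percpu_counter_demo_init(void)
{
        int err;

        /* Start the counter at 0; GFP_KERNEL is fine from process context. */
        err = percpu_counter_init(&demo_events, 0, GFP_KERNEL);
        if (err)
                return err;

        /* Cheap per-cpu updates; fbc->lock is only taken once a CPU's
         * local delta reaches percpu_counter_batch. */
        percpu_counter_add(&demo_events, 1);
        percpu_counter_inc(&demo_events);

        pr_info("percpu_counter demo: approx=%lld exact=%lld\n",
                percpu_counter_read(&demo_events),
                percpu_counter_sum(&demo_events));
        return 0;
}

static void __exit percpu_counter_demo_exit(void)
{
        percpu_counter_destroy(&demo_events);
}

module_init(percpu_counter_demo_init);
module_exit(percpu_counter_demo_exit);
MODULE_LICENSE("GPL");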
