/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
	b->head = NULL;
	b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
	*b->tail = head;
	b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
	return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
	struct rcu_head *head;

	if (rcu_batch_empty(b))
		return NULL;

	head = b->head;
	b->head = head->next;
	if (b->tail == &head->next)
		rcu_batch_init(b);

	return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
	if (!rcu_batch_empty(from)) {
		*to->tail = from->head;
		to->tail = from->tail;
		rcu_batch_init(from);
	}
}

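/*
 * Editor's illustrative sketch, not part of the original file: a
 * hypothetical, compiled-out self-test showing the semantics of the
 * rcu_batch tail-pointer queue above. When the batch is empty, ->tail
 * points at ->head itself, which is what lets rcu_batch_queue() append
 * and rcu_batch_move() splice a whole batch in O(1).
 */
#if 0
static void rcu_batch_selftest(void)
{
	struct rcu_batch b;
	static struct rcu_head h1, h2;

	rcu_batch_init(&b);
	BUG_ON(!rcu_batch_empty(&b));
	rcu_batch_queue(&b, &h1);
	rcu_batch_queue(&b, &h2);
	BUG_ON(rcu_batch_dequeue(&b) != &h1);	/* FIFO order. */
	BUG_ON(rcu_batch_dequeue(&b) != &h2);
	BUG_ON(!rcu_batch_empty(&b));	/* ->tail reset to &b->head. */
}
#endif
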
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	spin_lock_init(&sp->queue_lock);
	sp->running = false;
	rcu_batch_init(&sp->batch_queue);
	rcu_batch_init(&sp->batch_check0);
	rcu_batch_init(&sp->batch_check1);
	rcu_batch_init(&sp->batch_done);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function. Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;
	unsigned long t;

	for_each_possible_cpu(cpu) {
		t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
		sum += t;
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero. An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement. This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long seq;

	seq = srcu_readers_seq_idx(sp, idx);

	/*
	 * The following smp_mb() A pairs with the smp_mb() B located in
	 * __srcu_read_lock(). This pairing ensures that if an
	 * __srcu_read_lock() increments its counter after the summation
	 * in srcu_readers_active_idx(), then the corresponding SRCU read-side
	 * critical section will see any changes made prior to the start
	 * of the current SRCU grace period.
	 *
	 * Also, if the above call to srcu_readers_seq_idx() saw the
	 * increment of ->seq[], then the call to srcu_readers_active_idx()
	 * must see the increment of ->c[].
	 */
	smp_mb(); /* A */

	/*
	 * Note that srcu_readers_active_idx() can incorrectly return
	 * zero even though there is a pre-existing reader throughout.
	 * To see this, suppose that task A is in a very long SRCU
	 * read-side critical section that started on CPU 0, and that
	 * no other reader exists, so that the sum of the counters
	 * is equal to one. Then suppose that task B starts executing
	 * srcu_readers_active_idx(), summing up to CPU 1, and then that
	 * task C starts reading on CPU 0, so that its increment is not
	 * summed, but finishes reading on CPU 2, so that its decrement
	 * -is- summed. Then when task B completes its sum, it will
	 * incorrectly get zero, despite the fact that task A has been
	 * in its SRCU read-side critical section the whole time.
	 *
	 * We therefore do a validation step should srcu_readers_active_idx()
	 * return zero.
	 */
	if (srcu_readers_active_idx(sp, idx) != 0)
		return false;

	/*
	 * The remainder of this function is the validation step.
	 * The following smp_mb() D pairs with the smp_mb() C in
	 * __srcu_read_unlock(). If the __srcu_read_unlock() was seen
	 * by srcu_readers_active_idx() above, then any destructive
	 * operation performed after the grace period will happen after
	 * the corresponding SRCU read-side critical section.
	 *
	 * Note that there can be at most NR_CPUS worth of readers using
	 * the old index, which is not enough to overflow even a 32-bit
	 * integer. (Yes, this does mean that systems having more than
	 * a billion or so CPUs need to be 64-bit systems.) Therefore,
	 * the sum of the ->seq[] counters cannot possibly overflow.
	 * Therefore, the only way that the return values of the two
	 * calls to srcu_readers_seq_idx() can be equal is if there were
	 * no increments of the corresponding rank of ->seq[] counts
	 * in the interim. But the missed-increment scenario laid out
	 * above includes an increment of the ->seq[] counter by
	 * the corresponding __srcu_read_lock(). Therefore, if this
	 * scenario occurs, the return values from the two calls to
	 * srcu_readers_seq_idx() will differ, and thus the validation
	 * step below suffices.
	 */
	smp_mb(); /* D */

	return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct. That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
		sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
	}
	return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

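/*
 * Editor's illustrative sketch, not part of the original file: the
 * expected lifetime of a dynamically initialized srcu_struct, with
 * hypothetical identifiers. All readers (and, for call_srcu() users,
 * all callbacks) must be done before cleanup_srcu_struct(), or the
 * WARN_ON() above fires and the per-CPU memory is leaked.
 */
#if 0
static struct srcu_struct example_srcu;

static int example_startup(void)
{
	return init_srcu_struct(&example_srcu);	/* -ENOMEM on failure. */
}

static void example_teardown(void)
{
	cleanup_srcu_struct(&example_srcu);	/* Readers must have drained. */
}
#endif
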
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct. Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = ACCESS_ONCE(sp->completed) & 0x1;
	preempt_disable();
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
	smp_mb(); /* B */ /* Avoid leaking the critical section. */
	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
	preempt_enable();
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct. Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */ /* Avoid leaking the critical section. */
	this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

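/*
 * Editor's illustrative sketch, not part of the original file: typical
 * reader-side usage. Callers normally use srcu_read_lock() and
 * srcu_read_unlock() from <linux/srcu.h>, which wrap the primitives
 * above and handle lockdep bookkeeping. "example_srcu", "example_ptr",
 * and "struct example" are hypothetical.
 */
#if 0
struct example {
	int a;
};
static struct example __rcu *example_ptr;
static struct srcu_struct example_srcu;

static int example_reader(void)
{
	int idx;
	int val = -1;
	struct example *p;

	idx = srcu_read_lock(&example_srcu);
	p = srcu_dereference(example_ptr, &example_srcu);
	if (p)
		val = p->a;	/* Unlike plain RCU, sleeping is legal here. */
	srcu_read_unlock(&example_srcu, idx);
	return val;
}
#endif
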
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited(). We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections. If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods. This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY		5
#define SYNCHRONIZE_SRCU_TRYCOUNT	2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT	12

/*
 * Wait until all pre-existing readers complete. Such readers
 * will have used the index specified by "idx".
 * The caller must ensure that ->completed is not changed while checking,
 * and that idx = (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays. This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections. On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu(). It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head))
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	rcu_batch_queue(&sp->batch_queue, head);
	if (!sp->running) {
		sp->running = true;
		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);

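/*
 * Editor's illustrative sketch, not part of the original file: typical
 * call_srcu() usage, embedding the rcu_head in the protected structure
 * so that the callback can recover the enclosing object via
 * container_of(). Identifiers are hypothetical; kfree() would also
 * require <linux/slab.h>.
 */
#if 0
struct example {
	int a;
	struct rcu_head rh;
};
static struct srcu_struct example_srcu;

static void example_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct example, rh));
}

static void example_retire(struct example *p)
{
	/* Caller must already have unpublished p from reader-visible paths. */
	call_srcu(&example_srcu, &p->rh, example_free_cb);
}
#endif
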
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;
	bool done = false;

	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
			   !lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	if (!sp->running) {
		/* steal the processing owner */
		sp->running = true;
		rcu_batch_queue(&sp->batch_check0, head);
		spin_unlock_irq(&sp->queue_lock);

		srcu_advance_batches(sp, trycount);
		if (!rcu_batch_empty(&sp->batch_done)) {
			BUG_ON(sp->batch_done.head != head);
			rcu_batch_dequeue(&sp->batch_done);
			done = true;
		}
		/* give the processing owner to work_struct */
		srcu_reschedule(sp);
	} else {
		rcu_batch_queue(&sp->batch_queue, head);
		spin_unlock_irq(&sp->queue_lock);
	}

	if (!done)
		wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both ranks of reader counters to drain to zero.
 * To avoid starvation of synchronize_srcu(), it first waits for the count
 * of the rank indexed by ((->completed & 1) ^ 1) to drain to zero, then
 * flips ->completed and waits for the count of the other rank to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu(). In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section. Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_expedited
			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
			   : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

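/*
 * Editor's illustrative sketch, not part of the original file: the
 * classic update pattern built on synchronize_srcu(), continuing the
 * hypothetical "example" identifiers from the earlier sketches; kfree()
 * would also require <linux/slab.h>. Unpublish the old version, wait
 * for pre-existing readers, then free.
 */
#if 0
static DEFINE_MUTEX(example_mutex);	/* Serializes updaters. */

static void example_update(struct example *newp)
{
	struct example *oldp;

	mutex_lock(&example_mutex);
	oldp = rcu_dereference_protected(example_ptr,
					 lockdep_is_held(&example_mutex));
	rcu_assign_pointer(example_ptr, newp);
	mutex_unlock(&example_mutex);

	synchronize_srcu(&example_srcu);	/* Pre-existing readers done. */
	kfree(oldp);
}
#endif
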
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH	10
#define SRCU_INTERVAL		1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
	if (!rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
		spin_unlock_irq(&sp->queue_lock);
	}
}

/*
 * Core SRCU state machine. Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
	int idx = 1 ^ (sp->completed & 1);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1. We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 */

	if (rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_check1))
		return; /* no callbacks need to be advanced */

	if (!try_check_zero(sp, idx, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have already been through their
	 * first check-zero-and-flip, back when they were enqueued on
	 * ->batch_check0 in a previous invocation of srcu_advance_batches().
	 * (Presumably try_check_zero() returned false during that
	 * invocation, leaving the callbacks stranded on ->batch_check1.)
	 * They are therefore ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);

	if (rcu_batch_empty(&sp->batch_check0))
		return; /* no callbacks need to be advanced */
	srcu_flip(sp);

	/*
	 * The callbacks in ->batch_check0 just passed their first
	 * check-zero-and-flip, so move them to ->batch_check1 for
	 * future checking on the other idx.
	 */
	rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

	/*
	 * SRCU read-side critical sections are normally short, so check
	 * at least twice in quick succession after a flip.
	 */
	trycount = trycount < 2 ? 2 : trycount;
	if (!try_check_zero(sp, idx^1, trycount))
		return; /* failed to advance, will try after SRCU_INTERVAL */

	/*
	 * The callbacks in ->batch_check1 have now waited for all
	 * pre-existing readers using both idx values. They are therefore
	 * ready to invoke, so move them to ->batch_done.
	 */
	rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period. If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	int i;
	struct rcu_head *head;

	for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
		head = rcu_batch_dequeue(&sp->batch_done);
		if (!head)
			break;
		local_bh_disable();
		head->func(head);
		local_bh_enable();
	}
}

/*
 * Finished one round of SRCU grace period. Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
	bool pending = true;

	if (rcu_batch_empty(&sp->batch_done) &&
	    rcu_batch_empty(&sp->batch_check1) &&
	    rcu_batch_empty(&sp->batch_check0) &&
	    rcu_batch_empty(&sp->batch_queue)) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_batch_empty(&sp->batch_done) &&
		    rcu_batch_empty(&sp->batch_check1) &&
		    rcu_batch_empty(&sp->batch_check0) &&
		    rcu_batch_empty(&sp->batch_queue)) {
			sp->running = false;
			pending = false;
		}
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		queue_delayed_work(system_power_efficient_wq,
				   &sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_collect_new(sp);
	srcu_advance_batches(sp, 1);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);