/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
static int blimit = 10;		/* Maximum callbacks per softirq. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
	if (unlikely(!rcp->signaled)) {
		/*
		 * Don't send an IPI to ourselves: with irqs disabled,
		 * rdp->cpu is the current cpu.
		 *
		 * cpu_online_map is updated by _cpu_down()
		 * using stop_machine_run(). Since we're in an irqs-disabled
		 * section, stop_machine_run() is not executing, hence
		 * the cpu_online_map is stable.
		 *
		 * However, a cpu might have been offlined _just_ before
		 * we disabled irqs while entering here.
		 * And the rcu subsystem might not yet have handled the CPU_DEAD
		 * notification, leading to the offlined cpu's bit
		 * being set in the rcp->cpumask.
		 *
		 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent
		 * sending smp_send_reschedule() to an offlined CPU.
		 */
		cpus_and(cpumask, rcp->cpumask, cpu_online_map);
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
				struct rcu_data *rdp)
	smp_mb(); /* reads the most recently updated value of rcp->cur. */
	/*
	 * Determine the batch number of this callback.
	 *
	 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
	 * the local variable "batch" and emits code like this:
	 *	1) rdp->batch = rcp->cur + 1 # gets old value
	 *	2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
	 * then [*nxttail[0], *nxttail[1]) may contain callbacks
	 * whose batch# = rdp->batch; see the comment for struct rcu_data.
	 */
	batch = ACCESS_ONCE(rcp->cur) + 1;
	if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
		/* process callbacks */
		rdp->nxttail[0] = rdp->nxttail[1];
		rdp->nxttail[1] = rdp->nxttail[2];
		if (rcu_batch_after(batch - 1, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[2];
	}

	*rdp->nxttail[2] = head;
	rdp->nxttail[2] = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
	local_irq_save(flags);
	__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
	local_irq_restore(flags);
EXPORT_SYMBOL_GPL(call_rcu);
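
/*
 * Illustrative usage sketch only, not part of this file: struct foo,
 * gbl_foo, foo_lock, foo_reclaim() and foo_replace() are hypothetical
 * names, and kfree() would additionally need <linux/slab.h>.  An updater
 * publishes a new version with rcu_assign_pointer() and uses call_rcu()
 * to free the old version only after all readers that might still hold
 * a reference have left their rcu_read_lock()/rcu_read_unlock() section.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static struct foo *gbl_foo;		/* readers access via rcu_dereference() */
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_replace(struct foo *new_fp)
{
	struct foo *old_fp;

	spin_lock(&foo_lock);
	old_fp = gbl_foo;
	rcu_assign_pointer(gbl_foo, new_fp);
	spin_unlock(&foo_lock);
	call_rcu(&old_fp->rcu, foo_reclaim);	/* freed after a grace period */
}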
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context.  These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
	local_irq_save(flags);
	__call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
	local_irq_restore(flags);
EXPORT_SYMBOL_GPL(call_rcu_bh);
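
/*
 * Illustrative sketch only, not part of this file: foo_read_data_bh() is
 * a hypothetical reader reusing the struct foo/gbl_foo sketch above.  A
 * process-context reader that shares data with softirq-context users
 * brackets its accesses with rcu_read_lock_bh()/rcu_read_unlock_bh();
 * the matching updater would defer freeing with call_rcu_bh() instead of
 * call_rcu(), using the same callback pattern as foo_reclaim() above.
 */
static int foo_read_data_bh(void)
{
	struct foo *fp;
	int val = -1;

	rcu_read_lock_bh();		/* also disables softirqs locally */
	fp = rcu_dereference(gbl_foo);
	if (fp)
		val = fp->data;
	rcu_read_unlock_bh();
	return val;
}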
/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
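
/*
 * Illustrative debug sketch only, not part of this file:
 * rcu_debug_count_batches() is a hypothetical helper.  Sampling the
 * counter around a blocking synchronize_rcu() shows how many grace-period
 * batches were retired in the meantime.
 */
static void rcu_debug_count_batches(void)
{
	long before = rcu_batches_completed();

	synchronize_rcu();	/* block until a full grace period has elapsed */
	printk(KERN_DEBUG "rcu: %ld batch(es) completed\n",
	       rcu_batches_completed() - before);
}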
/*
 * Return the number of RCU bh batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
/* Raises the softirq for processing RCU callbacks. */
static inline void raise_rcu_softirq(void)
{
	raise_softirq(RCU_SOFTIRQ);
}
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
	struct rcu_head *next, *list;

	list = rdp->donelist;
		if (++count >= rdp->blimit)
	rdp->donelist = list;

	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	rdp->donetail = &rdp->donelist;
/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
 *   period (if necessary).
 */
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
	if (rcp->cur != rcp->pending &&
			rcp->completed == rcp->cur) {
		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier.  Otherwise it can cause tickless idle CPUs to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed ! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If not, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock(&rcp->lock);
#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu(); do not use elsewhere without
 * reviewing the locking requirements. The list it's pulling from has to
 * belong to a cpu which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail, long batch)
	this_rdp->batch = batch;
	*this_rdp->nxttail[2] = list;
	this_rdp->nxttail[2] = tail;
static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);
	/* spin_lock implies smp_mb() */
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);

	this_rdp->qlen += rdp->qlen;
static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
}
#else

static void rcu_offline_cpu(int cpu)
{
}

#endif
/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
	/*
	 * move the other grace-period-completed entries to
	 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
	 */
	if (!rcu_batch_before(rcp->completed, rdp->batch))
		rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
	else if (!rcu_batch_before(rcp->completed, rdp->batch - 1))
		rdp->nxttail[0] = rdp->nxttail[1];

	/*
	 * the grace period for entries in
	 * [rdp->nxtlist, *rdp->nxttail[0]) has completed, so
	 * move these entries to the donelist
	 */
	if (rdp->nxttail[0] != &rdp->nxtlist) {
		*rdp->donetail = rdp->nxtlist;
		rdp->donetail = rdp->nxttail[0];
		rdp->nxtlist = *rdp->nxttail[0];
		*rdp->donetail = NULL;

		if (rdp->nxttail[1] == rdp->nxttail[0])
			rdp->nxttail[1] = &rdp->nxtlist;
		if (rdp->nxttail[2] == rdp->nxttail[0])
			rdp->nxttail[2] = &rdp->nxtlist;
		rdp->nxttail[0] = &rdp->nxtlist;
	}
	if (rcu_batch_after(rdp->batch, rcp->pending)) {
		/* and start it/schedule start if it's a new batch */
		spin_lock(&rcp->lock);
		if (rcu_batch_after(rdp->batch, rcp->pending)) {
			rcp->pending = rdp->batch;
			rcu_start_batch(rcp);
		}
		spin_unlock(&rcp->lock);
	}

	rcu_check_quiescent_state(rcp, rdp);
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
	/*
	 * This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (!rcu_batch_before(rcp->completed, rdp->batch))
		return 1;
	if (!rcu_batch_before(rcp->completed, rdp->batch - 1) &&
			rdp->nxttail[0] != rdp->nxttail[1])
		return 1;
	if (rdp->nxttail[0] != &rdp->nxtlist)
		return 1;

	/*
	 * This cpu has pending rcu entries and the new batch
	 * for them hasn't been started nor its start scheduled.
	 */
	if (rcu_batch_after(rdp->batch, rcp->pending))
		return 1;

	/* This cpu has finished callbacks to invoke */

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}
void rcu_check_callbacks(int cpu, int user)
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so count it.
		 *
		 * Also do a memory barrier.  This is needed to handle
		 * the case where writes from a preempt-disable section
		 * of code get reordered into schedule() by this CPU's
		 * write buffer.  The memory barrier makes sure that
		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
		 * by other CPUs to happen after any such write.
		 */
		smp_mb();  /* See above block comment. */
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq()) {
		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * a rcu_bh read-side critical section.  This is an _bh
		 * critical section, so count it.  The memory barrier
		 * is needed for the same reason as is the above one.
		 */
		smp_mb();  /* See above block comment. */
		rcu_bh_qsctr_inc(cpu);
	}
static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
	memset(rdp, 0, sizeof(*rdp));
	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->blimit = blimit;
static void __cpuinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
	long cpu = (long)hcpu;

	case CPU_UP_PREPARE_FROZEN:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};
/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up.  Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);