/*
 * Read-Copy Update mechanism for mutual exclusion (classic version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma
 *
 * Based on the original work by Paul McKenney
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see
 * Documentation/RCU.
 *
 * Rewrite based on a global state machine
 * (C) Manfred Spraul, 2008
 */

#ifndef __LINUX_RCUCLASSIC_H
#define __LINUX_RCUCLASSIC_H

/* headers for the types used below: spinlock_t, cpumask_t, seqlock_t,
 * per-cpu data, cacheline alignment and lockdep.
 */
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>

/*
 * cpu bitmask:
 * default implementation, flat without hierarchy, not optimized for UP.
 */
struct rcu_cpumask {
        spinlock_t lock;
        cpumask_t cpus;
} ____cacheline_internodealigned_in_smp;

#define __RCU_CPUMASK_INIT(ptr) { .lock = __SPIN_LOCK_UNLOCKED(&(ptr)->lock) }

/*
 * global state machine:
 * - each cpu regularly checks the global state and compares it with its
 *   own local state.
 * - if the two states do not match, then the cpus do the required work
 *   and afterwards
 *   - update their local state
 *   - clear their bit in the cpu bitmask.
 * The state machine is protected by a sequence lock. It is only read with
 * local interrupts disabled. Since all cpus must do something to complete
 * a state change, the current state cannot jump forward by more than one
 * state.
 */

/* RCU_STATE_DESTROY:
 * call the callbacks that were registered by call_rcu for the objects in
 * rcu_cpu_state.old
 */
#define RCU_STATE_DESTROY		1
/* RCU_STATE_DESTROY_AND_COLLECT:
 * - call the callbacks that were registered by call_rcu for the objects in
 *   rcu_cpu_state.old
 * - move the objects from rcu_cpu_state.new to rcu_cpu_state.old
 */
#define RCU_STATE_DESTROY_AND_COLLECT	2
/* RCU_STATE_GRACE:
 * - wait for a quiescent state
 */
#define RCU_STATE_GRACE			3

struct rcu_global_state {
        seqlock_t lock;
        int state;
        int start_immediately;
        long completed;
        struct rcu_cpumask cpus;
} ____cacheline_internodealigned_in_smp;

struct rcu_cpu_state {
        int state;

        /* new objects, directly from call_rcu().
         * objects are added LIFO, better for cache hits.
         * the lists are length-based, not NULL-terminated.
         */
        struct rcu_head *new;		/* new objects */
        struct rcu_head **newtail;
        long newqlen;			/* # of queued callbacks */

        /* objects that are in rcu grace processing. The actual
         * state depends on rgs->state.
         */
        struct rcu_head *old;
        struct rcu_head **oldtail;
        long oldqlen;
};

struct rcu_cpu_dead {
        /* objects that are scheduled for immediate call of
         * ->func().
         * objects are added FIFO, necessary for forward progress.
         * only one structure for _bh and _normal.
         */
        struct rcu_head *dead;
        long deadqlen;

        long batchcount;
};
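/*
 * Illustrative sketch, not part of the original interface: one plausible way
 * for call_rcu() to push an entry onto the length-based, tail-pointer list
 * described in struct rcu_cpu_state above. The helper name is hypothetical
 * (the real queueing lives in kernel/rcuclassic.c and may differ in detail);
 * it assumes the struct rcu_head layout (->next, ->func) from
 * <linux/rcupdate.h> and that the caller already runs on the owning cpu with
 * interrupts disabled.
 */
static inline void __rcu_sketch_queue_new(struct rcu_cpu_state *rcs,
                                          struct rcu_head *head)
{
        if (rcs->newqlen == 0) {
                /* first entry: it is also the tail; remember its ->next
                 * slot so the whole list can later be spliced in O(1).
                 */
                rcs->newtail = &head->next;
        }
        head->next = rcs->new;	/* LIFO: push at the head */
        rcs->new = head;
        rcs->newqlen++;		/* length-based list: count, no NULL terminator */
}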
DECLARE_PER_CPU(struct rcu_cpu_state, rcu_cpudata_normal);
DECLARE_PER_CPU(struct rcu_cpu_state, rcu_cpudata_bh);
DECLARE_PER_CPU(struct rcu_cpu_dead, rcu_cpudata_dead);

extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	\
			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

#define __rcu_read_lock() \
	do { \
		preempt_disable(); \
		__acquire(RCU); \
		rcu_read_acquire(); \
	} while (0)
#define __rcu_read_unlock() \
	do { \
		rcu_read_release(); \
		__release(RCU); \
		preempt_enable(); \
	} while (0)
#define __rcu_read_lock_bh() \
	do { \
		local_bh_disable(); \
		__acquire(RCU_BH); \
		rcu_read_acquire(); \
	} while (0)
#define __rcu_read_unlock_bh() \
	do { \
		rcu_read_release(); \
		__release(RCU_BH); \
		local_bh_enable(); \
	} while (0)

#define __synchronize_sched() synchronize_rcu()

#define call_rcu_sched(head, func) call_rcu(head, func)

extern void __rcu_init(void);
#define rcu_init_sched()	do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);

#define rcu_enter_nohz()	do { } while (0)
#define rcu_exit_nohz()		do { } while (0)

#define rcu_qsctr_inc(cpu)	do { } while (0)
#define rcu_bh_qsctr_inc(cpu)	do { } while (0)

#endif /* __LINUX_RCUCLASSIC_H */
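/*
 * Usage example (illustrative only, with hypothetical names such as
 * global_ptr, use(), new_version, free_old() and the ->rcu member; none of
 * them are defined here): readers enter a critical section via
 * rcu_read_lock()/rcu_read_unlock() from <linux/rcupdate.h>, which expand to
 * the __rcu_read_lock()/__rcu_read_unlock() macros above, while an updater
 * publishes a new version and queues the old one with call_rcu():
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *
 *	old = global_ptr;
 *	rcu_assign_pointer(global_ptr, new_version);
 *	call_rcu(&old->rcu, free_old);
 *
 * free_old() is invoked only after a grace period, i.e. once the rcu_head
 * has passed through the ->new, ->old and ->dead queues described above.
 */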