rcu: Use CONFIG_PREEMPTION

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by
CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same
functionality, which today depends on CONFIG_PREEMPT.
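
The relationship can be pictured with a simplified Kconfig sketch; this is
an illustration of the dependency described above, not the literal upstream
Kconfig text:

	config PREEMPT
		bool "Preemptible Kernel (Low-Latency Desktop)"
		select PREEMPTION

	config PREEMPT_RT
		bool "Fully Preemptible Kernel (Real-Time)"
		select PREEMPTION

	config PREEMPTION
		bool

A conditional keyed on CONFIG_PREEMPTION therefore covers both preemption
models, while one keyed on CONFIG_PREEMPT covers only the former.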

Switch the conditionals in RCU to use CONFIG_PREEMPTION.

This is the first step towards RCU on RT. Further tweaks are work in
progress. This patch does not touch the selftest bits, which need a closer
look by Paul.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.210156346@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 01b1d88b09 (parent c1a280b68d)
Author:    Thomas Gleixner <tglx@linutronix.de>
Date:      2019-07-26 23:19:38 +02:00
Committer: Ingo Molnar <mingo@kernel.org>

 8 files changed, 15 insertions(+), 15 deletions(-)


@@ -103,7 +103,7 @@ config STATIC_KEYS_SELFTEST
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
-	select TASKS_RCU if PREEMPT
+	select TASKS_RCU if PREEMPTION
 
 config KPROBES_ON_FTRACE
 	def_bool y


@@ -578,7 +578,7 @@ do { \
  *
  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
  * it is illegal to block while in an RCU read-side critical section.
- * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
  * kernel builds, RCU read-side critical sections may be preempted,
  * but explicit blocking is illegal. Finally, in preemptible RCU
  * implementations in real-time (with -rt patchset) kernel builds, RCU

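The blocking rule in the comment above can be illustrated with a minimal
reader. The following sketch is hypothetical and not part of this patch; it
only shows where blocking would be illegal:

	/* Hypothetical example: a minimal RCU reader. Under CONFIG_PREEMPTION
	 * this section may be preempted, but it must not block (no mutexes,
	 * no GFP_KERNEL allocations, no other sleeping).
	 */
	#include <linux/rcupdate.h>

	struct foo {
		int val;
	};

	static struct foo __rcu *global_foo;

	static int read_foo_val(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock();
		p = rcu_dereference(global_foo);
		if (p)
			val = p->val;
		rcu_read_unlock();

		return val;
	}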

@@ -53,7 +53,7 @@ void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 void rcu_end_inkernel_boot(void);
 bool rcu_is_watching(void);
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 void rcu_all_qs(void);
 #endif
 


@@ -86,7 +86,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
 #define torture_stop_kthread(n, tp) \
 	_torture_stop_kthread("Stopping " #n " task", &(tp))
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define torture_preempt_schedule() preempt_schedule()
 #else
 #define torture_preempt_schedule()


@@ -7,7 +7,7 @@ menu "RCU Subsystem"
 
 config TREE_RCU
 	bool
-	default y if !PREEMPT && SMP
+	default y if !PREEMPTION && SMP
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP system with hundreds or
@@ -16,7 +16,7 @@ config TREE_RCU
 
 config PREEMPT_RCU
 	bool
-	default y if PREEMPT
+	default y if PREEMPTION
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP systems with hundreds or
@@ -28,7 +28,7 @@ config PREEMPT_RCU
 
 config TINY_RCU
 	bool
-	default y if !PREEMPT && !SMP
+	default y if !PREEMPTION && !SMP
 	help
 	  This option selects the RCU implementation that is
 	  designed for UP systems from which real-time response
@@ -70,7 +70,7 @@ config TREE_SRCU
 	  This option selects the full-fledged version of SRCU.
 
 config TASKS_RCU
-	def_bool PREEMPT
+	def_bool PREEMPTION
 	select SRCU
 	help
 	  This option enables a task-based RCU implementation that uses


@@ -1881,7 +1881,7 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	struct rcu_node *rnp_p;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
-	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
+	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) ||
 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
 	    rnp->qsmask != 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2205,7 +2205,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {
-			if (!IS_ENABLED(CONFIG_PREEMPT) ||
+			if (!IS_ENABLED(CONFIG_PREEMPTION) ||
 			    rcu_preempt_blocked_readers_cgp(rnp)) {
 				/*
 				 * No point in scanning bits because they
@@ -2622,7 +2622,7 @@ static int rcu_blocking_is_gp(void)
 {
 	int ret;
 
-	if (IS_ENABLED(CONFIG_PREEMPT))
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
 	might_sleep(); /* Check for RCU read-side critical section. */
 	preempt_disable();


@@ -163,7 +163,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
 //
 // Printing RCU CPU stall warnings
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 /*
  * Dump detailed information for all tasks blocking the current RCU
@@ -215,7 +215,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 	return ndetected;
 }
 
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPTION */
 
 /*
  * Because preemptible RCU does not exist, we never have to check for
@@ -233,7 +233,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 {
 	return 0;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPTION */
 
 /*
  * Dump stacks of all tasks running on stalled CPUs. First try using


@@ -146,7 +146,7 @@ config FUNCTION_TRACER
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
-	select TASKS_RCU if PREEMPT
+	select TASKS_RCU if PREEMPTION
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation