From 27972765bd0410fc2ef5e86a41de17c71440a2dd Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Fri, 26 Jul 2019 23:19:39 +0200
Subject: [PATCH] locking/spinlocks: Use CONFIG_PREEMPTION

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Adjust the comments in the locking code.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Masami Hiramatsu
Cc: Paolo Bonzini
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Steven Rostedt
Link: http://lkml.kernel.org/r/20190726212124.302995288@linutronix.de
Signed-off-by: Ingo Molnar
---
 include/linux/spinlock.h         | 2 +-
 include/linux/spinlock_api_smp.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ed7c4d6b8235..031ce8617df8 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -214,7 +214,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 
 /*
  * Define the various spin_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
  * various methods are defined as nops in the case they are not
  * required.
  */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..b762eaba4cdf 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
- * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
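
For context, the "selected by" relationship the commit message describes is wired up
in Kconfig. The following is a simplified sketch modeled on kernel/Kconfig.preempt
from that era, with dependencies, prompts and help text elided; it is not the exact
upstream text:

    # PREEMPTION is an invisible (promptless) symbol: it is never chosen
    # directly, only pulled in via "select" by a preemption model.
    config PREEMPTION
            bool
            select PREEMPT_COUNT

    config PREEMPT
            bool "Preemptible Kernel (Low-Latency Desktop)"
            select PREEMPTION

    config PREEMPT_RT
            bool "Fully Preemptible Kernel (Real-Time)"
            select PREEMPTION

Because both models select PREEMPTION, code that must behave identically under
PREEMPT and PREEMPT_RT tests CONFIG_PREEMPTION rather than CONFIG_PREEMPT, which
is why the spinlock comments above are updated to name the new symbol.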