context_tracking: Generalize context tracking APIs to support user and guest

Generalize the context tracking APIs to support various kinds of
contexts. This is done by splitting the mechanism out of
context_tracking_user_enter() and context_tracking_user_exit() into
context_tracking_enter() and context_tracking_exit().

The nature of the context we track is now described by a ctx_state
parameter passed to these APIs, allowing the same functions to track not
only kernel <-> user space switching, but also kernel <-> guest transitions.

But leave the old functions in place to avoid breaking ARM, which calls
these functions from assembler code and cannot easily pass C enum
parameters.
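
As an illustration (not part of this patch), the generalized calls are meant
to let a virtualization entry/exit path reuse the same machinery. A minimal
sketch, assuming a CONTEXT_GUEST state value (expected from a follow-up
patch, not introduced here) and hypothetical helper names:

static inline void example_guest_enter(void)
{
	/*
	 * Entering the guest: like userspace, guest code runs outside any
	 * kernel RCU read side critical section, so ask context tracking
	 * to put RCU into its extended quiescent state.
	 */
	context_tracking_enter(CONTEXT_GUEST);
}

static inline void example_guest_exit(void)
{
	/* Back in the kernel: leave the RCU extended quiescent state. */
	context_tracking_exit(CONTEXT_GUEST);
}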

Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Rik van Riel 2015-02-10 15:27:50 -05:00 committed by Frederic Weisbecker
parent c467ea763f
commit 3aab4f50bf
2 changed files with 34 additions and 18 deletions

include/linux/context_tracking.h

@@ -10,6 +10,8 @@
 #ifdef CONFIG_CONTEXT_TRACKING
 extern void context_tracking_cpu_set(int cpu);
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
 extern void context_tracking_user_exit(void);
 extern void __context_tracking_task_switch(struct task_struct *prev,
@@ -35,7 +37,8 @@ static inline enum ctx_state exception_enter(void)
 		return 0;

 	prev_ctx = this_cpu_read(context_tracking.state);
-	context_tracking_user_exit();
+	if (prev_ctx != CONTEXT_KERNEL)
+		context_tracking_exit(prev_ctx);

 	return prev_ctx;
 }
@@ -43,8 +46,8 @@ static inline enum ctx_state exception_enter(void)
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
 	if (context_tracking_is_enabled()) {
-		if (prev_ctx == CONTEXT_USER)
-			context_tracking_user_enter();
+		if (prev_ctx != CONTEXT_KERNEL)
+			context_tracking_enter(prev_ctx);
 	}
 }
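
For reference, a minimal sketch (not from this patch, handler name
hypothetical) of how the exception_enter()/exception_exit() pair above is
meant to be used: the saved prev_ctx lets any exception handler restore
whichever context (user, guest or kernel) was being tracked before the
exception, which is why the check is now against CONTEXT_KERNEL rather than
CONTEXT_USER.

static void example_exception_handler(void)
{
	enum ctx_state prev_ctx;

	prev_ctx = exception_enter();	/* leave user/guest tracking if needed */
	/* ... handler body, free to use RCU read side critical sections ... */
	exception_exit(prev_ctx);	/* restore the previously tracked context */
}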

kernel/context_tracking.c

@@ -39,15 +39,15 @@ void context_tracking_cpu_set(int cpu)
 }

 /**
- * context_tracking_user_enter - Inform the context tracking that the CPU is going to
- *                               enter userspace mode.
+ * context_tracking_enter - Inform the context tracking that the CPU is going
+ *                          enter user or guest space mode.
  *
  * This function must be called right before we switch from the kernel
- * to userspace, when it's guaranteed the remaining kernel instructions
- * to execute won't use any RCU read side critical section because this
- * function sets RCU in extended quiescent state.
+ * to user or guest space, when it's guaranteed the remaining kernel
+ * instructions to execute won't use any RCU read side critical section
+ * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_user_enter(void)
+void context_tracking_enter(enum ctx_state state)
 {
 	unsigned long flags;
@@ -75,7 +75,7 @@ void context_tracking_user_enter(void)
 	WARN_ON_ONCE(!current->mm);

 	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != CONTEXT_USER) {
+	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			trace_user_enter(0);
 			/*
@@ -101,24 +101,31 @@ void context_tracking_user_enter(void)
 		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
 		 * is false because we know that CPU is not tickless.
 		 */
-		__this_cpu_write(context_tracking.state, CONTEXT_USER);
+		__this_cpu_write(context_tracking.state, state);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_enter);
+
+void context_tracking_user_enter(void)
+{
+	context_tracking_enter(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_enter);

 /**
- * context_tracking_user_exit - Inform the context tracking that the CPU is
- *                              exiting userspace mode and entering the kernel.
+ * context_tracking_exit - Inform the context tracking that the CPU is
+ *                         exiting user or guest mode and entering the kernel.
  *
- * This function must be called after we entered the kernel from userspace
- * before any use of RCU read side critical section. This potentially include
- * any high level kernel code like syscalls, exceptions, signal handling, etc...
+ * This function must be called after we entered the kernel from user or
+ * guest space before any use of RCU read side critical section. This
+ * potentially include any high level kernel code like syscalls, exceptions,
+ * signal handling, etc...
  *
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_user_exit(void)
+void context_tracking_exit(enum ctx_state state)
 {
 	unsigned long flags;
@@ -129,7 +136,7 @@ void context_tracking_user_exit(void)
 		return;

 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == CONTEXT_USER) {
+	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
@@ -143,6 +150,12 @@ void context_tracking_user_exit(void)
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_exit);
+
+void context_tracking_user_exit(void)
+{
+	context_tracking_exit(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_exit);

 /**