KVM: arm/arm64: Provide a get_input_level for the arch timer

The VGIC can now support the life-cycle of mapped level-triggered
interrupts, and we no longer have to read back the timer state on every
exit from the VM if we had an asserted timer interrupt signal, because
the VGIC already knows if we hit the unlikely case where the guest
disables the timer without ACKing the virtual timer interrupt.

This means we rework a bit of the code to factor out the functionality
to snapshot the timer state from vtimer_save_state(), and we can reuse
this functionality in the sync path when we have an irqchip in
userspace, and also to support our implementation of the
get_input_level() function for the timer.
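Concretely, the factored-out helper and the new hook boil down to something like the following (a simplified sketch of the code in the diff below; the vintid check and its error handling are elided):

    static void __timer_snapshot_state(struct arch_timer_context *timer)
    {
            /* Capture the guest's timer registers while they are live on the CPU. */
            timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
            timer->cnt_cval = read_sysreg_el0(cntv_cval);
    }

    bool kvm_arch_timer_get_input_level(int vintid)
    {
            struct arch_timer_context *timer = vcpu_vtimer(kvm_arm_get_running_vcpu());

            /* If the timer is loaded on the CPU, refresh the software copy first. */
            if (timer->loaded)
                    __timer_snapshot_state(timer);

            return kvm_timer_should_fire(timer);
    }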

This change also means that we can no longer rely on the timer's view of
the interrupt line to set the active state, because we no longer
maintain this state for mapped interrupts when exiting from the guest.
Instead, we only set the active state if the virtual interrupt is
active, and otherwise we simply let the timer fire again and raise the
virtual interrupt from the ISR.
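On the vcpu load path this amounts to taking the physical active state from the vgic's view of the virtual interrupt instead of from the timer's own line level. A trimmed-down sketch of that path, based on the hunk below (the vtimer local and the tail of the irq_set_irqchip_state() call are filled in as assumptions):

    static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
    {
            struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
            bool phys_active;
            int ret;

            /* Only propagate the active state the vgic already tracks. */
            phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

            ret = irq_set_irqchip_state(host_vtimer_irq,
                                        IRQCHIP_STATE_ACTIVE,
                                        phys_active);
            WARN_ON(ret);   /* assumed tail, not shown in the hunk below */
    }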

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Author: Christoffer Dall
Date:   2017-10-27 19:34:30 +02:00
Parent: df635c5b18
Commit: 4c60e360d6

2 changed files with 40 additions and 46 deletions


@@ -90,6 +90,8 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 void kvm_timer_init_vhe(void);
 
+bool kvm_arch_timer_get_input_level(int vintid);
+
 #define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.ptimer)


@@ -343,6 +343,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
         phys_timer_emulate(vcpu);
 }
 
+static void __timer_snapshot_state(struct arch_timer_context *timer)
+{
+        timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+        timer->cnt_cval = read_sysreg_el0(cntv_cval);
+}
+
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -354,10 +360,8 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
         if (!vtimer->loaded)
                 goto out;
 
-        if (timer->enabled) {
-                vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-                vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
-        }
+        if (timer->enabled)
+                __timer_snapshot_state(vtimer);
 
         /* Disable the virtual timer */
         write_sysreg_el0(0, cntv_ctl);
@@ -454,8 +458,7 @@ static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
         bool phys_active;
         int ret;
 
-        phys_active = vtimer->irq.level ||
-                      kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+        phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
 
         ret = irq_set_irqchip_state(host_vtimer_irq,
                                     IRQCHIP_STATE_ACTIVE,
@@ -535,54 +538,27 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
         set_cntvoff(0);
 }
 
-static void unmask_vtimer_irq(struct kvm_vcpu *vcpu)
+/*
+ * With a userspace irqchip we have to check if the guest de-asserted the
+ * timer and if so, unmask the timer irq signal on the host interrupt
+ * controller to ensure that we see future timer signals.
+ */
+static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-        if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-                kvm_vtimer_update_mask_user(vcpu);
-                return;
-        }
-
-        /*
-         * If the guest disabled the timer without acking the interrupt, then
-         * we must make sure the physical and virtual active states are in
-         * sync by deactivating the physical interrupt, because otherwise we
-         * wouldn't see the next timer interrupt in the host.
-         */
-        if (!kvm_vgic_map_is_active(vcpu, vtimer->irq.irq)) {
-                int ret;
-                ret = irq_set_irqchip_state(host_vtimer_irq,
-                                            IRQCHIP_STATE_ACTIVE,
-                                            false);
-                WARN_ON(ret);
+        __timer_snapshot_state(vtimer);
+        if (!kvm_timer_should_fire(vtimer)) {
+                kvm_timer_update_irq(vcpu, false, vtimer);
+                kvm_vtimer_update_mask_user(vcpu);
         }
 }
 
 /**
  * kvm_timer_sync_hwstate - sync timer state from cpu
  * @vcpu: The vcpu pointer
  *
  * Check if any of the timers have expired while we were running in the guest,
  * and inject an interrupt if that was the case.
  */
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-        /*
-         * If we entered the guest with the vtimer output asserted we have to
-         * check if the guest has modified the timer so that we should lower
-         * the line at this point.
-         */
-        if (vtimer->irq.level) {
-                vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-                vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
-
-                if (!kvm_timer_should_fire(vtimer)) {
-                        kvm_timer_update_irq(vcpu, false, vtimer);
-                        unmask_vtimer_irq(vcpu);
-                }
-        }
+        unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -813,6 +789,22 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
         return true;
 }
 
+bool kvm_arch_timer_get_input_level(int vintid)
+{
+        struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
+        struct arch_timer_context *timer;
+
+        if (vintid == vcpu_vtimer(vcpu)->irq.irq)
+                timer = vcpu_vtimer(vcpu);
+        else
+                BUG(); /* We only map the vtimer so far */
+
+        if (timer->loaded)
+                __timer_snapshot_state(timer);
+
+        return kvm_timer_should_fire(timer);
+}
+
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -835,7 +827,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
         }
 
         ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
-                                    NULL);
+                                    kvm_arch_timer_get_input_level);
         if (ret)
                 return ret;
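
For reference, the last two hunks tie together as follows: the sampler declared in the timer header is handed to the vgic when the vtimer is mapped, in place of the previous NULL argument, so the vgic can re-read the line level whenever it needs to. A trimmed-down view (surrounding checks in kvm_timer_enable() are omitted):

    /* Declared in the timer header (first hunk above). */
    bool kvm_arch_timer_get_input_level(int vintid);

    /* In kvm_timer_enable(): let the vgic resample the vtimer line via the hook. */
    ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
                                kvm_arch_timer_get_input_level);
    if (ret)
            return ret;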