author		Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>	2021-11-22 12:18:44 +0000
committer	Marc Zyngier <maz@kernel.org>	2022-02-08 14:57:04 +0000
commit		100b4f092f878dc379f1fcef9ce567c25dee3473 (patch)
tree		94812b659483a3d9daf59f9bc0d0e70d3784ccab /arch/arm64/include
parent		3248136b3637e1671e4fa46e32e2122f9ec4bc3d (diff)
KVM: arm64: Make active_vmids invalid on vCPU schedule out
Like the ASID allocator, we copy the active_vmids into the reserved_vmids on a rollover. But it is unlikely that every CPU will have a vCPU as its current task, so we may end up unnecessarily reserving VMID space.

Hence, set active_vmids to an invalid value when scheduling out a vCPU.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211122121844.867-5-shameerali.kolothum.thodi@huawei.com
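To make the first paragraph concrete, here is a minimal, purely illustrative sketch of a rollover path that copies each CPU's active VMID into its reservation. The per-CPU variables, the bitmap and the function name below are assumptions for illustration only; none of them are taken from this patch.

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Declared in asm/kvm_host.h (see the diff below). */
extern unsigned int kvm_arm_vmid_bits;

/* Assumed allocator state; names are illustrative. */
static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);
static unsigned long *vmid_map;		/* one bit per allocatable VMID */

/*
 * Illustrative rollover: each CPU's currently active VMID is carried
 * over into that CPU's reservation.
 */
static void flush_context_sketch(void)
{
	unsigned int cpu;
	u64 vmid;

	bitmap_zero(vmid_map, 1UL << kvm_arm_vmid_bits);

	for_each_possible_cpu(cpu) {
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
		if (vmid == 0)
			/* Nothing active on this CPU: keep its old reservation. */
			vmid = per_cpu(reserved_vmids, cpu);
		per_cpu(reserved_vmids, cpu) = vmid;
		__set_bit(vmid & ((1UL << kvm_arm_vmid_bits) - 1), vmid_map);
	}
}

With this behaviour, a CPU whose last vCPU has been scheduled out still contributes its stale active_vmids entry to reserved_vmids on every rollover, which is the waste this patch removes.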
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/kvm_host.h	1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 47aeed22e2ad..305ea13c4fd0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -694,6 +694,7 @@ extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
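For reference, a minimal sketch of what the helper declared above might look like, assuming a per-CPU active_vmids variable and a VMID_ACTIVE_INVALID sentinel that the rollover path treats the same as "no active VMID". The real implementation lives outside this header and is not shown in this diff; the names below are assumptions.

#include <linux/atomic.h>
#include <linux/percpu.h>

/* Declared in asm/kvm_host.h (see the diff above). */
extern unsigned int kvm_arm_vmid_bits;

/* Assumed per-CPU record of the VMID currently programmed on this CPU. */
static DEFINE_PER_CPU(atomic64_t, active_vmids);

/* Assumed sentinel outside the range of allocatable VMIDs. */
#define VMID_ACTIVE_INVALID	(1UL << kvm_arm_vmid_bits)

void kvm_arm_vmid_clear_active(void)
{
	/*
	 * Invalidate this CPU's active VMID so that a later rollover does
	 * not copy it into reserved_vmids on behalf of a vCPU that is no
	 * longer running here.
	 */
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}

A natural caller would be the vCPU schedule-out path, e.g. kvm_arch_vcpu_put(), so that the invalidation happens whenever a vCPU stops being the CPU's current task (the call site is likewise an assumption here, not part of this diff).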