author     Andrew Jones <drjones@redhat.com>      2020-11-11 13:26:30 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>    2020-11-15 09:49:20 -0500
commit     0aa9ec45d42779af711c7a209b5780ff7391b5bd (patch)
tree       fd7b03c1f6fc3e59f09bc560cd50fd0a21915aa9 /tools/testing
parent     ec2f18bb4783648041498b06d4bff222821efed1 (diff)
download   linux-0aa9ec45d42779af711c7a209b5780ff7391b5bd.tar.gz
KVM: selftests: Introduce vm_create_[default_]_with_vcpus
Introduce new vm_create variants that also take a number of vcpus, an amount of per-vcpu pages, and optionally a list of vcpuids. These variants will create default VMs with enough additional pages to cover the vcpu stacks, per-vcpu pages, and pagetable pages for all of them. The new 'default' variant uses VM_MODE_DEFAULT, whereas the other new variant accepts the mode as a parameter.

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Message-Id: <20201111122636.73346-6-drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
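As a usage sketch (not part of this patch), a test built on the new helpers can create a multi-vCPU VM in a single call. The vCPU count, the guest_code body, and the zero extra/per-vcpu page counts below are illustrative placeholders; the function signatures come from the patch itself.

	#include "kvm_util.h"

	#define NR_TEST_VCPUS 4	/* illustrative vCPU count */

	/* Placeholder guest entry point; a real test supplies its own. */
	static void guest_code(void)
	{
		GUEST_DONE();
	}

	int main(void)
	{
		struct kvm_vm *vm;

		/*
		 * Default mode, NR_TEST_VCPUS vCPUs with IDs 0..NR_TEST_VCPUS-1
		 * (NULL vcpuids), no extra memory pages and no per-vcpu pages
		 * beyond the default stacks.
		 */
		vm = vm_create_default_with_vcpus(NR_TEST_VCPUS, 0, 0,
						  guest_code, NULL);

		/* ... run and check the vCPUs here ... */

		kvm_vm_free(vm);
		return 0;
	}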
Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h  10
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c      35
2 files changed, 40 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 8154785196cb..dfa9d369e8fc 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -266,6 +266,16 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
void *guest_code);
+/* Same as vm_create_default, but can be used for more than one vcpu */
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[]);
+
+/* Like vm_create_default_with_vcpus, but accepts mode as a parameter */
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+ void *guest_code, uint32_t vcpuids[]);
+
/*
* Adds a vCPU with reasonable defaults (e.g. a stack)
*
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 70676a305523..df2035ac54a6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -282,8 +282,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
+struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages, uint32_t num_percpu_pages,
+ void *guest_code, uint32_t vcpuids[])
{
/* The maximum page table size for a memory region will be when the
* smallest pages are used. Considering each page contains x page
@@ -291,10 +292,18 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
* N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
* than N/x*2.
*/
- uint64_t extra_pg_pages = (extra_mem_pages / PTES_PER_MIN_PAGE) * 2;
+ uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
+ uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
+ uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
struct kvm_vm *vm;
+ int i;
+
+ TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
+ "nr_vcpus = %d too large for host, max-vcpus = %d",
+ nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+ pages = vm_adjust_num_guest_pages(mode, pages);
+ vm = vm_create(mode, pages, O_RDWR);
kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
@@ -302,11 +311,27 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
vm_create_irqchip(vm);
#endif
- vm_vcpu_add_default(vm, vcpuid, guest_code);
+ for (i = 0; i < nr_vcpus; ++i)
+ vm_vcpu_add_default(vm, vcpuids ? vcpuids[i] : i, guest_code);
return vm;
}
+struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
+ uint32_t num_percpu_pages, void *guest_code,
+ uint32_t vcpuids[])
+{
+ return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
+ num_percpu_pages, guest_code, vcpuids);
+}
+
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+ void *guest_code)
+{
+ return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
+ (uint32_t []){ vcpuid });
+}
+
/*
* VM Restart
*
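For completeness, a similar sketch (again not from the patch) exercises the mode-taking variant with an explicit vcpuid list. VM_MODE_DEFAULT is used because it is the only mode named in this patch, and the ID values are arbitrary.

	#include "kvm_util.h"

	/* Placeholder guest entry point. */
	static void guest_code(void)
	{
		GUEST_DONE();
	}

	int main(void)
	{
		/* Arbitrary, non-contiguous vCPU IDs to show the vcpuids[] path. */
		uint32_t vcpuids[] = { 0, 2, 5 };
		uint32_t nr_vcpus = sizeof(vcpuids) / sizeof(vcpuids[0]);
		struct kvm_vm *vm;

		/*
		 * Same as vm_create_default_with_vcpus(), except the guest mode
		 * is passed explicitly instead of defaulting to VM_MODE_DEFAULT.
		 */
		vm = vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0, 0,
					  guest_code, vcpuids);

		kvm_vm_free(vm);
		return 0;
	}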