From d134998838ac217a8427c1ddc83cf48888bb3fa3 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:58 +0000 Subject: selftests: KVM: Add OSLSR_EL1 to the list of blessed regs OSLSR_EL1 is now part of the visible system register state. Add it to the get-reg-list selftest to ensure we keep it that way. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-6-oupton@google.com --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index f769fc6cd927..f12147c43464 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -760,6 +760,7 @@ static __u64 base_regs[] = { ARM64_SYS_REG(2, 0, 0, 15, 5), ARM64_SYS_REG(2, 0, 0, 15, 6), ARM64_SYS_REG(2, 0, 0, 15, 7), + ARM64_SYS_REG(2, 0, 1, 1, 4), /* OSLSR_EL1 */ ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */ ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */ ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */ -- cgit v1.2.3 From 05c9324de1695b5e61dceca6d2ef0ab8c0f2f26b Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:59 +0000 Subject: selftests: KVM: Test OS lock behavior KVM now correctly handles the OS Lock for its guests. When set, KVM blocks all debug exceptions originating from the guest. Add test cases to the debug-exceptions test to assert that software breakpoint, hardware breakpoint, watchpoint, and single-step exceptions are in fact blocked. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-7-oupton@google.com --- .../selftests/kvm/aarch64/debug-exceptions.c | 58 +++++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index ea189d83abf7..63b2178210c4 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -23,7 +23,7 @@ #define SPSR_D (1 << 9) #define SPSR_SS (1 << 21) -extern unsigned char sw_bp, hw_bp, bp_svc, bp_brk, hw_wp, ss_start; +extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start; static volatile uint64_t sw_bp_addr, hw_bp_addr; static volatile uint64_t wp_addr, wp_data_addr; static volatile uint64_t svc_addr; @@ -47,6 +47,14 @@ static void reset_debug_state(void) isb(); } +static void enable_os_lock(void) +{ + write_sysreg(1, oslar_el1); + isb(); + + GUEST_ASSERT(read_sysreg(oslsr_el1) & 2); +} + static void install_wp(uint64_t addr) { uint32_t wcr; @@ -99,6 +107,7 @@ static void guest_code(void) GUEST_SYNC(0); /* Software-breakpoint */ + reset_debug_state(); asm volatile("sw_bp: brk #0"); GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp)); @@ -152,6 +161,51 @@ static void guest_code(void) GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4); GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8); + GUEST_SYNC(6); + + /* OS Lock does not block software-breakpoint */ + reset_debug_state(); + enable_os_lock(); + sw_bp_addr = 0; + asm volatile("sw_bp2: brk #0"); + GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2)); + + GUEST_SYNC(7); + + /* OS Lock blocking hardware-breakpoint */ + reset_debug_state(); + enable_os_lock(); + install_hw_bp(PC(hw_bp2)); + hw_bp_addr = 0; + asm volatile("hw_bp2: nop"); + 
GUEST_ASSERT_EQ(hw_bp_addr, 0); + + GUEST_SYNC(8); + + /* OS Lock blocking watchpoint */ + reset_debug_state(); + enable_os_lock(); + write_data = '\0'; + wp_data_addr = 0; + install_wp(PC(write_data)); + write_data = 'x'; + GUEST_ASSERT_EQ(write_data, 'x'); + GUEST_ASSERT_EQ(wp_data_addr, 0); + + GUEST_SYNC(9); + + /* OS Lock blocking single-step */ + reset_debug_state(); + enable_os_lock(); + ss_addr[0] = 0; + install_ss(); + ss_idx = 0; + asm volatile("mrs x0, esr_el1\n\t" + "add x0, x0, #1\n\t" + "msr daifset, #8\n\t" + : : : "x0"); + GUEST_ASSERT_EQ(ss_addr[0], 0); + GUEST_DONE(); } @@ -223,7 +277,7 @@ int main(int argc, char *argv[]) vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_SVC64, guest_svc_handler); - for (stage = 0; stage < 7; stage++) { + for (stage = 0; stage < 11; stage++) { vcpu_run(vm, VCPU_ID); switch (get_ucall(vm, VCPU_ID, &uc)) { -- cgit v1.2.3 From c340f7899af6f83bd937f8838949bb32da54c8a4 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 18 Jan 2022 01:57:03 +0000 Subject: KVM: selftests: Add vgic initialization for dirty log perf test for ARM For ARM64, if no vgic is setup before the dirty log perf test, the userspace irqchip would be used, which would affect the dirty log perf test result. Signed-off-by: Jing Zhang Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220118015703.3630552-4-jingzhangos@google.com --- tools/testing/selftests/kvm/dirty_log_perf_test.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 1954b964d1cf..b501338d9430 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -18,6 +18,12 @@ #include "test_util.h" #include "perf_test_util.h" #include "guest_modes.h" +#ifdef __aarch64__ +#include "aarch64/vgic.h" + +#define GICD_BASE_GPA 0x8000000ULL +#define GICR_BASE_GPA 0x80A0000ULL +#endif /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/ #define TEST_HOST_LOOP_N 2UL @@ -200,6 +206,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) vm_enable_cap(vm, &cap); } +#ifdef __aarch64__ + vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA); +#endif + /* Start the iterations */ iteration = 0; host_quit = false; -- cgit v1.2.3 From cc94d47ce16d4147d546e47c8248e8bd12ba5fe5 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:54 -0800 Subject: kvm: selftests: aarch64: fix assert in gicv3_access_reg The val argument in gicv3_access_reg can have any value when used for a read, not necessarily 0. Fix the assert by checking val only for writes. 
Signed-off-by: Ricardo Koller
Reported-by: Reiji Watanabe
Cc: Andrew Jones
Reviewed-by: Andrew Jones
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220127030858.3269036-2-ricarkol@google.com
---
 tools/testing/selftests/kvm/lib/aarch64/gic_v3.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
index 00f613c0583c..e4945fe66620 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
@@ -159,7 +159,7 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset,
 	uint32_t cpu_or_dist;
 
 	GUEST_ASSERT(bits_per_field <= reg_bits);
-	GUEST_ASSERT(*val < (1U << bits_per_field));
+	GUEST_ASSERT(!write || *val < (1U << bits_per_field));
 
 	/* Some registers like IROUTER are 64 bit long. Those are currently not
 	 * supported by readl nor writel, so just asserting here until then.
 	 */
-- cgit v1.2.3

From 11024a7a0ac26dd31ddfa0f6590e158bdf9ab858 Mon Sep 17 00:00:00 2001
From: Ricardo Koller
Date: Wed, 26 Jan 2022 19:08:55 -0800
Subject: kvm: selftests: aarch64: pass vgic_irq guest args as a pointer

The guest in vgic_irq gets its arguments in a struct. This struct used to
fit nicely in a single register, so vcpu_args_set() was able to pass it by
value by setting x0 with it. Unfortunately, this args struct grew after
some commits and some guest args became random (specifically
kvm_supports_irqfd).

Fix this by passing the guest args as a pointer (after allocating some
guest memory for it).

Signed-off-by: Ricardo Koller
Reported-by: Reiji Watanabe
Cc: Andrew Jones
Reviewed-by: Andrew Jones
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220127030858.3269036-3-ricarkol@google.com
---
 tools/testing/selftests/kvm/aarch64/vgic_irq.c | 29 ++++++++++++++------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
index e6c7d7f8fbd1..b701eb80128d 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
@@ -472,10 +472,10 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc *
 	guest_restore_active(args, MIN_SPI, 4, f->cmd);
 }
 
-static void guest_code(struct test_args args)
+static void guest_code(struct test_args *args)
 {
-	uint32_t i, nr_irqs = args.nr_irqs;
-	bool level_sensitive = args.level_sensitive;
+	uint32_t i, nr_irqs = args->nr_irqs;
+	bool level_sensitive = args->level_sensitive;
 	struct kvm_inject_desc *f, *inject_fns;
 
 	gic_init(GIC_V3, 1, dist, redist);
@@ -484,11 +484,11 @@ static void guest_code(struct test_args args)
 		gic_irq_enable(i);
 
 	for (i = MIN_SPI; i < nr_irqs; i++)
-		gic_irq_set_config(i, !args.level_sensitive);
+		gic_irq_set_config(i, !level_sensitive);
 
-	gic_set_eoi_split(args.eoi_split);
+	gic_set_eoi_split(args->eoi_split);
 
-	reset_priorities(&args);
+	reset_priorities(args);
 	gic_set_priority_mask(CPU_PRIO_MASK);
 
 	inject_fns  = level_sensitive ? inject_level_fns
@@ -497,17 +497,17 @@ static void guest_code(struct test_args args)
 
 	local_irq_enable();
 
 	/* Start the tests. 
*/ - for_each_supported_inject_fn(&args, inject_fns, f) { - test_injection(&args, f); - test_preemption(&args, f); - test_injection_failure(&args, f); + for_each_supported_inject_fn(args, inject_fns, f) { + test_injection(args, f); + test_preemption(args, f); + test_injection_failure(args, f); } /* Restore the active state of IRQs. This would happen when live * migrating IRQs in the middle of being handled. */ - for_each_supported_activate_fn(&args, set_active_fns, f) - test_restore_active(&args, f); + for_each_supported_activate_fn(args, set_active_fns, f) + test_restore_active(args, f); GUEST_DONE(); } @@ -739,6 +739,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) int gic_fd; struct kvm_vm *vm; struct kvm_inject_args inject_args; + vm_vaddr_t args_gva; struct test_args args = { .nr_irqs = nr_irqs, @@ -757,7 +758,9 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) vcpu_init_descriptor_tables(vm, VCPU_ID); /* Setup the guest args page (so it gets the args). */ - vcpu_args_set(vm, 0, 1, args); + args_gva = vm_vaddr_alloc_page(vm); + memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); + vcpu_args_set(vm, 0, 1, args_gva); gic_fd = vgic_v3_setup(vm, 1, nr_irqs, GICD_BASE_GPA, GICR_BASE_GPA); -- cgit v1.2.3 From 5b7898648f02083012900e48d063e51ccbdad165 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:56 -0800 Subject: kvm: selftests: aarch64: fix the failure check in kvm_set_gsi_routing_irqchip_check kvm_set_gsi_routing_irqchip_check(expect_failure=true) is used to check the error code returned by the kernel when trying to setup an invalid gsi routing table. The ioctl fails if "pin >= KVM_IRQCHIP_NUM_PINS", so kvm_set_gsi_routing_irqchip_check() should test the error only when "intid >= KVM_IRQCHIP_NUM_PINS+32". The issue is that the test check is "intid >= KVM_IRQCHIP_NUM_PINS", so for a case like "intid = KVM_IRQCHIP_NUM_PINS" the test wrongly assumes that the kernel will return an error. Fix this by using the right check. Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-4-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index b701eb80128d..0106fc464afe 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -573,8 +573,8 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, kvm_gsi_routing_write(vm, routing); } else { ret = _kvm_gsi_routing_write(vm, routing); - /* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */ - if (intid >= KVM_IRQCHIP_NUM_PINS) + /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */ + if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) TEST_ASSERT(ret != 0 && errno == EINVAL, "Bad intid %u did not cause KVM_SET_GSI_ROUTING " "error: rc: %i errno: %i", intid, ret, errno); -- cgit v1.2.3 From a5cd38fd9c47b23abc6df08d6ee6a71b39038185 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:57 -0800 Subject: kvm: selftests: aarch64: fix some vgic related comments Fix the formatting of some comments and the wording of one of them (in gicv3_access_reg). 
Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-5-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 12 ++++++++---- tools/testing/selftests/kvm/lib/aarch64/gic_v3.c | 10 ++++++---- tools/testing/selftests/kvm/lib/aarch64/vgic.c | 3 ++- 3 files changed, 16 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 0106fc464afe..f0230711fbe9 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -306,7 +306,8 @@ static void guest_restore_active(struct test_args *args, uint32_t prio, intid, ap1r; int i; - /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs + /* + * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs * in descending order, so intid+1 can preempt intid. */ for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) { @@ -315,7 +316,8 @@ static void guest_restore_active(struct test_args *args, gic_set_priority(intid, prio); } - /* In a real migration, KVM would restore all GIC state before running + /* + * In a real migration, KVM would restore all GIC state before running * guest code. */ for (i = 0; i < num; i++) { @@ -503,7 +505,8 @@ static void guest_code(struct test_args *args) test_injection_failure(args, f); } - /* Restore the active state of IRQs. This would happen when live + /* + * Restore the active state of IRQs. This would happen when live * migrating IRQs in the middle of being handled. */ for_each_supported_activate_fn(args, set_active_fns, f) @@ -840,7 +843,8 @@ int main(int argc, char **argv) } } - /* If the user just specified nr_irqs and/or gic_version, then run all + /* + * If the user just specified nr_irqs and/or gic_version, then run all * combinations. */ if (default_args) { diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c index e4945fe66620..263bf3ed8fd5 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -19,7 +19,7 @@ struct gicv3_data { unsigned int nr_spis; }; -#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) +#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) #define DIST_BIT (1U << 31) enum gicv3_intid_range { @@ -105,7 +105,8 @@ static void gicv3_set_eoi_split(bool split) { uint32_t val; - /* All other fields are read-only, so no need to read CTLR first. In + /* + * All other fields are read-only, so no need to read CTLR first. In * fact, the kernel does the same. */ val = split ? (1U << 1) : 0; @@ -160,8 +161,9 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset, GUEST_ASSERT(bits_per_field <= reg_bits); GUEST_ASSERT(!write || *val < (1U << bits_per_field)); - /* Some registers like IROUTER are 64 bit long. Those are currently not - * supported by readl nor writel, so just asserting here until then. + /* + * This function does not support 64 bit accesses. Just asserting here + * until we implement readq/writeq. 
 	 */
 	GUEST_ASSERT(reg_bits == 32);
 
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
index b3a0fca0d780..79864b941617 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -150,7 +150,8 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
 		attr += SZ_64K;
 	}
 
-	/* All calls will succeed, even with invalid intid's, as long as the
+	/*
+	 * All calls will succeed, even with invalid intid's, as long as the
 	 * addr part of the attr is within 32 bits (checked above). An invalid
 	 * intid will just make the read/writes point to above the intended
 	 * register space (i.e., ICPENDR after ISPENDR).
-- cgit v1.2.3

From b53de63a89244c196d8a2ea76b6754e3fdb4b626 Mon Sep 17 00:00:00 2001
From: Ricardo Koller
Date: Wed, 26 Jan 2022 19:08:58 -0800
Subject: kvm: selftests: aarch64: use a tighter assert in vgic_poke_irq()

vgic_poke_irq() checks that the attr argument passed to the vgic device
ioctl is sane. Make this check tighter by moving it to after the last attr
update.

Signed-off-by: Ricardo Koller
Reported-by: Reiji Watanabe
Cc: Andrew Jones
Reviewed-by: Andrew Jones
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220127030858.3269036-6-ricarkol@google.com
---
 tools/testing/selftests/kvm/lib/aarch64/vgic.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'tools')

diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
index 79864b941617..f365c32a7296 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -138,9 +138,6 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
 	uint64_t val;
 	bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
 
-	/* Check that the addr part of the attr is within 32 bits. */
-	assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
-
 	uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
 					  : KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
 
@@ -150,6 +147,9 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
 		attr += SZ_64K;
 	}
 
+	/* Check that the addr part of the attr is within 32 bits. */
+	assert((attr & ~KVM_DEV_ARM_VGIC_OFFSET_MASK) == 0);
+
 	/*
 	 * All calls will succeed, even with invalid intid's, as long as the
 	 * addr part of the attr is within 32 bits (checked above). An invalid
 	 * intid will just make the read/writes point to above the intended
 	 * register space (i.e., ICPENDR after ISPENDR).
-- cgit v1.2.3

From 6ee7fca2a4a023b14aa1f1f3c4f6c833116116ef Mon Sep 17 00:00:00 2001
From: Alexandru Elisei
Date: Thu, 27 Jan 2022 16:17:58 +0000
Subject: KVM: arm64: Add KVM_ARM_VCPU_PMU_V3_SET_PMU attribute

When KVM creates an event and there is more than one PMU present on the
system, perf_init_event() will go through the list of available PMUs and
will choose the first one that can create the event. The order of the PMUs
in this list depends on the probe order, which can change under various
circumstances, for example if the order of the PMU nodes changes in the
DTB or if asynchronous driver probing is enabled on the kernel command
line (with the driver_async_probe=armv8-pmu option).

Another consequence of this approach is that, on heterogeneous systems,
all virtual machines that KVM creates will use the same PMU. This might
cause unexpected behaviour for userspace: when a VCPU is executing on the
physical CPU that uses this default PMU, PMU events in the guest work
correctly; but when the same VCPU executes on another CPU, PMU events in
the guest will suddenly stop counting.
Fortunately, perf core allows the user to specify on which PMU to create
an event by using the perf_event_attr->type field, which is used by
perf_init_event() as an index in the radix tree of available PMUs.

Add the KVM_ARM_VCPU_PMU_V3_CTRL(KVM_ARM_VCPU_PMU_V3_SET_PMU) VCPU
attribute to allow userspace to specify the arm_pmu that KVM will use when
creating events for that VCPU. KVM will make no attempt to run the VCPU on
the physical CPUs that share the PMU, leaving it up to userspace to manage
the VCPU threads' affinity accordingly.

To ensure that KVM doesn't expose an asymmetric system to the guest, the
PMU set for one VCPU will be used by all the other VCPUs. Once a VCPU has
run, the PMU cannot be changed, in order to avoid changing the list of
available events for a VCPU or the semantics of existing events.

Signed-off-by: Alexandru Elisei
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220127161759.53553-6-alexandru.elisei@arm.com
---
 Documentation/virt/kvm/devices/vcpu.rst | 28 +++++++++++++++++++++++
 arch/arm64/include/uapi/asm/kvm.h       |  1 +
 arch/arm64/kvm/pmu-emul.c               | 40 +++++++++++++++++++++++++++++++++
 tools/arch/arm64/include/uapi/asm/kvm.h |  1 +
 4 files changed, 70 insertions(+)

(limited to 'tools')

diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
index d063aaee5bb7..e8c5770590a2 100644
--- a/Documentation/virt/kvm/devices/vcpu.rst
+++ b/Documentation/virt/kvm/devices/vcpu.rst
@@ -104,6 +104,34 @@ hardware event. Filtering event 0x1E (CHAIN) has no effect either, as it isn't
 strictly speaking an event. Filtering the cycle counter is possible using
 event 0x11 (CPU_CYCLES).
 
+1.4 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_SET_PMU
+------------------------------------------
+
+:Parameters: in kvm_device_attr.addr the address of an int representing the PMU
+             identifier.
+
+:Returns:
+
+	 ======= ====================================================
+	 -EBUSY  PMUv3 already initialized, a VCPU has already run or
+	         an event filter has already been set
+	 -EFAULT Error accessing the PMU identifier
+	 -ENXIO  PMU not found
+	 -ENODEV PMUv3 not supported or GIC not initialized
+	 -ENOMEM Could not allocate memory
+	 ======= ====================================================
+
+Request that the VCPU uses the specified hardware PMU when creating guest events
+for the purpose of PMU emulation. The PMU identifier can be read from the "type"
+file for the desired PMU instance under /sys/devices (or, equivalently,
+/sys/bus/event_source). This attribute is particularly useful on heterogeneous
+systems where there are at least two CPU PMUs on the system. The PMU that is set
+for one VCPU will be used by all the other VCPUs. It isn't possible to set a PMU
+if a PMU event filter is already present.
+
+Note that KVM will not make any attempts to run the VCPU on the physical CPUs
+associated with the PMU specified by this attribute. This is entirely left to
+userspace.
 
 2. 
GROUP: KVM_ARM_VCPU_TIMER_CTRL ================================= diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index b3edde68bc3e..1d0a0a2a9711 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -362,6 +362,7 @@ struct kvm_arm_copy_mte_tags { #define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_INIT 1 #define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 7bab73f85b58..18361f367495 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -948,6 +948,36 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) return true; } +static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) +{ + struct kvm *kvm = vcpu->kvm; + struct arm_pmu_entry *entry; + struct arm_pmu *arm_pmu; + int ret = -ENXIO; + + mutex_lock(&kvm->lock); + mutex_lock(&arm_pmus_lock); + + list_for_each_entry(entry, &arm_pmus, entry) { + arm_pmu = entry->arm_pmu; + if (arm_pmu->pmu.type == pmu_id) { + if (kvm->arch.ran_once || + (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) { + ret = -EBUSY; + break; + } + + kvm->arch.arm_pmu = arm_pmu; + ret = 0; + break; + } + } + + mutex_unlock(&arm_pmus_lock); + mutex_unlock(&kvm->lock); + return ret; +} + int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { struct kvm *kvm = vcpu->kvm; @@ -1046,6 +1076,15 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return 0; } + case KVM_ARM_VCPU_PMU_V3_SET_PMU: { + int __user *uaddr = (int __user *)(long)attr->addr; + int pmu_id; + + if (get_user(pmu_id, uaddr)) + return -EFAULT; + + return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id); + } case KVM_ARM_VCPU_PMU_V3_INIT: return kvm_arm_pmu_v3_init(vcpu); } @@ -1083,6 +1122,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) case KVM_ARM_VCPU_PMU_V3_IRQ: case KVM_ARM_VCPU_PMU_V3_INIT: case KVM_ARM_VCPU_PMU_V3_FILTER: + case KVM_ARM_VCPU_PMU_V3_SET_PMU: if (kvm_vcpu_has_pmu(vcpu)) return 0; } diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index b3edde68bc3e..1d0a0a2a9711 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -362,6 +362,7 @@ struct kvm_arm_copy_mte_tags { #define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_INIT 1 #define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 -- cgit v1.2.3
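The KVM_ARM_VCPU_PMU_V3_SET_PMU attribute added by the last patch is driven through the regular vcpu device-attribute ioctl. The snippet below is not part of the series; it is a minimal userspace sketch. It assumes a vcpu fd obtained elsewhere (KVM_CREATE_VCPU on a vcpu initialized with the KVM_ARM_VCPU_PMU_V3 feature), and the sysfs path in the usage note is a hypothetical example of a PMU instance's "type" file.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Read the perf "type" of a PMU instance from sysfs. The caller supplies the
 * path, e.g. "/sys/bus/event_source/devices/<pmu-name>/type" (name depends on
 * the machine).
 */
static int read_pmu_type(const char *type_path)
{
	FILE *f = fopen(type_path, "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	return type;
}

/*
 * Bind the vcpu's emulated PMU to the hardware PMU identified by pmu_id.
 * Returns 0 on success, or -1 with errno set to one of the codes listed in
 * the documentation above (EBUSY, ENXIO, ...).
 */
static int set_vcpu_pmu(int vcpu_fd, int pmu_id)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_SET_PMU,
		.addr  = (__u64)(unsigned long)&pmu_id,
	};

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Per the new documentation, set_vcpu_pmu() must be called before the vcpu first runs, the PMU chosen for one vcpu applies to all vcpus of the VM, and pinning the vcpu threads to the CPUs that implement that PMU remains the VMM's responsibility.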