author    | Paolo Bonzini <pbonzini@redhat.com> | 2022-03-15 17:19:02 -0400
committer | Paolo Bonzini <pbonzini@redhat.com> | 2022-03-15 17:19:02 -0400
commit    | 3b53f5535d30ed8667f84042bff35163d0fa5483 (patch)
tree      | 71b02a464cf9c94c0cf092460c412e83f20ca076 /tools
parent    | 4a204f7895878363ca8211f50ec610408c8c70aa (diff)
parent    | 3bcc372c9865bec3ab9bfcf30b2426cf68bc18af (diff)
download  | linux-3b53f5535d30ed8667f84042bff35163d0fa5483.tar.bz2
Merge tag 'kvm-s390-next-5.18-2' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Fix, test and feature for 5.18 part 2
- memop selftest
- fix SCK locking
- adapter interruptions virtualization for secure guests
Diffstat (limited to 'tools')
-rw-r--r-- | tools/testing/selftests/kvm/s390x/memop.c | 735
1 file changed, 617 insertions(+), 118 deletions(-)
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index d19c3ffdea3f..b04c2c1b3c30 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -13,169 +13,668 @@
 #include "test_util.h"
 #include "kvm_util.h"
 
+enum mop_target {
+	LOGICAL,
+	SIDA,
+	ABSOLUTE,
+	INVALID,
+};
+
+enum mop_access_mode {
+	READ,
+	WRITE,
+};
+
+struct mop_desc {
+	uintptr_t gaddr;
+	uintptr_t gaddr_v;
+	uint64_t set_flags;
+	unsigned int f_check : 1;
+	unsigned int f_inject : 1;
+	unsigned int f_key : 1;
+	unsigned int _gaddr_v : 1;
+	unsigned int _set_flags : 1;
+	unsigned int _sida_offset : 1;
+	unsigned int _ar : 1;
+	uint32_t size;
+	enum mop_target target;
+	enum mop_access_mode mode;
+	void *buf;
+	uint32_t sida_offset;
+	uint8_t ar;
+	uint8_t key;
+};
+
+static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
+{
+	struct kvm_s390_mem_op ksmo = {
+		.gaddr = (uintptr_t)desc.gaddr,
+		.size = desc.size,
+		.buf = ((uintptr_t)desc.buf),
+		.reserved = "ignored_ignored_ignored_ignored"
+	};
+
+	switch (desc.target) {
+	case LOGICAL:
+		if (desc.mode == READ)
+			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
+		if (desc.mode == WRITE)
+			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
+		break;
+	case SIDA:
+		if (desc.mode == READ)
+			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
+		if (desc.mode == WRITE)
+			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
+		break;
+	case ABSOLUTE:
+		if (desc.mode == READ)
+			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
+		if (desc.mode == WRITE)
+			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
+		break;
+	case INVALID:
+		ksmo.op = -1;
+	}
+	if (desc.f_check)
+		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+	if (desc.f_inject)
+		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
+	if (desc._set_flags)
+		ksmo.flags = desc.set_flags;
+	if (desc.f_key) {
+		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
+		ksmo.key = desc.key;
+	}
+	if (desc._ar)
+		ksmo.ar = desc.ar;
+	else
+		ksmo.ar = 0;
+	if (desc._sida_offset)
+		ksmo.sida_offset = desc.sida_offset;
+
+	return ksmo;
+}
+
+/* vcpu dummy id signifying that vm instead of vcpu ioctl is to occur */
+const uint32_t VM_VCPU_ID = (uint32_t)-1;
+
+struct test_vcpu {
+	struct kvm_vm *vm;
+	uint32_t id;
+};
+
+#define PRINT_MEMOP false
+static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
+{
+	if (!PRINT_MEMOP)
+		return;
+
+	if (vcpu_id == VM_VCPU_ID)
+		printf("vm memop(");
+	else
+		printf("vcpu memop(");
+	switch (ksmo->op) {
+	case KVM_S390_MEMOP_LOGICAL_READ:
+		printf("LOGICAL, READ, ");
+		break;
+	case KVM_S390_MEMOP_LOGICAL_WRITE:
+		printf("LOGICAL, WRITE, ");
+		break;
+	case KVM_S390_MEMOP_SIDA_READ:
+		printf("SIDA, READ, ");
+		break;
+	case KVM_S390_MEMOP_SIDA_WRITE:
+		printf("SIDA, WRITE, ");
+		break;
+	case KVM_S390_MEMOP_ABSOLUTE_READ:
+		printf("ABSOLUTE, READ, ");
+		break;
+	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
+		printf("ABSOLUTE, WRITE, ");
+		break;
+	}
+	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u",
+	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key);
+	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
+		printf(", CHECK_ONLY");
+	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
+		printf(", INJECT_EXCEPTION");
+	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
+		printf(", SKEY_PROTECTION");
+	puts(")");
+}
+
+static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+{
+	if (vcpu.id == VM_VCPU_ID)
+		vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+	else
+		vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+}
+
+static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+{
+	if (vcpu.id == VM_VCPU_ID)
+		return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+	else
+		return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+}
+
+#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
+({ \
+	struct test_vcpu __vcpu = (vcpu_p); \
+	struct mop_desc __desc = { \
+		.target = (mop_target_p), \
+		.mode = (access_mode_p), \
+		.buf = (buf_p), \
+		.size = (size_p), \
+		__VA_ARGS__ \
+	}; \
+	struct kvm_s390_mem_op __ksmo; \
+ \
+	if (__desc._gaddr_v) { \
+		if (__desc.target == ABSOLUTE) \
+			__desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v); \
+		else \
+			__desc.gaddr = __desc.gaddr_v; \
+	} \
+	__ksmo = ksmo_from_desc(__desc); \
+	print_memop(__vcpu.id, &__ksmo); \
+	err##memop_ioctl(__vcpu, &__ksmo); \
+})
+
+#define MOP(...) MEMOP(, __VA_ARGS__)
+#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)
+
+#define GADDR(a) .gaddr = ((uintptr_t)a)
+#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
+#define CHECK_ONLY .f_check = 1
+#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
+#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
+#define AR(a) ._ar = 1, .ar = (a)
+#define KEY(a) .f_key = 1, .key = (a)
+
+#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
+
 #define VCPU_ID 1
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1ULL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
+#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))
 
 static uint8_t mem1[65536];
 static uint8_t mem2[65536];
 
-static void guest_code(void)
+struct test_default {
+	struct kvm_vm *kvm_vm;
+	struct test_vcpu vm;
+	struct test_vcpu vcpu;
+	struct kvm_run *run;
+	int size;
+};
+
+static struct test_default test_default_init(void *guest_code)
+{
+	struct test_default t;
+
+	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
+	t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code);
+	t.vm = (struct test_vcpu) { t.kvm_vm, VM_VCPU_ID };
+	t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID };
+	t.run = vcpu_state(t.kvm_vm, VCPU_ID);
+	return t;
+}
+
+enum stage {
+	/* Synced state set by host, e.g. DAT */
+	STAGE_INITED,
+	/* Guest did nothing */
+	STAGE_IDLED,
+	/* Guest set storage keys (specifics up to test case) */
+	STAGE_SKEYS_SET,
+	/* Guest copied memory (locations up to test case) */
+	STAGE_COPIED,
+};
+
+#define HOST_SYNC(vcpu_p, stage) \
+({ \
+	struct test_vcpu __vcpu = (vcpu_p); \
+	struct ucall uc; \
+	int __stage = (stage); \
+ \
+	vcpu_run(__vcpu.vm, __vcpu.id); \
+	get_ucall(__vcpu.vm, __vcpu.id, &uc); \
+	ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+	ASSERT_EQ(uc.args[1], __stage); \
+}) \
+
+static void prepare_mem12(void)
 {
 	int i;
 
+	for (i = 0; i < sizeof(mem1); i++)
+		mem1[i] = rand();
+	memset(mem2, 0xaa, sizeof(mem2));
+}
+
+#define ASSERT_MEM_EQ(p1, p2, size) \
+	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")
+
+#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
+({ \
+	struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+	enum mop_target __target = (mop_target_p); \
+	uint32_t __size = (size); \
+ \
+	prepare_mem12(); \
+	CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
+			GADDR_V(mem1), ##__VA_ARGS__); \
+	HOST_SYNC(__copy_cpu, STAGE_COPIED); \
+	CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, \
+			GADDR_V(mem2), ##__VA_ARGS__); \
+	ASSERT_MEM_EQ(mem1, mem2, __size); \
+})
+
+#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
+({ \
+	struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+	enum mop_target __target = (mop_target_p); \
+	uint32_t __size = (size); \
+ \
+	prepare_mem12(); \
+	CHECK_N_DO(MOP, __mop_cpu, __target, WRITE, mem1, __size, \
+			GADDR_V(mem1)); \
+	HOST_SYNC(__copy_cpu, STAGE_COPIED); \
+	CHECK_N_DO(MOP, __mop_cpu, __target, READ, mem2, __size, ##__VA_ARGS__);\
+	ASSERT_MEM_EQ(mem1, mem2, __size); \
+})
+
+static void guest_copy(void)
+{
+	GUEST_SYNC(STAGE_INITED);
+	memcpy(&mem2, &mem1, sizeof(mem2));
+	GUEST_SYNC(STAGE_COPIED);
+}
+
+static void test_copy(void)
+{
+	struct test_default t = test_default_init(guest_copy);
+
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+
+	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size);
+
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void set_storage_key_range(void *addr, size_t len, uint8_t key)
+{
+	uintptr_t _addr, abs, i;
+	int not_mapped = 0;
+
+	_addr = (uintptr_t)addr;
+	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
+		abs = i;
+		asm volatile (
+			       "lra	%[abs], 0(0,%[abs])\n"
+			"	jz	0f\n"
+			"	llill	%[not_mapped],1\n"
+			"	j	1f\n"
+			"0:	sske	%[key], %[abs]\n"
+			"1:"
+			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
+			: [key] "r" (key)
+			: "cc"
+		);
+		GUEST_ASSERT_EQ(not_mapped, 0);
+	}
+}
+
+static void guest_copy_key(void)
+{
+	set_storage_key_range(mem1, sizeof(mem1), 0x90);
+	set_storage_key_range(mem2, sizeof(mem2), 0x90);
+	GUEST_SYNC(STAGE_SKEYS_SET);
+
 	for (;;) {
-		for (i = 0; i < sizeof(mem2); i++)
-			mem2[i] = mem1[i];
-		GUEST_SYNC(0);
+		memcpy(&mem2, &mem1, sizeof(mem2));
+		GUEST_SYNC(STAGE_COPIED);
 	}
 }
 
-int main(int argc, char *argv[])
+static void test_copy_key(void)
 {
-	struct kvm_vm *vm;
-	struct kvm_run *run;
-	struct kvm_s390_mem_op ksmo;
-	int rv, i, maxsize;
+	struct test_default t = test_default_init(guest_copy_key);
 
-	setbuf(stdout, NULL);	/* Tell stdout not to buffer its content */
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
 
-	maxsize = kvm_check_cap(KVM_CAP_S390_MEM_OP);
-	if (!maxsize) {
-		print_skip("CAP_S390_MEM_OP not supported");
-		exit(KSFT_SKIP);
+	/* vm, no key */
+	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);
+
+	/* vm/vcpu, matching key or key 0 */
+	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));
+	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));
+	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));
+	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));
+	/*
+	 * There used to be different code paths for key handling depending on
+	 * if the region crossed a page boundary.
+	 * There currently are not, but the more tests the merrier.
+	 */
+	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));
+	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));
+	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));
+	DEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));
+
+	/* vm/vcpu, mismatching keys on read, but no fetch protection */
+	DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));
+	DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));
+
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void guest_copy_key_fetch_prot(void)
+{
+	/*
+	 * For some reason combining the first sync with override enablement
+	 * results in an exception when calling HOST_SYNC.
+	 */
+	GUEST_SYNC(STAGE_INITED);
+	/* Storage protection override applies to both store and fetch. */
+	set_storage_key_range(mem1, sizeof(mem1), 0x98);
+	set_storage_key_range(mem2, sizeof(mem2), 0x98);
+	GUEST_SYNC(STAGE_SKEYS_SET);
+
+	for (;;) {
+		memcpy(&mem2, &mem1, sizeof(mem2));
+		GUEST_SYNC(STAGE_COPIED);
 	}
-	if (maxsize > sizeof(mem1))
-		maxsize = sizeof(mem1);
+}
+
+static void test_copy_key_storage_prot_override(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
 
-	/* Create VM */
-	vm = vm_create_default(VCPU_ID, 0, guest_code);
-	run = vcpu_state(vm, VCPU_ID);
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
 
-	for (i = 0; i < sizeof(mem1); i++)
-		mem1[i] = i * i + i;
-
-	/* Set the first array */
-	ksmo.gaddr = addr_gva2gpa(vm, (uintptr_t)mem1);
-	ksmo.flags = 0;
-	ksmo.size = maxsize;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 0;
-	vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
-
-	/* Let the guest code copy the first array to the second */
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
-		    "Unexpected exit reason: %u (%s)\n",
-		    run->exit_reason,
-		    exit_reason_str(run->exit_reason));
+	/* vcpu, mismatching keys, storage protection override in effect */
+	DEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(2));
 
-	memset(mem2, 0xaa, sizeof(mem2));
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void test_copy_key_fetch_prot(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+	/* vm/vcpu, matching key, fetch protection in effect */
+	DEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(9));
+	DEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem2), KEY(9));
+
+	kvm_vm_free(t.kvm_vm);
+}
+
+#define ERR_PROT_MOP(...) \
+({ \
+	int rv; \
+ \
+	rv = ERR_MOP(__VA_ARGS__); \
+	TEST_ASSERT(rv == 4, "Should result in protection exception"); \
+})
+
+static void test_errors_key(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+	/* vm/vcpu, mismatching keys, fetch protection in effect */
+	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
+	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
+	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
+	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
+
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void test_errors_key_storage_prot_override(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
+	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+	/* vm, mismatching keys, storage protection override not applicable to vm */
+	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
+	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));
+
+	kvm_vm_free(t.kvm_vm);
+}
+
+const uint64_t last_page_addr = -PAGE_SIZE;
+
+static void guest_copy_key_fetch_prot_override(void)
+{
+	int i;
+	char *page_0 = 0;
+
+	GUEST_SYNC(STAGE_INITED);
+	set_storage_key_range(0, PAGE_SIZE, 0x18);
+	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
+	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0), [key] "r"(0x18) : "cc");
+	GUEST_SYNC(STAGE_SKEYS_SET);
+
+	for (;;) {
+		for (i = 0; i < PAGE_SIZE; i++)
+			page_0[i] = mem1[i];
+		GUEST_SYNC(STAGE_COPIED);
+	}
+}
+
+static void test_copy_key_fetch_prot_override(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+	vm_vaddr_t guest_0_page, guest_last_page;
+
+	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
+	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+		print_skip("did not allocate guest pages at required positions");
+		goto out;
+	}
+
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+	/* vcpu, mismatching keys on fetch, fetch protection override applies */
+	prepare_mem12();
+	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
+	HOST_SYNC(t.vcpu, STAGE_COPIED);
+	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
+	ASSERT_MEM_EQ(mem1, mem2, 2048);
+
+	/*
+	 * vcpu, mismatching keys on fetch, fetch protection override applies,
+	 * wraparound
+	 */
+	prepare_mem12();
+	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
+	HOST_SYNC(t.vcpu, STAGE_COPIED);
+	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
+		   GADDR_V(guest_last_page), KEY(2));
+	ASSERT_MEM_EQ(mem1, mem2, 2048);
+
+out:
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void test_errors_key_fetch_prot_override_not_enabled(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+	vm_vaddr_t guest_0_page, guest_last_page;
+
+	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
+	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+		print_skip("did not allocate guest pages at required positions");
+		goto out;
+	}
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
 
-	/* Get the second array */
-	ksmo.gaddr = (uintptr_t)mem2;
-	ksmo.flags = 0;
-	ksmo.size = maxsize;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
-	ksmo.buf = (uintptr_t)mem2;
-	ksmo.ar = 0;
-	vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
-
-	TEST_ASSERT(!memcmp(mem1, mem2, maxsize),
-		    "Memory contents do not match!");
-
-	/* Check error conditions - first bad size: */
-	ksmo.gaddr = (uintptr_t)mem1;
-	ksmo.flags = 0;
-	ksmo.size = -1;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
+	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));
+
+out:
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void test_errors_key_fetch_prot_override_enabled(void)
+{
+	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
+	vm_vaddr_t guest_0_page, guest_last_page;
+
+	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
+	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
+	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
+		print_skip("did not allocate guest pages at required positions");
+		goto out;
+	}
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
+	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
+	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+	/*
+	 * vcpu, mismatching keys on fetch,
+	 * fetch protection override does not apply because memory range exceeded
+	 */
+	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
+	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
+		   GADDR_V(guest_last_page), KEY(2));
+	/* vm, fetch protection override does not apply */
+	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
+	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
+
+out:
+	kvm_vm_free(t.kvm_vm);
+}
+
+static void guest_idle(void)
+{
+	GUEST_SYNC(STAGE_INITED);	/* for consistency's sake */
+	for (;;)
+		GUEST_SYNC(STAGE_IDLED);
+}
+
+static void _test_errors_common(struct test_vcpu vcpu, enum mop_target target, int size)
+{
+	int rv;
+
+	/* Bad size: */
+	rv = ERR_MOP(vcpu, target, WRITE, mem1, -1, GADDR_V(mem1));
 	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
 
 	/* Zero size: */
-	ksmo.gaddr = (uintptr_t)mem1;
-	ksmo.flags = 0;
-	ksmo.size = 0;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	rv = ERR_MOP(vcpu, target, WRITE, mem1, 0, GADDR_V(mem1));
 	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
 		    "ioctl allows 0 as size");
 
 	/* Bad flags: */
-	ksmo.gaddr = (uintptr_t)mem1;
-	ksmo.flags = -1;
-	ksmo.size = maxsize;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
 	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
 
-	/* Bad operation: */
-	ksmo.gaddr = (uintptr_t)mem1;
-	ksmo.flags = 0;
-	ksmo.size = maxsize;
-	ksmo.op = -1;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
-	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
-
 	/* Bad guest address: */
-	ksmo.gaddr = ~0xfffUL;
-	ksmo.flags = KVM_S390_MEMOP_F_CHECK_ONLY;
-	ksmo.size = maxsize;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
 	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
 
 	/* Bad host address: */
-	ksmo.gaddr = (uintptr_t)mem1;
-	ksmo.flags = 0;
-	ksmo.size = maxsize;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = 0;
-	ksmo.ar = 0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	rv = ERR_MOP(vcpu, target, WRITE, 0, size, GADDR_V(mem1));
 	TEST_ASSERT(rv == -1 && errno == EFAULT,
 		    "ioctl does not report bad host memory address");
 
+	/* Bad key: */
+	rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
+	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
+}
+
+static void test_errors(void)
+{
+	struct test_default t = test_default_init(guest_idle);
+	int rv;
+
+	HOST_SYNC(t.vcpu, STAGE_INITED);
+
+	_test_errors_common(t.vcpu, LOGICAL, t.size);
+	_test_errors_common(t.vm, ABSOLUTE, t.size);
+
+	/* Bad operation: */
+	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
+	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
+	/* virtual addresses are not translated when passing INVALID */
+	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
+	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
+
 	/* Bad access register: */
-	run->psw_mask &= ~(3UL << (63 - 17));
-	run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
-	vcpu_run(vm, VCPU_ID);              /* To sync new state to SIE block */
-	ksmo.gaddr = (uintptr_t)mem1;
-	ksmo.flags = 0;
-	ksmo.size = maxsize;
-	ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.ar = 17;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	t.run->psw_mask &= ~(3UL << (63 - 17));
+	t.run->psw_mask |= 1UL << (63 - 17);	/* Enable AR mode */
+	HOST_SYNC(t.vcpu, STAGE_IDLED);		/* To sync new state to SIE block */
+	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
 	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
-	run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
-	vcpu_run(vm, VCPU_ID);                  /* Run to sync new state */
+	t.run->psw_mask &= ~(3UL << (63 - 17));	/* Disable AR mode */
+	HOST_SYNC(t.vcpu, STAGE_IDLED);		/* Run to sync new state */
 
 	/* Check that the SIDA calls are rejected for non-protected guests */
-	ksmo.gaddr = 0;
-	ksmo.flags = 0;
-	ksmo.size = 8;
-	ksmo.op = KVM_S390_MEMOP_SIDA_READ;
-	ksmo.buf = (uintptr_t)mem1;
-	ksmo.sida_offset = 0x1c0;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
 	TEST_ASSERT(rv == -1 && errno == EINVAL,
 		    "ioctl does not reject SIDA_READ in non-protected mode");
 
-	ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
+	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
 	TEST_ASSERT(rv == -1 && errno == EINVAL,
 		    "ioctl does not reject SIDA_WRITE in non-protected mode");
 
-	kvm_vm_free(vm);
+	kvm_vm_free(t.kvm_vm);
+}
+
+int main(int argc, char *argv[])
+{
+	int memop_cap, extension_cap;
+
+	setbuf(stdout, NULL);	/* Tell stdout not to buffer its content */
+
+	memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
+	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
+	if (!memop_cap) {
+		print_skip("CAP_S390_MEM_OP not supported");
+		exit(KSFT_SKIP);
+	}
+
+	test_copy();
+	if (extension_cap > 0) {
+		test_copy_key();
+		test_copy_key_storage_prot_override();
+		test_copy_key_fetch_prot();
+		test_copy_key_fetch_prot_override();
+		test_errors_key();
+		test_errors_key_storage_prot_override();
+		test_errors_key_fetch_prot_override_not_enabled();
+		test_errors_key_fetch_prot_override_enabled();
+	} else {
+		print_skip("storage key memop extension not supported");
+	}
+	test_errors();
 
 	return 0;
 }
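For orientation, below is a rough sketch (not part of the patch) of the plain ioctl that a single MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(9)) call reduces to, following ksmo_from_desc() in the diff above; the function name example_keyed_logical_write is made up for illustration. CHECK_N_DO() simply issues the same memop twice, first with KVM_S390_MEMOP_F_CHECK_ONLY added and then for real.

	/*
	 * Illustration only: the raw KVM_S390_MEM_OP ioctl the macro layer
	 * builds for a keyed logical write, per ksmo_from_desc() above.
	 */
	static void example_keyed_logical_write(struct test_default t)
	{
		struct kvm_s390_mem_op ksmo = {
			.gaddr = (uintptr_t)mem1,	/* GADDR_V(): logical target takes the gva as-is */
			.size = t.size,
			.op = KVM_S390_MEMOP_LOGICAL_WRITE,	/* LOGICAL + WRITE */
			.buf = (uintptr_t)mem1,		/* host buffer */
			.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,	/* added by KEY() */
			.key = 9,
			.ar = 0,
		};

		/* vcpu memop; a vm memop (t.vm) would go through vm_ioctl() instead */
		vcpu_ioctl(t.kvm_vm, VCPU_ID, KVM_S390_MEM_OP, &ksmo);
	}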