Diffstat (limited to 'virt/kvm')

 virt/kvm/arm/arm.c               |  5
 virt/kvm/arm/mmu.c               | 40
 virt/kvm/arm/vgic/vgic-debug.c   |  4
 virt/kvm/arm/vgic/vgic-its.c     | 10
 virt/kvm/arm/vgic/vgic-mmio-v2.c | 47
 virt/kvm/arm/vgic/vgic.h         | 16
 virt/kvm/kvm_main.c              |  7

 7 files changed, 89 insertions(+), 40 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a39a1e161e63..b9f68e4add71 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -416,6 +416,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 		&& !v->arch.power_off && !v->arch.pause);
 }
 
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return vcpu_mode_priv(vcpu);
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 2ea21dac0b44..b36945d49986 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1454,25 +1454,6 @@ out:
 	kvm_set_pfn_accessed(pfn);
 }
 
-static bool is_abort_sea(unsigned long fault_status)
-{
-	switch (fault_status) {
-	case FSC_SEA:
-	case FSC_SEA_TTW0:
-	case FSC_SEA_TTW1:
-	case FSC_SEA_TTW2:
-	case FSC_SEA_TTW3:
-	case FSC_SECC:
-	case FSC_SECC_TTW0:
-	case FSC_SECC_TTW1:
-	case FSC_SECC_TTW2:
-	case FSC_SECC_TTW3:
-		return true;
-	default:
-		return false;
-	}
-}
-
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
@@ -1498,20 +1479,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-	/*
-	 * The host kernel will handle the synchronous external abort. There
-	 * is no need to pass the error into the guest.
-	 */
-	if (is_abort_sea(fault_status)) {
+	/* Synchronous External Abort? */
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
+		/*
+		 * For RAS the host kernel may handle this abort.
+		 * There is no need to pass the error into the guest.
+		 */
 		if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
 			return 1;
-	}
 
-	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
-	if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
-		kvm_inject_vabt(vcpu);
-		return 1;
+		if (unlikely(!is_iabt)) {
+			kvm_inject_vabt(vcpu);
+			return 1;
+		}
 	}
 
 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 7072ab743332..10b38178cff2 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -234,7 +234,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	return 0;
 }
 
-static struct seq_operations vgic_debug_seq_ops = {
+static const struct seq_operations vgic_debug_seq_ops = {
 	.start = vgic_debug_start,
 	.next = vgic_debug_next,
 	.stop = vgic_debug_stop,
@@ -255,7 +255,7 @@ static int debug_open(struct inode *inode, struct file *file)
 	return ret;
 };
 
-static struct file_operations vgic_debug_fops = {
+static const struct file_operations vgic_debug_fops = {
 	.owner = THIS_MODULE,
 	.open = debug_open,
 	.read = seq_read,
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index aa6b68db80b4..f51c1e1b3f70 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -144,7 +144,6 @@ struct its_ite {
 
 	struct vgic_irq *irq;
 	struct its_collection *collection;
-	u32 lpi;
 	u32 event_id;
 };
 
@@ -813,7 +812,7 @@ static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
 /* Must be called with its_lock mutex held */
 static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
 					  struct its_collection *collection,
-					  u32 lpi_id, u32 event_id)
+					  u32 event_id)
 {
 	struct its_ite *ite;
 
@@ -823,7 +822,6 @@ static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
 
 	ite->event_id	= event_id;
 	ite->collection	= collection;
-	ite->lpi = lpi_id;
 
 	list_add_tail(&ite->ite_list, &device->itt_head);
 	return ite;
@@ -873,7 +871,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
 		new_coll = collection;
 	}
 
-	ite = vgic_its_alloc_ite(device, collection, lpi_nr, event_id);
+	ite = vgic_its_alloc_ite(device, collection, event_id);
 	if (IS_ERR(ite)) {
 		if (new_coll)
 			vgic_its_free_collection(its, coll_id);
@@ -1848,7 +1846,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 
 	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
 	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
-	       ((u64)ite->lpi << KVM_ITS_ITE_PINTID_SHIFT) |
+	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
 		ite->collection->collection_id;
 	val = cpu_to_le64(val);
 	return kvm_write_guest(kvm, gpa, &val, ite_esz);
@@ -1895,7 +1893,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
 	if (!collection)
 		return -EINVAL;
 
-	ite = vgic_its_alloc_ite(dev, collection, lpi_id, event_id);
+	ite = vgic_its_alloc_ite(dev, collection, event_id);
 	if (IS_ERR(ite))
 		return PTR_ERR(ite);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 37522e65eb53..b3d4a10f09a1 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -303,6 +303,51 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 	vgic_set_vmcr(vcpu, &vmcr);
 }
 
+static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
+					gpa_t addr, unsigned int len)
+{
+	int n; /* which APRn is this */
+
+	n = (addr >> 2) & 0x3;
+
+	if (kvm_vgic_global_state.type == VGIC_V2) {
+		/* GICv2 hardware systems support max. 32 groups */
+		if (n != 0)
+			return 0;
+		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
+	} else {
+		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+		if (n > vgic_v3_max_apr_idx(vcpu))
+			return 0;
+		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+		return vgicv3->vgic_ap1r[n];
+	}
+}
+
+static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
+				gpa_t addr, unsigned int len,
+				unsigned long val)
+{
+	int n; /* which APRn is this */
+
+	n = (addr >> 2) & 0x3;
+
+	if (kvm_vgic_global_state.type == VGIC_V2) {
+		/* GICv2 hardware systems support max. 32 groups */
+		if (n != 0)
+			return;
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
+	} else {
+		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+		if (n > vgic_v3_max_apr_idx(vcpu))
+			return;
+		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+		vgicv3->vgic_ap1r[n] = val;
+	}
+}
+
 static const struct vgic_register_region vgic_v2_dist_registers[] = {
 	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
 		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
@@ -364,7 +409,7 @@ static const struct vgic_register_region vgic_v2_cpu_registers[] = {
 		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
-		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
+		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
 		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index bba7fa22a7f7..bf9ceab67c77 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -220,4 +220,20 @@ int vgic_debug_destroy(struct kvm *kvm);
 bool lock_all_vcpus(struct kvm *kvm);
 void unlock_all_vcpus(struct kvm *kvm);
 
+static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
+
+	/*
+	 * num_pri_bits are initialized with HW supported values.
+	 * We can rely safely on num_pri_bits even if VM has not
+	 * restored ICC_CTLR_EL1 before restoring APnR registers.
+	 */
+	switch (cpu_if->num_pri_bits) {
+	case 7: return 3;
+	case 6: return 1;
+	default: return 0;
+	}
+}
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4d81f6ded88e..6ed1c2021198 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1609,7 +1609,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 			    struct page **pages, int nr_pages)
 {
 	unsigned long addr;
-	gfn_t entry;
+	gfn_t entry = 0;
 
 	addr = gfn_to_hva_many(slot, gfn, &entry);
 	if (kvm_is_error_hva(addr))
@@ -1928,6 +1928,7 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
 	 * verify that the entire region is valid here.
 	 */
 	while (start_gfn <= end_gfn) {
+		nr_pages_avail = 0;
 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
 					   &nr_pages_avail);
@@ -2275,7 +2276,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }
 
-void kvm_vcpu_on_spin(struct kvm_vcpu *me)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
@@ -2306,6 +2307,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 			continue;
 		if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
 			continue;
+		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+			continue;
 		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 			continue;
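Note on the new GICC_APRn handlers above: both sides of the change reduce to two small index calculations — the "(addr >> 2) & 0x3" decode that picks one of the four 32-bit active-priority registers, and the num_pri_bits mapping in vgic_v3_max_apr_idx() that bounds which ICH_AP1Rn registers are implemented. The standalone user-space sketch below replays both calculations; it is illustrative only (apr_index() and max_apr_idx() are hypothetical names, not kernel functions), and it assumes GICC_APR0 sits at offset 0xd0 as in the Linux GIC header.

#include <stdint.h>
#include <stdio.h>

#define GIC_CPU_ACTIVEPRIO 0xd0	/* GICC_APR0; APR1..APR3 follow at 4-byte steps */

/* Mirrors the "(addr >> 2) & 0x3" decode in vgic_mmio_read/write_apr(). */
static int apr_index(uint64_t addr)
{
	return (addr >> 2) & 0x3;
}

/* Mirrors vgic_v3_max_apr_idx(): highest valid ICH_AP1Rn per num_pri_bits. */
static int max_apr_idx(int num_pri_bits)
{
	switch (num_pri_bits) {
	case 7: return 3;	/* AP1R0..AP1R3 */
	case 6: return 1;	/* AP1R0..AP1R1 */
	default: return 0;	/* AP1R0 only */
	}
}

int main(void)
{
	for (uint64_t addr = GIC_CPU_ACTIVEPRIO;
	     addr < GIC_CPU_ACTIVEPRIO + 16; addr += 4)
		printf("GICC offset 0x%llx -> APR%d\n",
		       (unsigned long long)addr, apr_index(addr));

	for (int bits = 5; bits <= 7; bits++)
		printf("num_pri_bits=%d -> highest valid AP1Rn index: %d\n",
		       bits, max_apr_idx(bits));
	return 0;
}

Because 0xd0 is 16-byte aligned, the decode yields the same APRn index whether addr is the raw GICC offset or an offset relative to the start of the ACTIVEPRIO block.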
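The kvm_main.c change threads a new yield_to_kernel_mode flag through the directed-yield path: kvm_vcpu_on_spin() now additionally skips vCPUs that kvm_arch_vcpu_in_kernel() reports as running in user mode (on arm this is vcpu_mode_priv()), so a spinning vCPU preferentially boosts lock holders preempted in kernel mode. Below is a minimal stand-alone sketch of that filter, using simplified stand-in types; fake_vcpu and yield_candidate() are hypothetical, not kernel code, and they collapse the loop's several skip conditions into two booleans.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct kvm_vcpu; illustrative only. */
struct fake_vcpu {
	int  id;
	bool runnable;	/* stand-in for kvm_arch_vcpu_runnable() */
	bool in_kernel;	/* stand-in for kvm_arch_vcpu_in_kernel() */
};

/* Mirrors the shape of the skip conditions in the patched kvm_vcpu_on_spin() loop. */
static bool yield_candidate(const struct fake_vcpu *v, bool yield_to_kernel_mode)
{
	if (!v->runnable)
		return false;
	/* The new check: optionally only boost vCPUs that are in kernel mode. */
	if (yield_to_kernel_mode && !v->in_kernel)
		return false;
	return true;
}

int main(void)
{
	const struct fake_vcpu vcpus[] = {
		{ 0, true,  true  },	/* runnable, in kernel: boosted */
		{ 1, true,  false },	/* runnable, in user:   skipped */
		{ 2, false, true  },	/* not runnable:        skipped */
	};

	for (int i = 0; i < 3; i++)
		printf("vcpu %d: yield candidate = %s\n", vcpus[i].id,
		       yield_candidate(&vcpus[i], true) ? "yes" : "no");
	return 0;
}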