author    Marc Zyngier <maz@kernel.org>  2022-05-16 17:48:35 +0100
committer Marc Zyngier <maz@kernel.org>  2022-05-16 17:48:35 +0100
commit    ec2cff6cbdbecc4897f97b01992efc0ab86aa492 (patch)
tree      7d2b70308922025170595d0f8ce6a25d4d00646f /arch/arm64/kvm
parent    3b8e21e3c3b7d3fb5bdfe09ba3dfa9b828b1ca96 (diff)
parent    49a1a2c70a7fd820fe60a65a3e38ab9095082dc9 (diff)
Merge branch kvm-arm64/vgic-invlpir into kvmarm-master/next
* kvm-arm64/vgic-invlpir:
  : .
  : Implement MMIO-based LPI invalidation for vGICv3.
  : .
  KVM: arm64: vgic-v3: Advertise GICR_CTLR.{IR, CES} as a new GICD_IIDR revision
  KVM: arm64: vgic-v3: Implement MMIO-based LPI invalidation
  KVM: arm64: vgic-v3: Expose GICR_CTLR.RWP when disabling LPIs
  irqchip/gic-v3: Exposes bit values for GICR_CTLR.{IR, CES}

Signed-off-by: Marc Zyngier <maz@kernel.org>
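As a guest-facing illustration of what this merge provides, here is a minimal sketch of how a GICv3 driver might drive the newly emulated direct-LPI registers. It is not code from this series: the offsets and the GICR_SYNCR.Busy bit are taken from the GICv3 architecture spec, and rd_base is an assumed, already-mapped redistributor frame.

#include <linux/io.h>
#include <linux/bits.h>
#include <linux/types.h>

#define GICR_INVLPIR	0x00a0
#define GICR_INVALLR	0x00b0
#define GICR_SYNCR	0x00c0

/*
 * Invalidate a single LPI by INTID. Only the lower 32 bits of
 * GICR_INVLPIR carry the INTID; the upper half selects a vPE,
 * which this emulation ignores (see vgic_mmio_write_invlpi()
 * below).
 */
static void direct_lpi_inv(void __iomem *rd_base, u32 intid)
{
	writeq_relaxed(intid, rd_base + GICR_INVLPIR);

	/* GICR_SYNCR bit 0 is Busy; poll until the RD has caught up */
	while (readl_relaxed(rd_base + GICR_SYNCR) & BIT(0))
		cpu_relax();
}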
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c     |   7
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c      |  64
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v2.c  |  18
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v3.c  | 125
-rw-r--r--  arch/arm64/kvm/vgic/vgic.h          |  10
5 files changed, 187 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index fc00304fe7d8..f84e04f334c6 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -319,7 +319,12 @@ int vgic_init(struct kvm *kvm)
vgic_debug_init(kvm);
- dist->implementation_rev = 2;
+ /*
+ * If userspace didn't set the GIC implementation revision,
+ * default to the latest and greatest. You know you want it.
+ */
+ if (!dist->implementation_rev)
+ dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST;
dist->initialized = true;
out:
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 2e13402be3bd..292ca5bb8a50 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -683,7 +683,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
if (!vcpu)
return E_ITS_INT_UNMAPPED_INTERRUPT;
- if (!vcpu->arch.vgic_cpu.lpis_enabled)
+ if (!vgic_lpis_enabled(vcpu))
return -EBUSY;
vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
@@ -1272,6 +1272,11 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
return 0;
}
+int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq)
+{
+ return update_lpi_config(kvm, irq, NULL, true);
+}
+
/*
* The INV command syncs the configuration bits from the memory table.
* Must be called with the its_lock mutex held.
@@ -1288,7 +1293,41 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
if (!ite)
return E_ITS_INV_UNMAPPED_INTERRUPT;
- return update_lpi_config(kvm, ite->irq, NULL, true);
+ return vgic_its_inv_lpi(kvm, ite->irq);
+}
+
+/**
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
+ *
+ * Contrary to the INVALL command, this targets an RD instead of a
+ * collection, and we don't need to hold the its_lock, since no ITS is
+ * involved here.
+ */
+int vgic_its_invall(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ int irq_count, i = 0;
+ u32 *intids;
+
+ irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
+ if (irq_count < 0)
+ return irq_count;
+
+ for (i = 0; i < irq_count; i++) {
+ struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
+ if (!irq)
+ continue;
+ update_lpi_config(kvm, irq, vcpu, false);
+ vgic_put_irq(kvm, irq);
+ }
+
+ kfree(intids);
+
+ if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+ its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
+ return 0;
}
/*
@@ -1305,32 +1344,13 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
u32 coll_id = its_cmd_get_collection(its_cmd);
struct its_collection *collection;
struct kvm_vcpu *vcpu;
- struct vgic_irq *irq;
- u32 *intids;
- int irq_count, i;
collection = find_collection(its, coll_id);
if (!its_is_collection_mapped(collection))
return E_ITS_INVALL_UNMAPPED_COLLECTION;
vcpu = kvm_get_vcpu(kvm, collection->target_addr);
-
- irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
- if (irq_count < 0)
- return irq_count;
-
- for (i = 0; i < irq_count; i++) {
- irq = vgic_get_irq(kvm, NULL, intids[i]);
- if (!irq)
- continue;
- update_lpi_config(kvm, irq, vcpu, false);
- vgic_put_irq(kvm, irq);
- }
-
- kfree(intids);
-
- if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
- its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+ vgic_its_invall(vcpu);
return 0;
}
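With the refactoring above, the ITS INVALL command and the new redistributor-scoped invalidation converge on vgic_its_invall(); only the collection-to-vcpu resolution differs. The guest-side counterpart of that RD-scoped path, continuing the hedged sketch from the top of this page (same assumed rd_base and register offsets):

/*
 * Invalidate every LPI routed to this redistributor. A zero
 * payload is assumed sufficient, as the upper bits of
 * GICR_INVALLR only select a vPE on GICv4.1.
 */
static void direct_lpi_invall(void __iomem *rd_base)
{
	writeq_relaxed(0, rd_base + GICR_INVALLR);

	while (readl_relaxed(rd_base + GICR_SYNCR) & BIT(0))
		cpu_relax();
}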
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
index 12e4c223e6b8..77a67e9d3d14 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
@@ -73,9 +73,13 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ u32 reg;
+
switch (addr & 0x0c) {
case GIC_DIST_IIDR:
- if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
+ reg = vgic_mmio_read_v2_misc(vcpu, addr, len);
+ if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
return -EINVAL;
/*
@@ -87,8 +91,16 @@ static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
* migration from old kernels to new kernels with legacy
* userspace.
*/
- vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
- return 0;
+ reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+ switch (reg) {
+ case KVM_VGIC_IMP_REV_2:
+ case KVM_VGIC_IMP_REV_3:
+ vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
+ dist->implementation_rev = reg;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
vgic_mmio_write_v2_misc(vcpu, addr, len, val);
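The uaccess path above accepts a GICD_IIDR write only if everything but the Revision field is unchanged, and only for revisions 2 and 3: legacy userspace restoring an old snapshot keeps revision 2 behaviour, while new userspace can opt into revision 3. A VMM could pin the revision along these lines; this is a hedged sketch against the KVM device API, assuming the GICD_IIDR byte offset of 0x8 as the attr for vcpu/MPIDR 0, with error handling kept minimal:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Read-modify-write GICD_IIDR through the vGIC device fd so that
 * the Revision field (bits [15:12]) is pinned before the vGIC is
 * initialized.
 */
static int pin_gicd_revision(int vgic_fd, uint32_t rev)
{
	uint32_t iidr = 0;
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = 0x8,	/* GICD_IIDR byte offset */
		.addr  = (uint64_t)(unsigned long)&iidr,
	};

	if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
		return -1;

	iidr &= ~(0xfU << 12);
	iidr |= (rev & 0xfU) << 12;
	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}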
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 58e40b4874f8..f7aa7bcd6fb8 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -155,13 +155,27 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ u32 reg;
switch (addr & 0x0c) {
case GICD_TYPER2:
- case GICD_IIDR:
if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
return -EINVAL;
return 0;
+ case GICD_IIDR:
+ reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
+ if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
+ return -EINVAL;
+
+ reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
+ switch (reg) {
+ case KVM_VGIC_IMP_REV_2:
+ case KVM_VGIC_IMP_REV_3:
+ dist->implementation_rev = reg;
+ return 0;
+ default:
+ return -EINVAL;
+ }
case GICD_CTLR:
/* Not a GICv4.1? No HW SGIs */
if (!kvm_vgic_global_state.has_gicv4_1)
@@ -221,34 +235,58 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
vgic_put_irq(vcpu->kvm, irq);
}
+bool vgic_lpis_enabled(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
+}
+
static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ unsigned long val;
- return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
-}
+ val = atomic_read(&vgic_cpu->ctlr);
+ if (vgic_get_implementation_rev(vcpu) >= KVM_VGIC_IMP_REV_3)
+ val |= GICR_CTLR_IR | GICR_CTLR_CES;
+ return val;
+}
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- bool was_enabled = vgic_cpu->lpis_enabled;
+ u32 ctlr;
if (!vgic_has_its(vcpu->kvm))
return;
- vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
+ if (!(val & GICR_CTLR_ENABLE_LPIS)) {
+ /*
+ * Don't disable if RWP is set, as there is already an
+ * ongoing disable. Funky guest...
+ */
+ ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr,
+ GICR_CTLR_ENABLE_LPIS,
+ GICR_CTLR_RWP);
+ if (ctlr != GICR_CTLR_ENABLE_LPIS)
+ return;
- if (was_enabled && !vgic_cpu->lpis_enabled) {
vgic_flush_pending_lpis(vcpu);
vgic_its_invalidate_cache(vcpu->kvm);
- }
+ atomic_set_release(&vgic_cpu->ctlr, 0);
+ } else {
+ ctlr = atomic_cmpxchg_acquire(&vgic_cpu->ctlr, 0,
+ GICR_CTLR_ENABLE_LPIS);
+ if (ctlr != 0)
+ return;
- if (!was_enabled && vgic_cpu->lpis_enabled)
vgic_enable_lpis(vcpu);
+ }
}
static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
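From the guest's point of view, the disable half of this state machine is observable: GICR_CTLR.RWP reads as set while vgic_flush_pending_lpis() runs between the two atomic updates. A hedged sketch of the matching guest polling loop, using the GICR_CTLR_* bit definitions this series adds to the irqchip header (GICR_CTLR sits at offset 0 of the RD frame):

static void rd_disable_lpis(void __iomem *rd_base)
{
	u32 ctlr = readl_relaxed(rd_base + GICR_CTLR);

	writel_relaxed(ctlr & ~GICR_CTLR_ENABLE_LPIS, rd_base + GICR_CTLR);

	/* RWP clears once the hypervisor has flushed pending LPIs */
	while (readl_relaxed(rd_base + GICR_CTLR) & GICR_CTLR_RWP)
		cpu_relax();
}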
@@ -478,11 +516,10 @@ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u64 old_propbaser, propbaser;
/* Storing a value with LPIs already enabled is undefined */
- if (vgic_cpu->lpis_enabled)
+ if (vgic_lpis_enabled(vcpu))
return;
do {
@@ -513,7 +550,7 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
u64 old_pendbaser, pendbaser;
/* Storing a value with LPIs already enabled is undefined */
- if (vgic_cpu->lpis_enabled)
+ if (vgic_lpis_enabled(vcpu))
return;
do {
@@ -525,6 +562,63 @@ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
pendbaser) != old_pendbaser);
}
+static unsigned long vgic_mmio_read_sync(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ return !!atomic_read(&vcpu->arch.vgic_cpu.syncr_busy);
+}
+
+static void vgic_set_rdist_busy(struct kvm_vcpu *vcpu, bool busy)
+{
+ if (busy) {
+ atomic_inc(&vcpu->arch.vgic_cpu.syncr_busy);
+ smp_mb__after_atomic();
+ } else {
+ smp_mb__before_atomic();
+ atomic_dec(&vcpu->arch.vgic_cpu.syncr_busy);
+ }
+}
+
+static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_irq *irq;
+
+ /*
+ * If the guest wrote only to the upper 32-bit part of the
+ * register, drop the write on the floor, as it is only for
+ * vPEs (which we don't support for obvious reasons).
+ *
+ * Also discard the access if LPIs are not enabled.
+ */
+ if ((addr & 4) || !vgic_lpis_enabled(vcpu))
+ return;
+
+ vgic_set_rdist_busy(vcpu, true);
+
+ irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val));
+ if (irq) {
+ vgic_its_inv_lpi(vcpu->kvm, irq);
+ vgic_put_irq(vcpu->kvm, irq);
+ }
+
+ vgic_set_rdist_busy(vcpu, false);
+}
+
+static void vgic_mmio_write_invall(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ /* See vgic_mmio_write_invlpi() for the early return rationale */
+ if ((addr & 4) || !vgic_lpis_enabled(vcpu))
+ return;
+
+ vgic_set_rdist_busy(vcpu, true);
+ vgic_its_invall(vcpu);
+ vgic_set_rdist_busy(vcpu, false);
+}
+
/*
* The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
* redistributors, while SPIs are covered by registers in the distributor
@@ -630,6 +724,15 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_INVLPIR,
+ vgic_mmio_read_raz, vgic_mmio_write_invlpi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_INVALLR,
+ vgic_mmio_read_raz, vgic_mmio_write_invall, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_SYNCR,
+ vgic_mmio_read_sync, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
VGIC_ACCESS_32bit),
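With the three register windows wired up, a guest can probe for the feature before using it. A hedged sketch of such a probe; it assumes (per the advertising patch in this merge) that GICR_CTLR.IR, bit 2, flags support for the invalidate registers and is only exposed once GICD_IIDR reports revision 3:

static bool rd_has_lpi_inv_regs(void __iomem *rd_base)
{
	return readl_relaxed(rd_base + GICR_CTLR) & GICR_CTLR_IR;
}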
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 3fd6c86a7ef3..4c6bdd321faa 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -98,6 +98,11 @@
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
+static inline u32 vgic_get_implementation_rev(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.vgic.implementation_rev;
+}
+
/* Requires the irq_lock to be held by the caller. */
static inline bool irq_is_pending(struct vgic_irq *irq)
{
@@ -308,6 +313,7 @@ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
(base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
}
+bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
u32 devid, u32 eventid, struct vgic_irq **irq);
@@ -317,6 +323,10 @@ void vgic_lpi_translation_cache_init(struct kvm *kvm);
void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
void vgic_its_invalidate_cache(struct kvm *kvm);
+/* GICv4.1 MMIO interface */
+int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
+int vgic_its_invall(struct kvm_vcpu *vcpu);
+
bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);