Diffstat (limited to 'virt/kvm/arm/vgic')
 virt/kvm/arm/vgic/vgic-its.c     | 199
 virt/kvm/arm/vgic/vgic-mmio-v2.c |  22
 virt/kvm/arm/vgic/vgic-mmio-v3.c |  17
 virt/kvm/arm/vgic/vgic-mmio.c    |  44
 virt/kvm/arm/vgic/vgic-v2.c      |   5
 virt/kvm/arm/vgic/vgic-v3.c      |  12
 virt/kvm/arm/vgic/vgic.c         |  62
 virt/kvm/arm/vgic/vgic.h         |   3
 8 files changed, 206 insertions(+), 158 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 547f12dc4d54..d2a99ab0ade7 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -278,6 +278,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
u8 prop;
int ret;
+ unsigned long flags;
ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
&prop, 1);
@@ -285,15 +286,15 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
if (ret)
return ret;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
irq->priority = LPI_PROP_PRIORITY(prop);
irq->enabled = LPI_PROP_ENABLE_BIT(prop);
- vgic_queue_irq_unlock(kvm, irq);
+ vgic_queue_irq_unlock(kvm, irq, flags);
} else {
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
}
return 0;
@@ -393,6 +394,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
int ret = 0;
u32 *intids;
int nr_irqs, i;
+ unsigned long flags;
nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
if (nr_irqs < 0)
@@ -420,9 +422,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
}
irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = pendmask & (1U << bit_nr);
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -515,6 +517,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
{
struct kvm_vcpu *vcpu;
struct its_ite *ite;
+ unsigned long flags;
if (!its->enabled)
return -EBUSY;
@@ -530,9 +533,9 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
if (!vcpu->arch.vgic_cpu.lpis_enabled)
return -EBUSY;
- spin_lock(&ite->irq->irq_lock);
+ spin_lock_irqsave(&ite->irq->irq_lock, flags);
ite->irq->pending_latch = true;
- vgic_queue_irq_unlock(kvm, ite->irq);
+ vgic_queue_irq_unlock(kvm, ite->irq, flags);
return 0;
}
@@ -894,7 +897,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
}
/* Requires the its_lock to be held. */
-static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
+static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
struct its_ite *ite, *temp;
@@ -910,6 +913,24 @@ static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
kfree(device);
}
+/* its lock must be held */
+static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
+{
+ struct its_device *cur, *temp;
+
+ list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
+ vgic_its_free_device(kvm, cur);
+}
+
+/* its lock must be held */
+static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
+{
+ struct its_collection *cur, *temp;
+
+ list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
+ vgic_its_free_collection(its, cur->collection_id);
+}
+
/* Must be called with its_lock mutex held */
static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
u32 device_id, gpa_t itt_addr,
@@ -957,7 +978,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
* by removing the mapping and re-establishing it.
*/
if (device)
- vgic_its_unmap_device(kvm, device);
+ vgic_its_free_device(kvm, device);
/*
* The spec does not say whether unmapping a not-mapped device
@@ -1410,7 +1431,7 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
unsigned long val)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
- u64 entry_size, device_type;
+ u64 entry_size, table_type;
u64 reg, *regptr, clearbits = 0;
/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
@@ -1421,12 +1442,12 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
case 0:
regptr = &its->baser_device_table;
entry_size = abi->dte_esz;
- device_type = GITS_BASER_TYPE_DEVICE;
+ table_type = GITS_BASER_TYPE_DEVICE;
break;
case 1:
regptr = &its->baser_coll_table;
entry_size = abi->cte_esz;
- device_type = GITS_BASER_TYPE_COLLECTION;
+ table_type = GITS_BASER_TYPE_COLLECTION;
clearbits = GITS_BASER_INDIRECT;
break;
default:
@@ -1438,10 +1459,24 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
reg &= ~clearbits;
reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
- reg |= device_type << GITS_BASER_TYPE_SHIFT;
+ reg |= table_type << GITS_BASER_TYPE_SHIFT;
reg = vgic_sanitise_its_baser(reg);
*regptr = reg;
+
+ if (!(reg & GITS_BASER_VALID)) {
+ /* Take the its_lock to prevent a race with a save/restore */
+ mutex_lock(&its->its_lock);
+ switch (table_type) {
+ case GITS_BASER_TYPE_DEVICE:
+ vgic_its_free_device_list(kvm, its);
+ break;
+ case GITS_BASER_TYPE_COLLECTION:
+ vgic_its_free_collection_list(kvm, its);
+ break;
+ }
+ mutex_unlock(&its->its_lock);
+ }
}
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
@@ -1623,46 +1658,17 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
}
-static void vgic_its_free_device(struct kvm *kvm, struct its_device *dev)
-{
- struct its_ite *ite, *tmp;
-
- list_for_each_entry_safe(ite, tmp, &dev->itt_head, ite_list)
- its_free_ite(kvm, ite);
- list_del(&dev->dev_list);
- kfree(dev);
-}
-
static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
struct kvm *kvm = kvm_dev->kvm;
struct vgic_its *its = kvm_dev->private;
- struct list_head *cur, *temp;
-
- /*
- * We may end up here without the lists ever having been initialized.
- * Check this and bail out early to avoid dereferencing a NULL pointer.
- */
- if (!its->device_list.next)
- return;
mutex_lock(&its->its_lock);
- list_for_each_safe(cur, temp, &its->device_list) {
- struct its_device *dev;
- dev = list_entry(cur, struct its_device, dev_list);
- vgic_its_free_device(kvm, dev);
- }
+ vgic_its_free_device_list(kvm, its);
+ vgic_its_free_collection_list(kvm, its);
- list_for_each_safe(cur, temp, &its->collection_list) {
- struct its_collection *coll;
-
- coll = list_entry(cur, struct its_collection, coll_list);
- list_del(cur);
- kfree(coll);
- }
mutex_unlock(&its->its_lock);
-
kfree(its);
}
@@ -2290,29 +2296,13 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
*/
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
- struct kvm *kvm = its->dev->kvm;
int ret;
- mutex_lock(&kvm->lock);
- mutex_lock(&its->its_lock);
-
- if (!lock_all_vcpus(kvm)) {
- mutex_unlock(&its->its_lock);
- mutex_unlock(&kvm->lock);
- return -EBUSY;
- }
-
ret = vgic_its_save_device_tables(its);
if (ret)
- goto out;
-
- ret = vgic_its_save_collection_table(its);
+ return ret;
-out:
- unlock_all_vcpus(kvm);
- mutex_unlock(&its->its_lock);
- mutex_unlock(&kvm->lock);
- return ret;
+ return vgic_its_save_collection_table(its);
}
/**
@@ -2322,29 +2312,13 @@ out:
*/
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
- struct kvm *kvm = its->dev->kvm;
int ret;
- mutex_lock(&kvm->lock);
- mutex_lock(&its->its_lock);
-
- if (!lock_all_vcpus(kvm)) {
- mutex_unlock(&its->its_lock);
- mutex_unlock(&kvm->lock);
- return -EBUSY;
- }
-
ret = vgic_its_restore_collection_table(its);
if (ret)
- goto out;
-
- ret = vgic_its_restore_device_tables(its);
-out:
- unlock_all_vcpus(kvm);
- mutex_unlock(&its->its_lock);
- mutex_unlock(&kvm->lock);
+ return ret;
- return ret;
+ return vgic_its_restore_device_tables(its);
}
static int vgic_its_commit_v0(struct vgic_its *its)
@@ -2363,6 +2337,19 @@ static int vgic_its_commit_v0(struct vgic_its *its)
return 0;
}
+static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
+{
+ /* We need to keep the ABI specific field values */
+ its->baser_coll_table &= ~GITS_BASER_VALID;
+ its->baser_device_table &= ~GITS_BASER_VALID;
+ its->cbaser = 0;
+ its->creadr = 0;
+ its->cwriter = 0;
+ its->enabled = 0;
+ vgic_its_free_device_list(kvm, its);
+ vgic_its_free_collection_list(kvm, its);
+}
+
static int vgic_its_has_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
@@ -2377,6 +2364,8 @@ static int vgic_its_has_attr(struct kvm_device *dev,
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
return 0;
+ case KVM_DEV_ARM_ITS_CTRL_RESET:
+ return 0;
case KVM_DEV_ARM_ITS_SAVE_TABLES:
return 0;
case KVM_DEV_ARM_ITS_RESTORE_TABLES:
@@ -2389,6 +2378,41 @@ static int vgic_its_has_attr(struct kvm_device *dev,
return -ENXIO;
}
+static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
+{
+ const struct vgic_its_abi *abi = vgic_its_get_abi(its);
+ int ret = 0;
+
+ if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
+ return 0;
+
+ mutex_lock(&kvm->lock);
+ mutex_lock(&its->its_lock);
+
+ if (!lock_all_vcpus(kvm)) {
+ mutex_unlock(&its->its_lock);
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
+ }
+
+ switch (attr) {
+ case KVM_DEV_ARM_ITS_CTRL_RESET:
+ vgic_its_reset(kvm, its);
+ break;
+ case KVM_DEV_ARM_ITS_SAVE_TABLES:
+ ret = abi->save_tables(its);
+ break;
+ case KVM_DEV_ARM_ITS_RESTORE_TABLES:
+ ret = abi->restore_tables(its);
+ break;
+ }
+
+ unlock_all_vcpus(kvm);
+ mutex_unlock(&its->its_lock);
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
static int vgic_its_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
@@ -2414,19 +2438,8 @@ static int vgic_its_set_attr(struct kvm_device *dev,
return vgic_register_its_iodev(dev->kvm, its, addr);
}
- case KVM_DEV_ARM_VGIC_GRP_CTRL: {
- const struct vgic_its_abi *abi = vgic_its_get_abi(its);
-
- switch (attr->attr) {
- case KVM_DEV_ARM_VGIC_CTRL_INIT:
- /* Nothing to do */
- return 0;
- case KVM_DEV_ARM_ITS_SAVE_TABLES:
- return abi->save_tables(its);
- case KVM_DEV_ARM_ITS_RESTORE_TABLES:
- return abi->restore_tables(its);
- }
- }
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ return vgic_its_ctrl(dev->kvm, its, attr->attr);
case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 reg;
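
[Editorial note, not part of the patch] The new KVM_DEV_ARM_ITS_CTRL_RESET attribute handled above is driven from userspace through the generic KVM device-attribute interface; the uapi definition of the constant lives in the arch kvm.h header change that is not shown in this diff. A minimal sketch of a hypothetical caller, assuming its_fd was obtained earlier via KVM_CREATE_DEVICE with KVM_DEV_TYPE_ARM_VGIC_ITS:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical userspace helper: ask KVM to reset the emulated ITS. */
	static int its_reset(int its_fd)
	{
		struct kvm_device_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.group = KVM_DEV_ARM_VGIC_GRP_CTRL;
		attr.attr  = KVM_DEV_ARM_ITS_CTRL_RESET;

		/* Optionally probe for support before issuing the reset. */
		if (ioctl(its_fd, KVM_HAS_DEVICE_ATTR, &attr))
			return -1;

		return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
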
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index b3d4a10f09a1..e21e2f49b005 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -74,6 +74,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
int mode = (val >> 24) & 0x03;
int c;
struct kvm_vcpu *vcpu;
+ unsigned long flags;
switch (mode) {
case 0x0: /* as specified by targets */
@@ -97,11 +98,11 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
irq->source |= 1U << source_vcpu->vcpu_id;
- vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+ vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
vgic_put_irq(source_vcpu->kvm, irq);
}
}
@@ -131,6 +132,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
int i;
+ unsigned long flags;
/* GICD_ITARGETSR[0-7] are read-only */
if (intid < VGIC_NR_PRIVATE_IRQS)
@@ -140,13 +142,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
int target;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->targets = (val >> (i * 8)) & cpu_mask;
target = irq->targets ? __ffs(irq->targets) : 0;
irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -174,17 +176,18 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
{
u32 intid = addr & 0x0f;
int i;
+ unsigned long flags;
for (i = 0; i < len; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->source &= ~((val >> (i * 8)) & 0xff);
if (!irq->source)
irq->pending_latch = false;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -195,19 +198,20 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
{
u32 intid = addr & 0x0f;
int i;
+ unsigned long flags;
for (i = 0; i < len; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->source |= (val >> (i * 8)) & 0xff;
if (irq->source) {
irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else {
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
}
vgic_put_irq(vcpu->kvm, irq);
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 408ef06638fc..83786108829e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -129,6 +129,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
struct vgic_irq *irq;
+ unsigned long flags;
/* The upper word is WI for us since we don't implement Aff3. */
if (addr & 4)
@@ -139,13 +140,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
if (!irq)
return;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
/* We only care about and preserve Aff0, Aff1 and Aff2. */
irq->mpidr = val & GENMASK(23, 0);
irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -241,11 +242,12 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
+ unsigned long flags;
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i, &val)) {
/*
* pending_latch is set irrespective of irq type
@@ -253,10 +255,10 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
* restore irq config before pending info.
*/
irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else {
irq->pending_latch = false;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
}
vgic_put_irq(vcpu->kvm, irq);
@@ -799,6 +801,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
int sgi, c;
int vcpu_id = vcpu->vcpu_id;
bool broadcast;
+ unsigned long flags;
sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
@@ -837,10 +840,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index c1e4bdd66131..deb51ee16a3d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -69,13 +69,14 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
+ unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->enabled = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -87,15 +88,16 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
+ unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->enabled = false;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -126,14 +128,15 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
+ unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -144,15 +147,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
+ unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = false;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -181,7 +185,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
bool new_active_state)
{
struct kvm_vcpu *requester_vcpu;
- spin_lock(&irq->irq_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&irq->irq_lock, flags);
/*
* The vcpu parameter here can mean multiple things depending on how
@@ -216,9 +221,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
irq->active = new_active_state;
if (new_active_state)
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
}
/*
@@ -352,14 +357,15 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
int i;
+ unsigned long flags;
for (i = 0; i < len; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
/* Narrow the priority range to what we actually support */
irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -390,6 +396,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
int i;
+ unsigned long flags;
for (i = 0; i < len * 4; i++) {
struct vgic_irq *irq;
@@ -404,14 +411,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
continue;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i * 2 + 1, &val))
irq->config = VGIC_CONFIG_EDGE;
else
irq->config = VGIC_CONFIG_LEVEL;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
@@ -443,6 +450,7 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
{
int i;
int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+ unsigned long flags;
for (i = 0; i < 32; i++) {
struct vgic_irq *irq;
@@ -459,12 +467,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
* restore irq config before line level.
*/
new_level = !!(val & (1U << i));
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->line_level = new_level;
if (new_level)
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index e4187e52bb26..80897102da26 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -62,6 +62,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
int lr;
+ unsigned long flags;
cpuif->vgic_hcr &= ~GICH_HCR_UIE;
@@ -77,7 +78,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
/* Always preserve the active bit */
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -104,7 +105,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
irq->pending_latch = false;
}
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 96ea597db0e7..863351c090d8 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -44,6 +44,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
u32 model = vcpu->kvm->arch.vgic.vgic_model;
int lr;
+ unsigned long flags;
cpuif->vgic_hcr &= ~ICH_HCR_UIE;
@@ -66,7 +67,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
if (!irq) /* An LPI could have been unmapped. */
continue;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
/* Always preserve the active bit */
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -94,7 +95,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
irq->pending_latch = false;
}
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -278,6 +279,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
bool status;
u8 val;
int ret;
+ unsigned long flags;
retry:
vcpu = irq->target_vcpu;
@@ -296,13 +298,13 @@ retry:
status = val & (1 << bit_nr);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->target_vcpu != vcpu) {
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
goto retry;
}
irq->pending_latch = status;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
if (status) {
/* clear consumed data */
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index fed717e07938..e54ef2fdf73d 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -53,6 +53,10 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* vcpuX->vcpu_id < vcpuY->vcpu_id:
* spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
* spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *
+ * Since the VGIC must support injecting virtual interrupts from ISRs, we have
+ * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * spinlocks for any lock that may be taken while injecting an interrupt.
*/
/*
@@ -261,7 +265,8 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
* Needs to be entered with the IRQ lock already held, but will return
* with all locks dropped.
*/
-bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+ unsigned long flags)
{
struct kvm_vcpu *vcpu;
@@ -279,7 +284,7 @@ retry:
* not need to be inserted into an ap_list and there is also
* no more work for us to do.
*/
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
/*
* We have to kick the VCPU here, because we could be
@@ -301,11 +306,11 @@ retry:
* We must unlock the irq lock to take the ap_list_lock where
* we are going to insert this new pending interrupt.
*/
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
/* someone can do stuff here, which we re-check below */
- spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
spin_lock(&irq->irq_lock);
/*
@@ -322,9 +327,9 @@ retry:
if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
spin_unlock(&irq->irq_lock);
- spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
goto retry;
}
@@ -337,7 +342,7 @@ retry:
irq->vcpu = vcpu;
spin_unlock(&irq->irq_lock);
- spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
@@ -367,6 +372,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
{
struct kvm_vcpu *vcpu;
struct vgic_irq *irq;
+ unsigned long flags;
int ret;
trace_vgic_update_irq_pending(cpuid, intid, level);
@@ -383,11 +389,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
if (!irq)
return -EINVAL;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (!vgic_validate_injection(irq, level, owner)) {
/* Nothing to see here, move along... */
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(kvm, irq);
return 0;
}
@@ -397,7 +403,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
else
irq->pending_latch = true;
- vgic_queue_irq_unlock(kvm, irq);
+ vgic_queue_irq_unlock(kvm, irq, flags);
vgic_put_irq(kvm, irq);
return 0;
@@ -406,15 +412,16 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ unsigned long flags;
BUG_ON(!irq);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->hw = true;
irq->hwintid = phys_irq;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return 0;
@@ -423,6 +430,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
struct vgic_irq *irq;
+ unsigned long flags;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
@@ -430,12 +438,12 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
BUG_ON(!irq);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->hw = false;
irq->hwintid = 0;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return 0;
@@ -486,9 +494,10 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq, *tmp;
+ unsigned long flags;
retry:
- spin_lock(&vgic_cpu->ap_list_lock);
+ spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -528,7 +537,7 @@ retry:
/* This interrupt looks like it has to be migrated. */
spin_unlock(&irq->irq_lock);
- spin_unlock(&vgic_cpu->ap_list_lock);
+ spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
/*
* Ensure locking order by always locking the smallest
@@ -542,7 +551,7 @@ retry:
vcpuB = vcpu;
}
- spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+ spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
SINGLE_DEPTH_NESTING);
spin_lock(&irq->irq_lock);
@@ -566,11 +575,11 @@ retry:
spin_unlock(&irq->irq_lock);
spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
- spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+ spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
goto retry;
}
- spin_unlock(&vgic_cpu->ap_list_lock);
+ spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -703,6 +712,8 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
+ DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
+
spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
vgic_flush_lr_state(vcpu);
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
@@ -735,11 +746,12 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
bool pending = false;
+ unsigned long flags;
if (!vcpu->kvm->arch.vgic.enabled)
return false;
- spin_lock(&vgic_cpu->ap_list_lock);
+ spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
spin_lock(&irq->irq_lock);
@@ -750,7 +762,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
break;
}
- spin_unlock(&vgic_cpu->ap_list_lock);
+ spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
return pending;
}
@@ -776,10 +788,14 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
bool map_is_active;
+ unsigned long flags;
- spin_lock(&irq->irq_lock);
+ if (!vgic_initialized(vcpu->kvm))
+ return false;
+
+ spin_lock_irqsave(&irq->irq_lock, flags);
map_is_active = irq->hw && irq->active;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
return map_is_active;
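
[Editorial note, not part of the patch] The updated lock-ordering comment in vgic.c above states the rule this series enforces: any path that may queue an interrupt while interrupts are disabled (e.g. from an ISR) must take the outer locks with spin_lock_irqsave() and hand the saved flags to vgic_queue_irq_unlock(), which drops the lock and restores the flags on every exit path. A minimal sketch of such a caller (hypothetical function, mirroring the pattern used throughout the patch):

	static void example_forward_irq(struct kvm *kvm, struct vgic_irq *irq)
	{
		unsigned long flags;

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		/* Releases irq_lock and restores the interrupt flags internally. */
		vgic_queue_irq_unlock(kvm, irq, flags);
	}
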
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index bf9ceab67c77..4f8aecb07ae6 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -140,7 +140,8 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
-bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+ unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,