path: root/arch/powerpc/kvm/book3s_xive_template.c
author      Linus Torvalds <torvalds@linux-foundation.org>    2019-05-17 10:33:30 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2019-05-17 10:33:30 -0700
commit      0ef0fd351550130129bbdb77362488befd7b69d2 (patch)
tree        23186172f5f85c06e18e3ee1a9619879df03c5df    /arch/powerpc/kvm/book3s_xive_template.c
parent      4489da7183099f569a7d3dd819c975073c04bc72 (diff)
parent      c011d23ba046826ccf8c4a4a6c1d01c9ccaa1403 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "ARM:
   - support for SVE and Pointer Authentication in guests
   - PMU improvements

  POWER:
   - support for direct access to the POWER9 XIVE interrupt controller
   - memory and performance optimizations

  x86:
   - support for accessing memory not backed by struct page
   - fixes and refactoring

  Generic:
   - dirty page tracking improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (155 commits)
  kvm: fix compilation on aarch64
  Revert "KVM: nVMX: Expose RDPMC-exiting only when guest supports PMU"
  kvm: x86: Fix L1TF mitigation for shadow MMU
  KVM: nVMX: Disable intercept for FS/GS base MSRs in vmcs02 when possible
  KVM: PPC: Book3S: Remove useless checks in 'release' method of KVM device
  KVM: PPC: Book3S HV: XIVE: Fix spelling mistake "acessing" -> "accessing"
  KVM: PPC: Book3S HV: Make sure to load LPID for radix VCPUs
  kvm: nVMX: Set nested_run_pending in vmx_set_nested_state after checks complete
  tests: kvm: Add tests for KVM_SET_NESTED_STATE
  KVM: nVMX: KVM_SET_NESTED_STATE - Tear down old EVMCS state before setting new state
  tests: kvm: Add tests for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_CPU_ID
  tests: kvm: Add tests to .gitignore
  KVM: Introduce KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
  KVM: Fix kvm_clear_dirty_log_protect off-by-(minus-)one
  KVM: Fix the bitmap range to copy during clear dirty
  KVM: arm64: Fix ptrauth ID register masking logic
  KVM: x86: use direct accessors for RIP and RSP
  KVM: VMX: Use accessors for GPRs outside of dedicated caching logic
  KVM: x86: Omit caching logic for always-available GPRs
  kvm, x86: Properly check whether a pfn is an MMIO or not
  ...
Diffstat (limited to 'arch/powerpc/kvm/book3s_xive_template.c')
-rw-r--r--    arch/powerpc/kvm/book3s_xive_template.c    78
1 file changed, 40 insertions, 38 deletions
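
The diff below reworks GLUE(X_PFX,scan_interrupts) so that the pending-IPI check runs after the per-queue bookkeeping instead of before it. When reading the hunks it helps to recall the XICS priority convention they rely on: lower numeric values are more favoured, xc->cppr is the guest's current priority (interrupts at or above it are masked), and xc->mfrr is the priority of a pending IPI, 0xff meaning none. Below is a minimal, self-contained sketch of that comparison, assuming only those semantics; ipi_wins() is an illustrative helper written for this page, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative helper (not kernel code): should a pending IPI of priority
 * mfrr be delivered instead of the most favoured queued priority found so
 * far (prio)?  XICS convention: lower value == more favoured; anything at
 * or above cppr is masked.
 */
static bool ipi_wins(uint8_t prio, uint8_t mfrr, uint8_t cppr)
{
        return prio >= mfrr     /* the IPI is at least as favoured ... */
            && mfrr < cppr;     /* ... and is not itself masked by cppr */
}

int main(void)
{
        printf("%d\n", ipi_wins(5, 4, 6));      /* 1: IPI at 4 beats queued prio 5 */
        printf("%d\n", ipi_wins(5, 0xff, 6));   /* 0: mfrr 0xff means no IPI pending */
        return 0;
}
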
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 033363d6e764..0737acfd17f1 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -130,24 +130,14 @@ static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
*/
prio = ffs(pending) - 1;
- /*
- * If the most favoured prio we found pending is less
- * favored (or equal) than a pending IPI, we return
- * the IPI instead.
- *
- * Note: If pending was 0 and mfrr is 0xff, we will
- * not spurriously take an IPI because mfrr cannot
- * then be smaller than cppr.
- */
- if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- break;
- }
-
/* Don't scan past the guest cppr */
- if (prio >= xc->cppr || prio > 7)
+ if (prio >= xc->cppr || prio > 7) {
+ if (xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ }
break;
+ }
/* Grab queue and pointers */
q = &xc->queues[prio];
@@ -184,9 +174,12 @@ skip_ipi:
* been set and another occurrence of the IPI will trigger.
*/
if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
- if (scan_type == scan_fetch)
+ if (scan_type == scan_fetch) {
GLUE(X_PFX,source_eoi)(xc->vp_ipi,
&xc->vp_ipi_data);
+ q->idx = idx;
+ q->toggle = toggle;
+ }
/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
WARN_ON(hirq && hirq != XICS_IPI);
@@ -199,32 +192,41 @@ skip_ipi:
if (hirq == XICS_DUMMY)
goto skip_ipi;
- /* If fetching, update queue pointers */
- if (scan_type == scan_fetch) {
- q->idx = idx;
- q->toggle = toggle;
- }
-
- /* Something found, stop searching */
- if (hirq)
- break;
-
- /* Clear the pending bit on the now empty queue */
- pending &= ~(1 << prio);
+ /* Clear the pending bit if the queue is now empty */
+ if (!hirq) {
+ pending &= ~(1 << prio);
- /*
- * Check if the queue count needs adjusting due to
- * interrupts being moved away.
- */
- if (atomic_read(&q->pending_count)) {
- int p = atomic_xchg(&q->pending_count, 0);
- if (p) {
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(p > atomic_read(&q->count));
+ WARN_ON(p > atomic_read(&q->count));
#endif
- atomic_sub(p, &q->count);
+ atomic_sub(p, &q->count);
+ }
}
}
+
+ /*
+ * If the most favoured prio we found pending is less
+ * favored (or equal) than a pending IPI, we return
+ * the IPI instead.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
}
/* If we are just taking a "peek", do nothing else */
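
Taken together, the hunks reorder the body of the scan loop: the queue at the chosen priority is popped and its bookkeeping (pending bit, pending_count adjustment, idx/toggle updates) is handled first, and only afterwards may a pending, unmasked IPI be returned in its place; the removed "something found, stop searching" break is no longer needed because the enclosing loop condition (not visible in these hunks) already ends the scan once hirq is non-zero. The compact model below illustrates that ordering as a standalone program; struct model_vcpu, pop_queue() and the XICS_IPI value are stand-ins invented for this sketch, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define XICS_IPI 2      /* placeholder value, for the sketch only */

/* Simplified model of the vCPU fields the scan uses. */
struct model_vcpu {
        uint8_t cppr;   /* current priority: everything at or above it is masked */
        uint8_t mfrr;   /* pending IPI priority, 0xff when no IPI is pending */
};

/* Stand-in for popping xc->queues[prio]; returns 0 when the queue is empty. */
static uint32_t pop_queue(int prio)
{
        return prio == 5 ? 0x100 : 0;   /* pretend only prio 5 has an entry */
}

/*
 * Model of the reworked scan order: pop and book-keep the queue first,
 * then let an equally or more favoured, unmasked IPI take over.
 */
static uint32_t scan_model(struct model_vcpu *xc, uint8_t pending)
{
        uint32_t hirq = 0;
        uint8_t prio;

        while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
                prio = ffs(pending) - 1;        /* wraps to 0xff when nothing pending */

                /* Stop at the guest cppr, but an unmasked IPI may still win */
                if (prio >= xc->cppr || prio > 7) {
                        if (xc->mfrr < xc->cppr) {
                                prio = xc->mfrr;
                                hirq = XICS_IPI;
                        }
                        break;
                }

                /* Stand-in for scanning the queue (EOI and locking elided) */
                hirq = pop_queue(prio);

                /* Queue turned out empty: clear its pending bit */
                if (!hirq)
                        pending &= ~(1 << prio);

                /* Only now may the pending IPI be returned instead */
                if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
                        prio = xc->mfrr;
                        hirq = XICS_IPI;
                        break;
                }

                /* (a fetch would commit the queue idx/toggle here) */
        }
        return hirq;
}

int main(void)
{
        struct model_vcpu xc = { .cppr = 8, .mfrr = 3 };

        /* Prio 5 is queued, but the IPI at prio 3 is more favoured. */
        printf("0x%x\n", (unsigned)scan_model(&xc, 1 << 5));   /* XICS_IPI */

        xc.mfrr = 6;    /* IPI now less favoured than the queued prio 5 */
        printf("0x%x\n", (unsigned)scan_model(&xc, 1 << 5));   /* 0x100 */
        return 0;
}

Compared with the old order, the queue at the scanned priority now gets its pending bit cleared and its counters adjusted even on an iteration where the IPI ends up winning, instead of being skipped entirely.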