 arch/x86/kvm/mmu/tdp_mmu.c | 32 ++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7cfc0639b1ef..c8a1149cb229 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -501,6 +501,12 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
bool flush_needed = false;
tdp_root_for_each_pte(iter, root, start, end) {
+ if (can_yield &&
+ tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
+ flush_needed = false;
+ continue;
+ }
+
if (!is_shadow_present_pte(iter.old_spte))
continue;
@@ -515,9 +521,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
continue;
tdp_mmu_set_spte(kvm, &iter, 0);
-
- flush_needed = !(can_yield &&
- tdp_mmu_iter_cond_resched(kvm, &iter, true));
+ flush_needed = true;
}
return flush_needed;
}
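
Taken together, the two zap_gfn_range() hunks move the reschedule check to the top of the loop so the walk can yield even on iterations that zap nothing. Because tdp_mmu_iter_cond_resched() is handed the pending-flush flag and, judging by the old code it replaces (which cleared flush_needed only when the yield-with-flush succeeded), performs the TLB flush when it does yield, the local flush_needed is reset after a yield. A rough reassembly of the resulting loop shape, with the parts of the function not shown in this diff elided (this is a sketch for reading the patch, not standalone code):

	bool flush_needed = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		/*
		 * Reschedule check first, so the walk can yield even when
		 * nothing is zapped this iteration.  A yield with a pending
		 * flush performs that flush, hence the reset below.
		 */
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
			flush_needed = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/* ... checks not shown in this excerpt elided ... */

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush_needed = true;
	}

	return flush_needed;

zap_collapsible_spte_range() at the end of the diff follows the same shape, with spte_set standing in for the pending-flush flag.
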
@@ -880,6 +884,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
min_level, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+ continue;
+
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
continue;
@@ -888,8 +895,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
spte_set = true;
-
- tdp_mmu_iter_cond_resched(kvm, &iter, false);
}
return spte_set;
}
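
wrprot_gfn_range() gets the same treatment in its no-flush form: the yield check moves to the top of the loop and is passed false, since write protection here owes no TLB flush, and the old trailing tdp_mmu_iter_cond_resched() call, which only ran after an SPTE had actually been changed, is dropped. The same shape repeats in clear_dirty_gfn_range() and set_dirty_gfn_range() below. A rough sketch of the resulting loop, again with the unshown parts of the function elided rather than reconstructed:

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
		/* Yield check up front; no TLB flush is owed, so pass false. */
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		/* ... computation of the write-protected new_spte elided ... */

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;
	}

	return spte_set;
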
@@ -933,6 +938,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
bool spte_set = false;
tdp_root_for_each_leaf_pte(iter, root, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+ continue;
+
if (spte_ad_need_write_protect(iter.old_spte)) {
if (is_writable_pte(iter.old_spte))
new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -947,8 +955,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
spte_set = true;
-
- tdp_mmu_iter_cond_resched(kvm, &iter, false);
}
return spte_set;
}
@@ -1056,6 +1062,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
bool spte_set = false;
tdp_root_for_each_pte(iter, root, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+ continue;
+
if (!is_shadow_present_pte(iter.old_spte))
continue;
@@ -1063,8 +1072,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
tdp_mmu_set_spte(kvm, &iter, new_spte);
spte_set = true;
-
- tdp_mmu_iter_cond_resched(kvm, &iter, false);
}
return spte_set;
@@ -1105,6 +1112,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
bool spte_set = false;
tdp_root_for_each_pte(iter, root, start, end) {
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+ spte_set = false;
+ continue;
+ }
+
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
continue;
@@ -1116,7 +1128,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
tdp_mmu_set_spte(kvm, &iter, 0);
- spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
+ spte_set = true;
}
if (spte_set)