author    Andrew Honig <ahonig@google.com>    2013-03-29 09:35:21 -0700
committer Gleb Natapov <gleb@redhat.com>      2013-04-07 13:05:35 +0300
commit    8f964525a121f2ff2df948dac908dcc65be21b5b (patch)
tree      1986d7677a1cae8f639c91812da2d8c6ed5bba26 /virt/kvm/kvm_main.c
parent    09a6e1f4ad32243989b30485f78985c0923284cd (diff)
download  linux-8f964525a121f2ff2df948dac908dcc65be21b5b.tar.bz2
KVM: Allow cross page reads and writes from cached translations.
This patch adds support for kvm_gfn_to_hva_cache_init functions for reads and writes that will cross a page. If the range falls within the same memslot, then this will be a fast operation. If the range is split between two memslots, then the slower kvm_read_guest and kvm_write_guest are used.

Tested: Test against kvm_clock unit tests.

Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
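
For illustration only (not part of this commit): a minimal sketch of how a caller might use the length-aware cache after this change. Only kvm_gfn_to_hva_cache_init, kvm_write_guest_cached and kvm_write_guest come from the interfaces touched by this patch; the region_state / region_init / region_update names are hypothetical.

/*
 * Hypothetical caller sketch, assuming only the interfaces changed by this
 * patch (kvm_gfn_to_hva_cache_init with a length, kvm_write_guest_cached).
 */
#include <linux/kvm_host.h>

struct region_state {
	struct gfn_to_hva_cache cache;	/* hva cache for the guest region */
	unsigned long len;		/* length registered at init time */
};

static int region_init(struct kvm *kvm, struct region_state *rs,
			gpa_t gpa, unsigned long len)
{
	/*
	 * Passing the full length lets the cache validate the whole
	 * [gpa, gpa + len) range up front, even if it crosses a page
	 * or a memslot boundary.
	 */
	int ret = kvm_gfn_to_hva_cache_init(kvm, &rs->cache, gpa, len);

	if (ret)
		return ret;	/* some part of the range is unmapped */
	rs->len = len;
	return 0;
}

static int region_update(struct kvm *kvm, struct region_state *rs,
			 void *data, unsigned long len)
{
	/*
	 * Never exceed the length given at init time; the cached helpers
	 * BUG_ON() otherwise (see the diff below).
	 */
	if (WARN_ON(len > rs->len))
		return -EINVAL;
	/*
	 * Fast path when the cached region sits in one memslot; otherwise
	 * kvm_write_guest_cached() falls back to kvm_write_guest().
	 */
	return kvm_write_guest_cached(kvm, &rs->cache, data, len);
}

If the registered range spans two memslots, ghc->memslot is left NULL at init time and every cached write above transparently takes the kvm_write_guest slow path.
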
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	47
1 file changed, 37 insertions, 10 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index adc68feb5c5a..f18013f09e68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa)
+			      gpa_t gpa, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int offset = offset_in_page(gpa);
-	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t start_gfn = gpa >> PAGE_SHIFT;
+	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+	gfn_t nr_pages_avail;
 
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
-	ghc->memslot = gfn_to_memslot(kvm, gfn);
-	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-	if (!kvm_is_error_hva(ghc->hva))
+	ghc->len = len;
+	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
 		ghc->hva += offset;
-	else
-		return -EFAULT;
-
+	} else {
+		/*
+		 * If the requested region crosses two memslots, we still
+		 * verify that the entire region is valid here.
+		 */
+		while (start_gfn <= end_gfn) {
+			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+						   &nr_pages_avail);
+			if (kvm_is_error_hva(ghc->hva))
+				return -EFAULT;
+			start_gfn += nr_pages_avail;
+		}
+		/* Use the slow path for cross page reads and writes. */
+		ghc->memslot = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;