author     Rusty Russell <rusty@rustcorp.com.au>    2015-02-11 15:15:09 +1030
committer  Rusty Russell <rusty@rustcorp.com.au>    2015-02-11 16:47:31 +1030
commit     c9e433e4b852b70ea267388cf9b5d8096b04c44c
tree       e456fdc2a7b08ded02723f9235e6d6afd7991642
parent     8ed313001a892f240269dea05d4b925cbd150492
lguest: add infrastructure to check mappings.
We normally abort the guest unconditionally when it gives us a bad
address, but in the next patch we want to copy some bytes which may not
be mapped.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--  drivers/lguest/lg.h           |  1
-rw-r--r--  drivers/lguest/page_tables.c  | 42
2 files changed, 30 insertions(+), 13 deletions(-)
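As a rough illustration of what the new interface enables (a sketch only, not code from this patch or the one that follows it): a caller that needs to copy bytes from a Guest virtual address which may not be mapped can translate page by page with __guest_pa() and hand any failure back to its own caller, instead of letting guest_pa() kill the Guest. The helper name copy_bytes_from_guest is invented for illustration, and __lgread() is assumed to be the usual lguest primitive for copying from a Guest-physical address; both would live somewhere that includes "lg.h".

/*
 * Illustrative sketch only (assumes "lg.h" and the usual kernel headers):
 * copy bytes from a Guest virtual address that may not be mapped.
 */
static bool copy_bytes_from_guest(struct lg_cpu *cpu, void *dst,
                                  unsigned long vaddr, unsigned len)
{
        while (len) {
                unsigned long paddr;
                /* Never cross a page boundary in a single read. */
                unsigned chunk = min_t(unsigned, len,
                                       PAGE_SIZE - offset_in_page(vaddr));

                /* No mapping?  Tell the caller; don't kill the Guest. */
                if (!__guest_pa(cpu, vaddr, &paddr))
                        return false;

                /* __lgread() copies from a Guest-physical address. */
                __lgread(cpu, dst, paddr, chunk);

                dst += chunk;
                vaddr += chunk;
                len -= chunk;
        }
        return true;
}

The diff below adds __guest_pa() and turns guest_pa() into a thin wrapper that keeps the old kill-on-bad-address behaviour for existing callers.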
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 020fec5bb072..9da4f351e077 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -202,6 +202,7 @@ void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
 bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode);
 void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
 unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
 void page_table_guest_data_init(struct lg_cpu *cpu);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index e8b55c3a6170..69c35caa955a 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -647,7 +647,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu)
 /*:*/
 /* We walk down the guest page tables to get a guest-physical address */
-unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
 {
         pgd_t gpgd;
         pte_t gpte;
@@ -656,31 +656,47 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
 #endif
         /* Still not set up? Just map 1:1. */
-        if (unlikely(cpu->linear_pages))
-                return vaddr;
+        if (unlikely(cpu->linear_pages)) {
+                *paddr = vaddr;
+                return true;
+        }
         /* First step: get the top-level Guest page table entry. */
         gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
         /* Toplevel not present? We can't map it in. */
-        if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
-                kill_guest(cpu, "Bad address %#lx", vaddr);
-                return -1UL;
-        }
+        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+                goto fail;
 #ifdef CONFIG_X86_PAE
         gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
-        if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) {
-                kill_guest(cpu, "Bad address %#lx", vaddr);
-                return -1UL;
-        }
+        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+                goto fail;
         gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
 #else
         gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
 #endif
         if (!(pte_flags(gpte) & _PAGE_PRESENT))
-                kill_guest(cpu, "Bad address %#lx", vaddr);
+                goto fail;
+
+        *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+        return true;
+
+fail:
+        *paddr = -1UL;
+        return false;
+}
-        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+/*
+ * This is the version we normally use: kills the Guest if it uses a
+ * bad address
+ */
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+{
+        unsigned long paddr;
+
+        if (!__guest_pa(cpu, vaddr, &paddr))
+                kill_guest(cpu, "Bad address %#lx", vaddr);
+        return paddr;
 }
 /*