author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-10-21 15:52:04 +1100
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-10-21 15:52:04 +1100
commit     a02efb906d12c9d4eb2ab7c59049ba9545e5412d (patch)
tree       bf1f6467978ec63a22f42299ecac2ee7f7e73336 /arch/powerpc
parent     84dfcb4b318463cd4883b6a19937824f49aee564 (diff)
parent     2515ddc6db8eb49a79f0fe5e67ff09ac7c81eab4 (diff)
download   linux-a02efb906d12c9d4eb2ab7c59049ba9545e5412d.tar.bz2
Merge commit 'origin' into master

Manual merge of:
	arch/powerpc/Kconfig
	arch/powerpc/include/asm/page.h
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |   5
-rw-r--r--  arch/powerpc/include/asm/elf.h                |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h           |  14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h            |  12
-rw-r--r--  arch/powerpc/include/asm/page.h               |   6
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h         |   7
-rw-r--r--  arch/powerpc/include/asm/pci.h                |  11
-rw-r--r--  arch/powerpc/include/asm/ps3av.h              |   3
-rw-r--r--  arch/powerpc/include/asm/ptrace.h             |   2
-rw-r--r--  arch/powerpc/include/asm/types.h              |   7
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c             |   4
-rw-r--r--  arch/powerpc/kernel/crash_dump.c              |  10
-rw-r--r--  arch/powerpc/kernel/iommu.c                   |  23
-rw-r--r--  arch/powerpc/kernel/pci-common.c              | 136
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c               |  99
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c                    |  53
-rw-r--r--  arch/powerpc/kvm/Kconfig                      |  11
-rw-r--r--  arch/powerpc/kvm/Makefile                     |   6
-rw-r--r--  arch/powerpc/kvm/booke_guest.c                |  17
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S           |  79
-rw-r--r--  arch/powerpc/kvm/emulate.c                    |   8
-rw-r--r--  arch/powerpc/kvm/powerpc.c                    |  99
-rw-r--r--  arch/powerpc/mm/mem.c                         |  17
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype        |   1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c  |   1
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c              |  16
26 files changed, 436 insertions(+), 215 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bbe149c26518..369d93e7377c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -19,6 +19,9 @@ config WORD_SIZE
default 64 if PPC64
default 32 if !PPC64
+config ARCH_PHYS_ADDR_T_64BIT
+ def_bool PPC64 || PHYS_64BIT
+
config MMU
bool
default y
@@ -224,6 +227,8 @@ config PPC_OF_PLATFORM_PCI
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
source "arch/powerpc/sysdev/Kconfig"
source "arch/powerpc/platforms/Kconfig"
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 64c6ee22eefd..d812929390e4 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -232,7 +232,7 @@ typedef elf_vrregset_t elf_fpxregset_t;
#endif /* __powerpc64__ */
#ifdef __powerpc64__
-# define SET_PERSONALITY(ex, ibcs2) \
+# define SET_PERSONALITY(ex) \
do { \
unsigned long new_flags = 0; \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
@@ -256,7 +256,7 @@ do { \
# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
(exec_stk != EXSTACK_DISABLE_X) : 0)
#else
-# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+# define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#endif /* __powerpc64__ */
extern int dcache_bsize;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 2655e2a4831e..34b52b7180cd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -81,11 +81,17 @@ struct kvm_vcpu_arch {
struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
/* Pages which are referenced in the shadow TLB. */
struct page *shadow_pages[PPC44x_TLB_SIZE];
- /* Copy of the host's TLB. */
- struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+ /* Track which TLB entries we've modified in the current exit. */
+ u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
u32 host_stack;
u32 host_pid;
+ u32 host_dbcr0;
+ u32 host_dbcr1;
+ u32 host_dbcr2;
+ u32 host_iac[4];
+ u32 host_msr;
u64 fpr[32];
u32 gpr[32];
@@ -123,7 +129,11 @@ struct kvm_vcpu_arch {
u32 ivor[16];
u32 ivpr;
u32 pir;
+
+ u32 shadow_pid;
u32 pid;
+ u32 swap_pid;
+
u32 pvr;
u32 ccr0;
u32 ccr1;
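
The shadow_tlb_mod array replaces the full host-TLB copy with per-entry dirty tracking: on guest entry, only shadow entries modified since the last exit are rewritten into the hardware TLB (see the booke_interrupts.S hunk below). A C-level sketch of the idea, with write_hw_tlb_entry() a hypothetical stand-in for the tlbwe sequence:

	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		if (!vcpu->arch.shadow_tlb_mod[i])
			continue;	/* unchanged since last exit: skip */
		write_hw_tlb_entry(i, &vcpu->arch.shadow_tlb[i]);
	}
	/* Everything is now in sync; clear the bitmap for the next exit. */
	memset(vcpu->arch.shadow_tlb_mod, 0, sizeof(vcpu->arch.shadow_tlb_mod));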
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a8b068792260..8931ba729d2b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -64,6 +64,10 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
+
+/* XXX Book E specific */
+extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
@@ -92,4 +96,12 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
kvm_vcpu_block(vcpu);
}
+static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+ if (vcpu->arch.pid != new_pid) {
+ vcpu->arch.pid = new_pid;
+ vcpu->arch.swap_pid = 1;
+ }
+}
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 120f4d494257..c0b8d4a29a91 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -10,11 +10,13 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/asm-compat.h>
-#include <asm/kdump.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#else
+#include <asm/types.h>
#endif
+#include <asm/asm-compat.h>
+#include <asm/kdump.h>
/*
* On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index ae2ea803a0f2..9047af7baa69 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -74,6 +74,13 @@ struct pci_controller {
unsigned long pci_io_size;
#endif
+ /* Some machines have a special region to forward the ISA
+ * "memory" cycles such as VGA memory regions. Left to 0
+ * if unsupported
+ */
+ resource_size_t isa_mem_phys;
+ resource_size_t isa_mem_size;
+
struct pci_ops *ops;
unsigned int __iomem *cfg_addr;
void __iomem *cfg_data;
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 0e52c7828ea4..39d547fde956 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -123,6 +123,16 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
+extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
+ size_t count);
+extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
+ size_t count);
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state);
+
+#define HAVE_PCI_LEGACY 1
+
#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
/*
* For 64-bit kernels, pci_unmap_{single,page} is not a nop.
@@ -226,5 +236,6 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
extern void pcibios_do_bus_setup(struct pci_bus *bus);
extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */
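
Defining HAVE_PCI_LEGACY makes the generic PCI sysfs code expose per-bus legacy_io and legacy_mem files backed by these hooks. A userspace sketch (the sysfs path and the port number are illustrative only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDWR);
		unsigned char status;

		/* The file offset is the port number; the kernel routes this
		 * read to pci_legacy_read() declared above. */
		if (fd >= 0 && pread(fd, &status, 1, 0x3da) == 1)
			printf("VGA input status: %#x\n", status);
		if (fd >= 0)
			close(fd);
		return 0;
	}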
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index fda98715cd35..5aa22cffdbd6 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -678,6 +678,8 @@ struct ps3av_pkt_avb_param {
u8 buf[PS3AV_PKT_AVB_PARAM_MAX_BUF_SIZE];
};
+/* channel status */
+extern u8 ps3av_mode_cs_info[];
/** command status **/
#define PS3AV_STATUS_SUCCESS 0x0000 /* success */
@@ -735,6 +737,7 @@ extern int ps3av_get_mode(void);
extern int ps3av_video_mode2res(u32, u32 *, u32 *);
extern int ps3av_video_mute(int);
extern int ps3av_audio_mute(int);
+extern int ps3av_audio_mute_analog(int);
extern int ps3av_dev_open(void);
extern int ps3av_dev_close(void);
extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 734e0754fb9b..280a90cc9894 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -129,7 +129,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
#define CHECK_FULL_REGS(regs) \
do { \
if ((regs)->trap & 1) \
- printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+ printk(KERN_CRIT "%s: partial register set\n", __func__); \
} while (0)
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index a9a9262e84a3..c004c13f291e 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -48,13 +48,6 @@ typedef struct {
typedef __vector128 vector128;
-/* Physical address used by some IO functions */
-#if defined(CONFIG_PPC64) || defined(CONFIG_PHYS_64BIT)
-typedef u64 phys_addr_t;
-#else
-typedef u32 phys_addr_t;
-#endif
-
#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
typedef u64 dma_addr_t;
#else
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 09febc582584..75c5dd0138fd 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -359,8 +359,8 @@ int main(void)
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
- DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+ DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
@@ -372,7 +372,7 @@ int main(void)
DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
- DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+ DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index a323c9b32ee1..97e056379728 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -27,6 +27,9 @@
#define DBG(fmt...)
#endif
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
void __init reserve_kdump_trampoline(void)
{
lmb_reserve(0, KDUMP_RESERVE_LIMIT);
@@ -66,7 +69,11 @@ void __init setup_kdump_trampoline(void)
DBG(" <- setup_kdump_trampoline()\n");
}
-#ifdef CONFIG_PROC_VMCORE
+/*
+ * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
+ * is_kdump_kernel() to determine if we are booting after a panic. Hence
+ * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
+ */
static int __init parse_elfcorehdr(char *p)
{
if (p)
@@ -75,7 +82,6 @@ static int __init parse_elfcorehdr(char *p)
return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
-#endif
static int __init parse_savemaxmem(char *p)
{
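
The comment above refers to the generic predicate that consumes elfcorehdr_addr; a sketch of it (assuming the include/linux/crash_dump.h definition of this era):

	/* Is this kernel booted as a crash/kdump capture kernel? */
	static inline int is_kdump_kernel(void)
	{
		return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
	}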
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 550a19399bfa..ea1ba89f9c90 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -51,17 +51,6 @@ static int protect4gb = 1;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
- unsigned long slen)
-{
- unsigned long npages;
-
- npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
- npages >>= IOMMU_PAGE_SHIFT;
-
- return npages;
-}
-
static int __init setup_protect4gb(char *str)
{
if (strcmp(str, "on") == 0)
@@ -325,7 +314,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long) sg_virt(s);
- npages = iommu_num_pages(vaddr, slen);
+ npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
align = 0;
if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
@@ -418,7 +407,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
unsigned long vaddr, npages;
vaddr = s->dma_address & IOMMU_PAGE_MASK;
- npages = iommu_num_pages(s->dma_address, s->dma_length);
+ npages = iommu_num_pages(s->dma_address, s->dma_length,
+ IOMMU_PAGE_SIZE);
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -452,7 +442,8 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sg->dma_length == 0)
break;
- npages = iommu_num_pages(dma_handle, sg->dma_length);
+ npages = iommu_num_pages(dma_handle, sg->dma_length,
+ IOMMU_PAGE_SIZE);
__iommu_free(tbl, dma_handle, npages);
sg = sg_next(sg);
}
@@ -584,7 +575,7 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
BUG_ON(direction == DMA_NONE);
uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size);
+ npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
if (tbl) {
align = 0;
@@ -617,7 +608,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
BUG_ON(direction == DMA_NONE);
if (tbl) {
- npages = iommu_num_pages(dma_handle, size);
+ npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
iommu_free(tbl, dma_handle, npages);
}
}
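
The removed local helper is superseded by a generic iommu_num_pages() that takes the IO page size explicitly, which is why every call site above gains an IOMMU_PAGE_SIZE argument. A sketch of the generic version (assuming the lib helper implementation of this era):

	unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
				      unsigned long io_page_size)
	{
		/* Round the span [addr, addr + len) up to whole IO pages. */
		unsigned long size = (addr & (io_page_size - 1)) + len;

		return DIV_ROUND_UP(size, io_page_size);
	}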
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 01ce8c38bae6..3815d84a1ef4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -451,7 +451,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
pci_dev_put(pdev);
}
- DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);
+ DBG("non-PCI map for %llx, prot: %lx\n",
+ (unsigned long long)offset, prot);
return __pgprot(prot);
}
@@ -490,6 +491,131 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return ret;
}
+/* This provides legacy IO read access on a bus */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+ unsigned long offset;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct resource *rp = &hose->io_resource;
+ void __iomem *addr;
+
+ /* Check if port can be supported by that bus. We only check
+ * the ranges of the PHB though, not the bus itself as the rules
+ * for forwarding legacy cycles down bridges are not our problem
+ * here. So if the host bridge supports it, we do it.
+ */
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ offset += port;
+
+ if (!(rp->flags & IORESOURCE_IO))
+ return -ENXIO;
+ if (offset < rp->start || (offset + size) > rp->end)
+ return -ENXIO;
+ addr = hose->io_base_virt + port;
+
+ switch(size) {
+ case 1:
+ *((u8 *)val) = in_8(addr);
+ return 1;
+ case 2:
+ if (port & 1)
+ return -EINVAL;
+ *((u16 *)val) = in_le16(addr);
+ return 2;
+ case 4:
+ if (port & 3)
+ return -EINVAL;
+ *((u32 *)val) = in_le32(addr);
+ return 4;
+ }
+ return -EINVAL;
+}
+
+/* This provides legacy IO write access on a bus */
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+ unsigned long offset;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct resource *rp = &hose->io_resource;
+ void __iomem *addr;
+
+ /* Check if port can be supported by that bus. We only check
+ * the ranges of the PHB though, not the bus itself as the rules
+ * for forwarding legacy cycles down bridges are not our problem
+ * here. So if the host bridge supports it, we do it.
+ */
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ offset += port;
+
+ if (!(rp->flags & IORESOURCE_IO))
+ return -ENXIO;
+ if (offset < rp->start || (offset + size) > rp->end)
+ return -ENXIO;
+ addr = hose->io_base_virt + port;
+
+ /* WARNING: The generic code is idiotic. It gets passed a pointer
+ * to what can be a 1, 2 or 4 byte quantity and always reads that
+ * as a u32, which means that we have to correct the location of
+ * the data read within those 32 bits for size 1 and 2
+ */
+ switch(size) {
+ case 1:
+ out_8(addr, val >> 24);
+ return 1;
+ case 2:
+ if (port & 1)
+ return -EINVAL;
+ out_le16(addr, val >> 16);
+ return 2;
+ case 4:
+ if (port & 3)
+ return -EINVAL;
+ out_le32(addr, val);
+ return 4;
+ }
+ return -EINVAL;
+}
+
+/* This provides legacy IO or memory mmap access on a bus */
+int pci_mmap_legacy_page_range(struct pci_bus *bus,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ resource_size_t offset =
+ ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
+ resource_size_t size = vma->vm_end - vma->vm_start;
+ struct resource *rp;
+
+ pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
+ pci_domain_nr(bus), bus->number,
+ mmap_state == pci_mmap_mem ? "MEM" : "IO",
+ (unsigned long long)offset,
+ (unsigned long long)(offset + size - 1));
+
+ if (mmap_state == pci_mmap_mem) {
+ if ((offset + size) > hose->isa_mem_size)
+ return -ENXIO;
+ offset += hose->isa_mem_phys;
+ } else {
+ unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ unsigned long roffset = offset + io_offset;
+ rp = &hose->io_resource;
+ if (!(rp->flags & IORESOURCE_IO))
+ return -ENXIO;
+ if (roffset < rp->start || (roffset + size) > rp->end)
+ return -ENXIO;
+ offset += hose->io_base_phys;
+ }
+ pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
+
+ vma->vm_pgoff = offset >> PAGE_SHIFT;
+ vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end)
@@ -592,6 +718,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
cpu_addr = of_translate_address(dev, ranges + 3);
size = of_read_number(ranges + pna + 3, 2);
ranges += np;
+
+ /* Skip this range if we failed translation or got a zero-sized
+ * region (some firmware tries to feed us nonsensical zero-sized
+ * regions, such as power3, which look like some kind of attempt
+ * at exposing the VGA memory hole)
+ */
if (cpu_addr == OF_BAD_ADDR || size == 0)
continue;
@@ -665,6 +797,8 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
isa_hole = memno;
if (primary || isa_mem_base == 0)
isa_mem_base = cpu_addr;
+ hose->isa_mem_phys = cpu_addr;
+ hose->isa_mem_size = size;
}
/* We get the PCI/Mem offset from the first range or
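
The size-1/size-2 shifts in pci_legacy_write() deserve a worked example: as the WARNING comment notes, the generic caller left-justifies small quantities in the u32 it passes down, so the value must be taken from the top of the word. A standalone sketch of the arithmetic (values illustrative only):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t val8  = 0x12u   << 24;	/* 1-byte datum as passed in */
		uint32_t val16 = 0x1234u << 16;	/* 2-byte datum as passed in */

		assert((uint8_t)(val8 >> 24)   == 0x12);	/* out_8(addr, val >> 24) */
		assert((uint16_t)(val16 >> 16) == 0x1234);	/* out_le16(addr, val >> 16) */
		return 0;
	}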
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index ff7de7b0797e..bb1cfcfdbbbb 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -61,42 +61,6 @@ asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
}
-int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
-{
- compat_ino_t ino;
- long err;
-
- if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
- !new_valid_dev(stat->rdev))
- return -EOVERFLOW;
-
- ino = stat->ino;
- if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
- return -EOVERFLOW;
-
- err = access_ok(VERIFY_WRITE, statbuf, sizeof(*statbuf)) ? 0 : -EFAULT;
- err |= __put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
- err |= __put_user(ino, &statbuf->st_ino);
- err |= __put_user(stat->mode, &statbuf->st_mode);
- err |= __put_user(stat->nlink, &statbuf->st_nlink);
- err |= __put_user(stat->uid, &statbuf->st_uid);
- err |= __put_user(stat->gid, &statbuf->st_gid);
- err |= __put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
- err |= __put_user(stat->size, &statbuf->st_size);
- err |= __put_user(stat->atime.tv_sec, &statbuf->st_atime);
- err |= __put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
- err |= __put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
- err |= __put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
- err |= __put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
- err |= __put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
- err |= __put_user(stat->blksize, &statbuf->st_blksize);
- err |= __put_user(stat->blocks, &statbuf->st_blocks);
- err |= __put_user(0, &statbuf->__unused4[0]);
- err |= __put_user(0, &statbuf->__unused4[1]);
-
- return err;
-}
-
/* Note: it is necessary to treat option as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
@@ -107,69 +71,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
return sys_sysfs((int)option, arg1, arg2);
}
-static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
-{
- long usec;
-
- if (!access_ok(VERIFY_READ, i, sizeof(*i)))
- return -EFAULT;
- if (__get_user(o->tv_sec, &i->tv_sec))
- return -EFAULT;
- if (__get_user(usec, &i->tv_usec))
- return -EFAULT;
- o->tv_nsec = usec * 1000;
- return 0;
-}
-
-static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
-{
- return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
- (__put_user(i->tv_sec, &o->tv_sec) |
- __put_user(i->tv_usec, &o->tv_usec)));
-}
-
-
-
-
-/* Translations due to time_t size differences. Which affects all
- sorts of things, like timeval and itimerval. */
-extern struct timezone sys_tz;
-
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (put_tv32(tv, &ktv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-
-
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- struct timespec kts;
- struct timezone ktz;
-
- if (tv) {
- if (get_ts32(&kts, tv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_from_user(&ktz, tz, sizeof(ktz)))
- return -EFAULT;
- }
-
- return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
-}
-
#ifdef CONFIG_SYSVIPC
long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
u32 fifth)
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5a5602da5091..2e227a412bc2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
@@ -109,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}
-/* Must be called with mmap_sem locked for writing. */
static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
unsigned int index)
{
@@ -124,6 +124,11 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
}
}
+void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
+{
+ vcpu->arch.shadow_tlb_mod[i] = 1;
+}
+
/* Caller must ensure that the specified guest TLB entry is safe to insert into
* the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
@@ -142,19 +147,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
stlbe = &vcpu->arch.shadow_tlb[victim];
/* Get reference to new page. */
- down_read(&current->mm->mmap_sem);
new_page = gfn_to_page(vcpu->kvm, gfn);
if (is_error_page(new_page)) {
printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
kvm_release_page_clean(new_page);
- up_read(&current->mm->mmap_sem);
return;
}
hpaddr = page_to_phys(new_page);
/* Drop reference to old page. */
kvmppc_44x_shadow_release(vcpu, victim);
- up_read(&current->mm->mmap_sem);
vcpu->arch.shadow_pages[victim] = new_page;
@@ -164,27 +166,30 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
/* XXX what about AS? */
- stlbe->tid = asid & 0xff;
+ stlbe->tid = !(asid & 0xff);
/* Force TS=1 for all guest mappings. */
/* For now we hardcode 4KB mappings, but it will be important to
* use host large pages in the future. */
stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
| PPC44x_TLB_4K;
-
stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
vcpu->arch.msr & MSR_PR);
+ kvmppc_tlbe_set_modified(vcpu, victim);
+
+ KVMTRACE_5D(STLB_WRITE, vcpu, victim,
+ stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
+ handler);
}
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid)
{
- unsigned int pid = asid & 0xff;
+ unsigned int pid = !(asid & 0xff);
int i;
/* XXX Replace loop with fancy data structures. */
- down_write(&current->mm->mmap_sem);
for (i = 0; i <= tlb_44x_hwater; i++) {
struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
unsigned int tid;
@@ -204,21 +209,35 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
kvmppc_44x_shadow_release(vcpu, i);
stlbe->word0 = 0;
+ kvmppc_tlbe_set_modified(vcpu, i);
+ KVMTRACE_5D(STLB_INVAL, vcpu, i,
+ stlbe->tid, stlbe->word0, stlbe->word1,
+ stlbe->word2, handler);
}
- up_write(&current->mm->mmap_sem);
}
-/* Invalidate all mappings, so that when they fault back in they will get the
- * proper permission bits. */
+/* Invalidate all mappings on the privilege switch after PID has been changed.
+ * The guest always runs with PID=1, so we must clear the entire TLB when
+ * switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
int i;
- /* XXX Replace loop with fancy data structures. */
- down_write(&current->mm->mmap_sem);
- for (i = 0; i <= tlb_44x_hwater; i++) {
- kvmppc_44x_shadow_release(vcpu, i);
- vcpu->arch.shadow_tlb[i].word0 = 0;
+ if (vcpu->arch.swap_pid) {
+ /* XXX Replace loop with fancy data structures. */
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+
+ /* Future optimization: clear only userspace mappings. */
+ kvmppc_44x_shadow_release(vcpu, i);
+ stlbe->word0 = 0;
+ kvmppc_tlbe_set_modified(vcpu, i);
+ KVMTRACE_5D(STLB_INVAL, vcpu, i,
+ stlbe->tid, stlbe->word0, stlbe->word1,
+ stlbe->word2, handler);
+ }
+ vcpu->arch.swap_pid = 0;
}
- up_write(&current->mm->mmap_sem);
+
+ vcpu->arch.shadow_pid = !usermode;
}
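
How the new TID/PID scheme fits together, condensed from this diff (tid_for() and shadow_pid() are illustrative names, not kernel functions):

	static u32 tid_for(u32 asid)    { return !(asid & 0xff); }
	static u32 shadow_pid(int user) { return !user; }

	/* Guest-global mappings (asid low byte 0) get shadow TID 1; guest
	 * per-process mappings get TID 0, which on 440 matches under any PID.
	 * In guest kernel mode the hardware PID is 1, so TID-1 entries match;
	 * in guest user mode the PID is 0 and TID-1 entries stay hidden.
	 * Because TID-0 entries match regardless of PID, a guest PID change
	 * (swap_pid) must clear the whole shadow TLB, as done above. */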
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 6b076010213b..53aaa66b25e5 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -37,6 +37,17 @@ config KVM_BOOKE_HOST
Provides host support for KVM on Book E PowerPC processors. Currently
this works on 440 processors only.
+config KVM_TRACE
+ bool "KVM trace support"
+ depends on KVM && MARKERS && SYSFS
+ select RELAY
+ select DEBUG_FS
+ default n
+ ---help---
+ This option allows reading a trace of kvm-related events through
+ relayfs. Note the ABI is not considered stable and will be
+ modified in future updates.
+
source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 04e3449e1f42..2a5d4397ac4b 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,9 +4,11 @@
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
-common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
-kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+
+kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o
obj-$(CONFIG_KVM) += kvm.o
AFLAGS_booke_interrupts.o := -I$(obj)
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 9c8ad850c6e3..7b2591e26bae 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -410,6 +410,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
+ case BOOKE_INTERRUPT_DEBUG: {
+ u32 dbsr;
+
+ vcpu->arch.pc = mfspr(SPRN_CSRR0);
+
+ /* clear IAC events in DBSR register */
+ dbsr = mfspr(SPRN_DBSR);
+ dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
+ mtspr(SPRN_DBSR, dbsr);
+
+ run->exit_reason = KVM_EXIT_DEBUG;
+ r = RESUME_HOST;
+ break;
+ }
+
default:
printk(KERN_EMERG "exit_nr %d\n", exit_nr);
BUG();
@@ -471,6 +486,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.msr = 0;
vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+ vcpu->arch.shadow_pid = 1;
+
/* Eye-catching number so we know if the guest takes an interrupt
* before it's programmed its own IVPR. */
vcpu->arch.ivpr = 0x55550000;
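
The DBSR write in the new BOOKE_INTERRUPT_DEBUG handler relies on DBSR status bits being write-one-to-clear. For reference, the IAC event bits it masks (assuming the asm/reg_booke.h values of this kernel generation):

	#define DBSR_IAC1	0x00800000	/* Instruction Address Compare 1 */
	#define DBSR_IAC2	0x00400000	/* Instruction Address Compare 2 */
	#define DBSR_IAC3	0x00200000	/* Instruction Address Compare 3 */
	#define DBSR_IAC4	0x00100000	/* Instruction Address Compare 4 */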
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 3b653b5309b8..95e165baf85f 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -42,7 +42,8 @@
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
- (1<<BOOKE_INTERRUPT_DTLB_MISS))
+ (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
+ (1<<BOOKE_INTERRUPT_DEBUG))
#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
(1<<BOOKE_INTERRUPT_DTLB_MISS))
@@ -331,51 +332,57 @@ lightweight_exit:
mfspr r3, SPRN_PID
stw r3, VCPU_HOST_PID(r4)
- lwz r3, VCPU_PID(r4)
+ lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
- /* Prevent all TLB updates. */
+ /* Prevent all asynchronous TLB updates. */
mfmsr r5
lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
andc r6, r5, r6
mtmsr r6
- /* Save the host's non-pinned TLB mappings, and load the guest mappings
- * over them. Leave the host's "pinned" kernel mappings in place. */
- /* XXX optimization: use generation count to avoid swapping unmodified
- * entries. */
+ /* Load the guest mappings, leaving the host's "pinned" kernel mappings
+ * in place. */
mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
- lis r8, tlb_44x_hwater@ha
- lwz r8, tlb_44x_hwater@l(r8)
- addi r3, r4, VCPU_HOST_TLB - 4
- addi r9, r4, VCPU_SHADOW_TLB - 4
- li r6, 0
+ li r5, PPC44x_TLB_SIZE
+ lis r5, tlb_44x_hwater@ha
+ lwz r5, tlb_44x_hwater@l(r5)
+ mtctr r5
+ addi r9, r4, VCPU_SHADOW_TLB
+ addi r5, r4, VCPU_SHADOW_MOD
+ li r3, 0
1:
- /* Save host entry. */
- tlbre r7, r6, PPC44x_TLB_PAGEID
- mfspr r5, SPRN_MMUCR
- stwu r5, 4(r3)
- stwu r7, 4(r3)
- tlbre r7, r6, PPC44x_TLB_XLAT
- stwu r7, 4(r3)
- tlbre r7, r6, PPC44x_TLB_ATTRIB
- stwu r7, 4(r3)
+ lbzx r7, r3, r5
+ cmpwi r7, 0
+ beq 3f
+
/* Load guest entry. */
- lwzu r7, 4(r9)
+ mulli r11, r3, TLBE_BYTES
+ add r11, r11, r9
+ lwz r7, 0(r11)
mtspr SPRN_MMUCR, r7
- lwzu r7, 4(r9)
- tlbwe r7, r6, PPC44x_TLB_PAGEID
- lwzu r7, 4(r9)
- tlbwe r7, r6, PPC44x_TLB_XLAT
- lwzu r7, 4(r9)
- tlbwe r7, r6, PPC44x_TLB_ATTRIB
- /* Increment index. */
- addi r6, r6, 1
- cmpw r6, r8
- blt 1b
+ lwz r7, 4(r11)
+ tlbwe r7, r3, PPC44x_TLB_PAGEID
+ lwz r7, 8(r11)
+ tlbwe r7, r3, PPC44x_TLB_XLAT
+ lwz r7, 12(r11)
+ tlbwe r7, r3, PPC44x_TLB_ATTRIB
+3:
+ addi r3, r3, 1 /* Increment index. */
+ bdnz 1b
+
mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
+ /* Clear bitmap of modified TLB entries */
+ li r5, PPC44x_TLB_SIZE>>2
+ mtctr r5
+ addi r5, r4, VCPU_SHADOW_MOD - 4
+ li r6, 0
+1:
+ stwu r6, 4(r5)
+ bdnz 1b
+
iccci 0, 0 /* XXX hack */
/* Load some guest volatiles. */
@@ -431,6 +438,14 @@ lightweight_exit:
oris r3, r3, KVMPPC_MSR_MASK@h
ori r3, r3, KVMPPC_MSR_MASK@l
mtsrr1 r3
+
+ /* Clear any debug events which occurred since we disabled MSR[DE].
+ * XXX This gives us a 3-instruction window in which a breakpoint
+ * intended for guest context could fire in the host instead. */
+ lis r3, 0xffff
+ ori r3, r3, 0xffff
+ mtspr SPRN_DBSR, r3
+
lwz r3, VCPU_GPR(r3)(r4)
lwz r4, VCPU_GPR(r4)(r4)
rfi
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 8c605d0a5488..0fce4fbdc20d 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -170,6 +170,10 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
}
+ KVMTRACE_5D(GTLB_WRITE, vcpu, index,
+ tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
+ handler);
+
return EMULATE_DONE;
}
@@ -504,7 +508,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MMUCR:
vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
case SPRN_PID:
- vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+ kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
case SPRN_CCR0:
vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
case SPRN_CCR1:
@@ -765,6 +769,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
}
+ KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);
+
if (advance)
vcpu->arch.pc += 4; /* Advance past emulated instruction. */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 53826a5f6c06..90a6fc422b23 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -239,18 +240,114 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE].
+ */
+static void kvmppc_disable_debug_interrupts(void)
+{
+ mtmsr(mfmsr() & ~MSR_DE);
+}
+
+static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
+{
+ kvmppc_disable_debug_interrupts();
+
+ mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+ mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+ mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+ mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+ mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+ mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+ mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+ mtmsr(vcpu->arch.host_msr);
+}
+
+static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
+{
+ struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+ u32 dbcr0 = 0;
+
+ vcpu->arch.host_msr = mfmsr();
+ kvmppc_disable_debug_interrupts();
+
+ /* Save host debug register state. */
+ vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+ vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+ vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+ vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+ vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+ vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+ vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+ /* set registers up for guest */
+
+ if (dbg->bp[0]) {
+ mtspr(SPRN_IAC1, dbg->bp[0]);
+ dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+ }
+ if (dbg->bp[1]) {
+ mtspr(SPRN_IAC2, dbg->bp[1]);
+ dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+ }
+ if (dbg->bp[2]) {
+ mtspr(SPRN_IAC3, dbg->bp[2]);
+ dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+ }
+ if (dbg->bp[3]) {
+ mtspr(SPRN_IAC4, dbg->bp[3]);
+ dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+ }
+
+ mtspr(SPRN_DBCR0, dbcr0);
+ mtspr(SPRN_DBCR1, 0);
+ mtspr(SPRN_DBCR2, 0);
+}
+
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int i;
+
+ if (vcpu->guest_debug.enabled)
+ kvmppc_load_guest_debug_registers(vcpu);
+
+ /* Mark every guest entry in the shadow TLB as modified, so that they
+ * will all be reloaded on the next vcpu run (instead of being
+ * demand-faulted). */
+ for (i = 0; i <= tlb_44x_hwater; i++)
+ kvmppc_tlbe_set_modified(vcpu, i);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ if (vcpu->guest_debug.enabled)
+ kvmppc_restore_host_debug_state(vcpu);
+
+ /* Don't leave guest TLB entries resident when being de-scheduled. */
+ /* XXX It would be nice to differentiate between heavyweight exit and
+ * sched_out here, since we could avoid the TLB flush for heavyweight
+ * exits. */
+ _tlbia();
}
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg)
{
- return -ENOTSUPP;
+ int i;
+
+ vcpu->guest_debug.enabled = dbg->enabled;
+ if (vcpu->guest_debug.enabled) {
+ for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
+ if (dbg->breakpoints[i].enabled)
+ vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
+ else
+ vcpu->guest_debug.bp[i] = 0;
+ }
+ }
+
+ return 0;
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
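
kvm_arch_vcpu_ioctl_debug_guest() is reached from userspace through the KVM_DEBUG_GUEST ioctl of this era (since replaced by KVM_SET_GUEST_DEBUG). A sketch of arming one hardware breakpoint; vcpu_fd and the guest address are purely illustrative:

	struct kvm_debug_guest dbg = { .enabled = 1 };

	dbg.breakpoints[0].enabled = 1;
	dbg.breakpoints[0].address = 0xc0000000;	/* hypothetical guest text address */
	if (ioctl(vcpu_fd, KVM_DEBUG_GUEST, &dbg) < 0)	/* vcpu_fd: fd from KVM_CREATE_VCPU */
		perror("KVM_DEBUG_GUEST");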
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 98d7bf99533a..b9e1a1da6e52 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -134,23 +134,6 @@ int arch_add_memory(int nid, u64 start, u64 size)
return __add_pages(zone, start_pfn, nr_pages);
}
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
-{
- unsigned long start_pfn, end_pfn;
- int ret;
-
- start_pfn = start >> PAGE_SHIFT;
- end_pfn = start_pfn + (size >> PAGE_SHIFT);
- ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
- if (ret)
- goto out;
- /* Arch-specific calls go here - next patch */
-out:
- return ret;
-}
-#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 439c5ba34ecf..548efa55c8fe 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -135,7 +135,6 @@ config PTE_64BIT
config PHYS_64BIT
bool 'Large physical address support' if E500 || PPC_86xx
depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
- select RESOURCES_64BIT
---help---
This option enables kernel support for larger than 32-bit physical
addresses. This feature may not be available on all cores.
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index a2460e2e1928..d0b1f3f4d9c8 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -263,6 +263,7 @@ static void __exit sputrace_exit(void)
remove_proc_entry("sputrace", NULL);
kfree(sputrace_log);
+ marker_synchronize_unregister();
}
module_init(sputrace_init);
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 9f6f73d584d6..d3e4d61030b5 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -39,13 +39,10 @@ static int dma_offset_set;
#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
-#ifdef CONFIG_RESOURCES_64BIT
-#define RES_TO_U32_LOW(val) U64_TO_U32_LOW(val)
-#define RES_TO_U32_HIGH(val) U64_TO_U32_HIGH(val)
-#else
-#define RES_TO_U32_LOW(val) (val)
-#define RES_TO_U32_HIGH(val) (0)
-#endif
+#define RES_TO_U32_LOW(val) \
+ ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
+#define RES_TO_U32_HIGH(val) \
+ ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
static inline int ppc440spe_revA(void)
{
@@ -144,12 +141,11 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
/* Use that */
res->start = pci_addr;
-#ifndef CONFIG_RESOURCES_64BIT
/* Beware of 32 bits resources */
- if ((pci_addr + size) > 0x100000000ull)
+ if (sizeof(resource_size_t) == sizeof(u32) &&
+ (pci_addr + size) > 0x100000000ull)
res->end = 0xffffffff;
else
-#endif
res->end = res->start + size - 1;
break;
}