Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 6
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 38
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 25
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 4
-rw-r--r--  arch/powerpc/include/asm/prom.h | 3
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/iommu.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 8
-rw-r--r--  arch/powerpc/kernel/prom.c | 43
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 2
-rw-r--r--  arch/powerpc/kvm/Kconfig | 1
-rw-r--r--  arch/powerpc/kvm/Makefile | 1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 150
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 42
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 36
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 246
-rw-r--r--  arch/powerpc/kvm/book3s_hv_cma.c | 240
-rw-r--r--  arch/powerpc/kvm/book3s_hv_cma.h | 27
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 139
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 14
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 35
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 1
-rw-r--r--  arch/powerpc/kvm/booke.c | 6
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 26
-rw-r--r--  arch/powerpc/mm/init_32.c | 2
-rw-r--r--  arch/powerpc/oprofile/common.c | 28
-rw-r--r--  arch/powerpc/perf/power7-events-list.h | 548
-rw-r--r--  arch/powerpc/perf/power7-pmu.c | 148
-rw-r--r--  arch/powerpc/platforms/44x/warp.c | 1
-rw-r--r--  arch/powerpc/platforms/ps3/time.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 112
-rw-r--r--  arch/powerpc/sysdev/rtc_cmos_setup.c | 2
39 files changed, 1319 insertions(+), 649 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 04f1e94c3437..a4e3a93bf2d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -389,9 +389,9 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support. As of this writing the exact hardware interface is
- strongly in flux, so no good recommendation can be made.
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
config CRASH_DUMP
bool "Build a kdump crash kernel"
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 650757c300db..704e6f10ae80 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 08891d07aeb6..fa19e2f1a874 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -334,6 +334,27 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
return r;
}
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu) - 4;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 r;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+ r = svcpu->last_inst;
+ svcpu_put(svcpu);
+ return r;
+}
+
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -446,6 +467,23 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
return vcpu->arch.last_inst;
}
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu) - 4;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return vcpu->arch.last_inst;
+}
+
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dar;
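Why both kvmppc_get_last_sc() variants fetch from pc - 4: the sc instruction architecturally sets SRR0 to the instruction *after* the sc, so the handler must back up one 4-byte slot to see the sc itself. A minimal userspace sketch of that arithmetic (illustrative only, not part of the patch; 0x44000022 is the "sc 1" encoding matched later in book3s_pr.c):

    /* Recovering the sc instruction from SRR0, which points past it. */
    #include <stdint.h>
    #include <stdio.h>

    #define INST_SC_1 0x44000022u          /* "sc 1", as tested in book3s_pr.c */

    int main(void)
    {
        uint32_t text[] = { 0x60000000u, INST_SC_1, 0x60000000u }; /* nop, sc 1, nop */
        uint64_t srr0 = 2 * 4;             /* SRR0 = address of the nop after sc */
        uint32_t last_sc = text[(srr0 - 4) / 4];   /* fetch from pc - 4 */

        printf("0x%08x is sc 1: %d\n", last_sc, last_sc == INST_SC_1);
        return 0;
    }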
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a1ecb14e4442..86d638a3b359 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#ifdef CONFIG_KVM_BOOK3S_64_HV
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
-extern int kvm_hpt_order; /* order of preallocated HPTs */
+extern unsigned long kvm_rma_pages;
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
@@ -100,7 +100,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
/* (masks depend on page size) */
rb |= 0x1000; /* page encoding in LP field */
rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ rb |= ((va_low << 4) & 0xf0); /* AVAL field (P7 doesn't seem to care) */
}
} else {
/* 4kB page */
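The AVAL change above is pure bit placement: the old expression dropped seven low VA bits into the wrong part of RB, while the new one shifts four bits up into the 0xf0 nibble. A small standalone check of what the two expressions produce (sample va_low chosen arbitrarily):

    /* Old vs. new AVAL placement from compute_tlbie_rb(), for one sample value. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long va_low = 0x5a;                    /* arbitrary sample */
        unsigned long old_aval = va_low & 0xfe;         /* pre-patch placement */
        unsigned long new_aval = (va_low << 4) & 0xf0;  /* patched placement */

        printf("old: 0x%02lx, new: 0x%02lx\n", old_aval, new_aval);
        return 0;
    }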
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af326cde7cb6..33283532e9d8 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
struct page *pages[0];
};
-struct kvmppc_linear_info {
- void *base_virt;
- unsigned long base_pfn;
- unsigned long npages;
- struct list_head list;
- atomic_t use_count;
- int type;
+struct kvm_rma_info {
+ atomic_t use_count;
+ unsigned long base_pfn;
};
/* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
int tlbie_lock;
unsigned long lpcr;
unsigned long rmor;
- struct kvmppc_linear_info *rma;
+ struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
@@ -259,7 +255,7 @@ struct kvm_arch {
spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
- struct kvmppc_linear_info *hpt_li;
+ int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a5287fe03d77..b15554a26c20 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -137,10 +137,10 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
-extern struct kvmppc_linear_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_linear_info *ri);
-extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
-extern void kvm_release_hpt(struct kvmppc_linear_info *li);
+extern struct kvm_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvm_rma_info *ri);
+extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
+extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
@@ -261,6 +261,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
struct openpic;
#ifdef CONFIG_KVM_BOOK3S_64_HV
+extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
paca[cpu].kvm_hstate.xics_phys = addr;
@@ -281,13 +282,12 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
}
extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
-extern void kvm_linear_init(void);
#else
-static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+static inline void __init kvm_cma_reserve(void)
{}
-static inline void kvm_linear_init(void)
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
static inline u32 kvmppc_get_xics_latch(void)
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
}
}
-/* Please call after prepare_to_enter. This function puts the lazy ee state
- back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
{
+ trace_hardirqs_on();
+
#ifdef CONFIG_PPC64
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8b2492644754..3fd2f1b6f906 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -138,11 +138,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev,
#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
#define EVENT_ATTR(_name, _id, _suffix) \
- PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \
power_events_sysfs_show)
#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
-#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p)
#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index e6ec2cffba16..7d0c7f3a7171 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8207459efe56..d8958be5f31a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -454,6 +454,7 @@ int main(void)
DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
+ DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index b20ff173a671..0adab06ce5c0 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -105,7 +105,7 @@ static int __init fail_iommu_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
NULL, &fail_iommu);
- return PTR_RET(dir);
+ return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);
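PTR_ERR_OR_ZERO() is the clearer modern name for PTR_RET(): return the errno encoded in an ERR_PTR, or 0 for a valid pointer. A minimal userspace model of the convention (the real helpers live in include/linux/err.h, where MAX_ERRNO is 4095):

    /* Userspace model of the kernel's ERR_PTR convention. */
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p)          ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(p)         ((long)(p))
    #define PTR_ERR_OR_ZERO(p) (IS_ERR(p) ? PTR_ERR(p) : 0)

    int main(void)
    {
        int x = 0;
        void *ok  = &x;
        void *bad = (void *)(long)-12;     /* ERR_PTR(-ENOMEM) */

        printf("ok -> %ld, bad -> %ld\n", PTR_ERR_OR_ZERO(ok), PTR_ERR_OR_ZERO(bad));
        return 0;
    }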
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index eae0ee00ca25..905a24bb7acc 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1673,12 +1673,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
/* Configure PCI Express settings */
if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
struct pci_bus *child;
- list_for_each_entry(child, &bus->children, node) {
- struct pci_dev *self = child->self;
- if (!self)
- continue;
- pcie_bus_configure_settings(child, self->pcie_mpss);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
}
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d87e03fc8cfd..6bfcab97c981 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -893,49 +893,10 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- int hardid;
- struct device_node *np;
-
- hardid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- const __be32 *intserv;
- unsigned int plen, t;
-
- /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
- * fallback to "reg" property and assume no threads
- */
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &plen);
- if (intserv == NULL) {
- const __be32 *reg = of_get_property(np, "reg", NULL);
- if (reg == NULL)
- continue;
- if (be32_to_cpup(reg) == hardid) {
- if (thread)
- *thread = 0;
- return np;
- }
- } else {
- plen /= sizeof(u32);
- for (t = 0; t < plen; t++) {
- if (hardid == be32_to_cpu(intserv[t])) {
- if (thread)
- *thread = t;
- return np;
- }
- }
- }
- }
- return NULL;
+ return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 45f2d1fac670..278ca93e1f28 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -232,6 +232,8 @@ void __init early_setup(unsigned long dt_ptr)
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ kvm_cma_reserve();
+
/*
* Reserve any gigantic pages requested on the command line.
* memblock needs to have been initialized by the time this is
@@ -624,8 +626,6 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
- kvm_linear_init();
-
/* Interrupt code needs to be 64K-aligned */
if ((unsigned long)_stext & 0xffff)
panic("Kernelbase not 64K-aligned (0x%lx)!\n",
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b2bcd34f72d2..192b051df97e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1049,7 +1049,7 @@ static int __init rtc_init(void)
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
module_init(rtc_init);
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index eb643f862579..ffaef2cb101a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -72,6 +72,7 @@ config KVM_BOOK3S_64_HV
bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
select MMU_NOTIFIER
+ select CMA
---help---
Support running unmodified book3s_64 guest kernels in
virtual machines on POWER7 and PPC970 processors that have
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 008cd856c5b5..6646c952c5e3 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -81,6 +81,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_64_vio_hv.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
+ book3s_hv_cma.o \
$(kvm-book3s_64-builtin-xics-objs-y)
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 739bfbadb85e..7e345e00661a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -182,10 +182,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
hva_t ptegp;
u64 pteg[16];
u64 avpn = 0;
+ u64 v, r;
+ u64 v_val, v_mask;
+ u64 eaddr_mask;
int i;
- u8 key = 0;
+ u8 pp, key = 0;
bool found = false;
- int second = 0;
+ bool second = false;
ulong mp_ea = vcpu->arch.magic_page_ea;
/* Magic page override */
@@ -208,8 +211,16 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
goto no_seg_found;
avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+ v_val = avpn & HPTE_V_AVPN;
+
if (slbe->tb)
- avpn |= SLB_VSID_B_1T;
+ v_val |= SLB_VSID_B_1T;
+ if (slbe->large)
+ v_val |= HPTE_V_LARGE;
+ v_val |= HPTE_V_VALID;
+
+ v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
+ HPTE_V_SECONDARY;
do_second:
ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
@@ -227,91 +238,74 @@ do_second:
key = 4;
for (i=0; i<16; i+=2) {
- u64 v = pteg[i];
- u64 r = pteg[i+1];
-
- /* Valid check */
- if (!(v & HPTE_V_VALID))
- continue;
- /* Hash check */
- if ((v & HPTE_V_SECONDARY) != second)
- continue;
-
- /* AVPN compare */
- if (HPTE_V_COMPARE(avpn, v)) {
- u8 pp = (r & HPTE_R_PP) | key;
- int eaddr_mask = 0xFFF;
-
- gpte->eaddr = eaddr;
- gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
- eaddr,
- data);
- if (slbe->large)
- eaddr_mask = 0xFFFFFF;
- gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
- gpte->may_execute = ((r & HPTE_R_N) ? false : true);
- gpte->may_read = false;
- gpte->may_write = false;
-
- switch (pp) {
- case 0:
- case 1:
- case 2:
- case 6:
- gpte->may_write = true;
- /* fall through */
- case 3:
- case 5:
- case 7:
- gpte->may_read = true;
- break;
- }
-
- dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
- "-> 0x%lx\n",
- eaddr, avpn, gpte->vpage, gpte->raddr);
+ /* Check all relevant fields of 1st dword */
+ if ((pteg[i] & v_mask) == v_val) {
found = true;
break;
}
}
- /* Update PTE R and C bits, so the guest's swapper knows we used the
- * page */
- if (found) {
- u32 oldr = pteg[i+1];
+ if (!found) {
+ if (second)
+ goto no_page_found;
+ v_val |= HPTE_V_SECONDARY;
+ second = true;
+ goto do_second;
+ }
- if (gpte->may_read) {
- /* Set the accessed flag */
- pteg[i+1] |= HPTE_R_R;
- }
- if (gpte->may_write) {
- /* Set the dirty flag */
- pteg[i+1] |= HPTE_R_C;
- } else {
- dprintk("KVM: Mapping read-only page!\n");
- }
+ v = pteg[i];
+ r = pteg[i+1];
+ pp = (r & HPTE_R_PP) | key;
+ eaddr_mask = 0xFFF;
+
+ gpte->eaddr = eaddr;
+ gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+ if (slbe->large)
+ eaddr_mask = 0xFFFFFF;
+ gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+ gpte->may_execute = ((r & HPTE_R_N) ? false : true);
+ gpte->may_read = false;
+ gpte->may_write = false;
+
+ switch (pp) {
+ case 0:
+ case 1:
+ case 2:
+ case 6:
+ gpte->may_write = true;
+ /* fall through */
+ case 3:
+ case 5:
+ case 7:
+ gpte->may_read = true;
+ break;
+ }
- /* Write back into the PTEG */
- if (pteg[i+1] != oldr)
- copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+ dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
+ "-> 0x%lx\n",
+ eaddr, avpn, gpte->vpage, gpte->raddr);
- if (!gpte->may_read)
- return -EPERM;
- return 0;
- } else {
- dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
- "ptegp=0x%lx)\n",
- eaddr, to_book3s(vcpu)->sdr1, ptegp);
- for (i = 0; i < 16; i += 2)
- dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n",
- i, pteg[i], pteg[i+1], avpn);
-
- if (!second) {
- second = HPTE_V_SECONDARY;
- goto do_second;
- }
+ /* Update PTE R and C bits, so the guest's swapper knows we used the
+ * page */
+ if (gpte->may_read) {
+ /* Set the accessed flag */
+ r |= HPTE_R_R;
+ }
+ if (data && gpte->may_write) {
+ /* Set the dirty flag -- XXX even if not writing */
+ r |= HPTE_R_C;
+ }
+
+ /* Write back into the PTEG */
+ if (pteg[i+1] != r) {
+ pteg[i+1] = r;
+ copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
}
+ if (!gpte->may_read)
+ return -EPERM;
+ return 0;
+
no_page_found:
return -ENOENT;
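The rewrite hoists the PP-key permission decode out of the PTEG scan loop without changing it. The decode itself is small enough to check standalone (same switch as above; pp is the PTE's PP bits OR'd with the key bit):

    /* Exhaustive check of the pp -> may_read/may_write decode used above. */
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned int pp = 0; pp < 8; pp++) {
            bool may_read = false, may_write = false;

            switch (pp) {
            case 0: case 1: case 2: case 6:
                may_write = true;
                /* fall through */
            case 3: case 5: case 7:
                may_read = true;
                break;
            }
            printf("pp=%u read=%d write=%d\n", pp, may_read, may_write);
        }
        return 0;
    }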
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 710d31317d81..043eec8461e7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,8 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
+#include "book3s_hv_cma.h"
+
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
@@ -52,8 +54,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
unsigned long hpt;
struct revmap_entry *rev;
- struct kvmppc_linear_info *li;
- long order = kvm_hpt_order;
+ struct page *page = NULL;
+ long order = KVM_DEFAULT_HPT_ORDER;
if (htab_orderp) {
order = *htab_orderp;
@@ -61,26 +63,23 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
order = PPC_MIN_HPT_ORDER;
}
+ kvm->arch.hpt_cma_alloc = 0;
/*
- * If the user wants a different size from default,
* try first to allocate it from the kernel page allocator.
+	 * We keep the CMA reserved as a fallback if that allocation fails.
*/
- hpt = 0;
- if (order != kvm_hpt_order) {
- hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
- __GFP_NOWARN, order - PAGE_SHIFT);
- if (!hpt)
- --order;
- }
+ hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
+ __GFP_NOWARN, order - PAGE_SHIFT);
/* Next try to allocate from the preallocated pool */
if (!hpt) {
- li = kvm_alloc_hpt();
- if (li) {
- hpt = (ulong)li->base_virt;
- kvm->arch.hpt_li = li;
- order = kvm_hpt_order;
- }
+ VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
+ page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+ if (page) {
+ hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+ kvm->arch.hpt_cma_alloc = 1;
+ } else
+ --order;
}
/* Lastly try successively smaller sizes from the page allocator */
@@ -118,8 +117,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
return 0;
out_freehpt:
- if (kvm->arch.hpt_li)
- kvm_release_hpt(kvm->arch.hpt_li);
+ if (kvm->arch.hpt_cma_alloc)
+ kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
else
free_pages(hpt, order - PAGE_SHIFT);
return -ENOMEM;
@@ -165,8 +164,9 @@ void kvmppc_free_hpt(struct kvm *kvm)
{
kvmppc_free_lpid(kvm->arch.lpid);
vfree(kvm->arch.revmap);
- if (kvm->arch.hpt_li)
- kvm_release_hpt(kvm->arch.hpt_li);
+ if (kvm->arch.hpt_cma_alloc)
+ kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
+ 1 << (kvm->arch.hpt_order - PAGE_SHIFT));
else
free_pages(kvm->arch.hpt_virt,
kvm->arch.hpt_order - PAGE_SHIFT);
@@ -1579,7 +1579,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
ctx->first_pass = 1;
rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
- ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
+ ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
if (ret < 0) {
kvm_put_kvm(kvm);
return ret;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index b2d3f3b2de72..54cf9bc94dad 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -136,7 +136,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR);
+ stt, O_RDWR | O_CLOEXEC);
fail:
if (stt) {
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 1f6344c4408d..360ce68c9809 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -458,6 +458,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
+ case SPRN_DABR:
break;
unprivileged:
default:
@@ -555,6 +556,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
+ case SPRN_DABR:
*spr_val = 0;
break;
default:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e8d51cb76752..62a2b5ab08ed 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -680,13 +680,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+ struct kvm_sregs *sregs)
{
int i;
- sregs->pvr = vcpu->arch.pvr;
-
memset(sregs, 0, sizeof(struct kvm_sregs));
+ sregs->pvr = vcpu->arch.pvr;
for (i = 0; i < vcpu->arch.slb_max; i++) {
sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
@@ -696,7 +695,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+ struct kvm_sregs *sregs)
{
int i, j;
@@ -1511,10 +1510,10 @@ static inline int lpcr_rmls(unsigned long rma_size)
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct kvmppc_linear_info *ri = vma->vm_file->private_data;
struct page *page;
+ struct kvm_rma_info *ri = vma->vm_file->private_data;
- if (vmf->pgoff >= ri->npages)
+ if (vmf->pgoff >= kvm_rma_pages)
return VM_FAULT_SIGBUS;
page = pfn_to_page(ri->base_pfn + vmf->pgoff);
@@ -1536,7 +1535,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
static int kvm_rma_release(struct inode *inode, struct file *filp)
{
- struct kvmppc_linear_info *ri = filp->private_data;
+ struct kvm_rma_info *ri = filp->private_data;
kvm_release_rma(ri);
return 0;
@@ -1549,18 +1548,27 @@ static const struct file_operations kvm_rma_fops = {
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
- struct kvmppc_linear_info *ri;
long fd;
+ struct kvm_rma_info *ri;
+ /*
+ * Only do this on PPC970 in HV mode
+ */
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_201))
+ return -EINVAL;
+
+ if (!kvm_rma_pages)
+ return -EINVAL;
ri = kvm_alloc_rma();
if (!ri)
return -ENOMEM;
- fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
+ fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
if (fd < 0)
kvm_release_rma(ri);
- ret->rma_size = ri->npages << PAGE_SHIFT;
+ ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
return fd;
}
@@ -1725,7 +1733,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
struct kvm *kvm = vcpu->kvm;
- struct kvmppc_linear_info *ri = NULL;
+ struct kvm_rma_info *ri = NULL;
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
@@ -1803,7 +1811,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
} else {
/* Set up to use an RMO region */
- rma_size = ri->npages;
+ rma_size = kvm_rma_pages;
if (rma_size > memslot->npages)
rma_size = memslot->npages;
rma_size <<= PAGE_SHIFT;
@@ -1831,14 +1839,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* POWER7 */
lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
lpcr |= rmls << LPCR_RMLS_SH;
- kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+ kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
}
kvm->arch.lpcr = lpcr;
pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
/* Initialize phys addrs of pages in RMO */
- npages = ri->npages;
+ npages = kvm_rma_pages;
porder = __ilog2(npages);
physp = memslot->arch.slot_phys;
if (physp) {
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index ec0a9e5de100..8cd0daebb82d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -13,33 +13,34 @@
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
-#define KVM_LINEAR_RMA 0
-#define KVM_LINEAR_HPT 1
-
-static void __init kvm_linear_init_one(ulong size, int count, int type);
-static struct kvmppc_linear_info *kvm_alloc_linear(int type);
-static void kvm_release_linear(struct kvmppc_linear_info *ri);
-
-int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
-EXPORT_SYMBOL_GPL(kvm_hpt_order);
-
-/*************** RMA *************/
-
+#include "book3s_hv_cma.h"
+/*
+ * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
+ * should be a power of 2.
+ */
+#define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */
+/*
+ * By default we reserve 5% of memory for hash pagetable allocation.
+ */
+static unsigned long kvm_cma_resv_ratio = 5;
/*
- * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
* Each RMA has to be physically contiguous and of a size that the
* hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes. Since we are unlikely to be able to allocate that
* much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot for KVM to use.
+ * we preallocate a set of RMAs in early boot using CMA.
+ * The RMA size should be a power of 2.
*/
-static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
-static unsigned long kvm_rma_count;
+unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
+EXPORT_SYMBOL_GPL(kvm_rma_pages);
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
@@ -69,165 +70,114 @@ static inline int lpcr_rmls(unsigned long rma_size)
static int __init early_parse_rma_size(char *p)
{
- if (!p)
- return 1;
+ unsigned long kvm_rma_size;
+ pr_debug("%s(%s)\n", __func__, p);
+ if (!p)
+ return -EINVAL;
kvm_rma_size = memparse(p, &p);
-
+ /*
+ * Check that the requested size is one supported in hardware
+ */
+ if (lpcr_rmls(kvm_rma_size) < 0) {
+ pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+ return -EINVAL;
+ }
+ kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
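The reworked parser rejects unsupported sizes at parse time instead of silently deferring failure to boot-time reservation. A simplified userspace sketch of that flow (the size list is just the subset named in the comment above -- 64MB, 128MB, 256MB; the real lpcr_rmls() knows more):

    /* Simplified sketch of early_parse_rma_size(): parse "<n>[KMG]" and
     * reject sizes the hardware cannot use as an RMA. Illustrative only. */
    #include <stdio.h>
    #include <stdlib.h>

    static long sketch_lpcr_rmls(unsigned long size)
    {
        if (size == 64UL << 20 || size == 128UL << 20 || size == 256UL << 20)
            return 0;
        return -1;
    }

    int main(int argc, char **argv)
    {
        char *end;
        unsigned long size;

        if (argc < 2)
            return 1;
        size = strtoul(argv[1], &end, 0);
        switch (*end) {                    /* crude memparse() stand-in */
        case 'G': size <<= 10; /* fall through */
        case 'M': size <<= 10; /* fall through */
        case 'K': size <<= 10;
        }
        if (sketch_lpcr_rmls(size) < 0) {
            fprintf(stderr, "RMA size of 0x%lx not supported\n", size);
            return 1;
        }
        printf("kvm_rma_pages = %lu\n", size >> 12);  /* assuming 4K pages */
        return 0;
    }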
-static int __init early_parse_rma_count(char *p)
+struct kvm_rma_info *kvm_alloc_rma(void)
{
- if (!p)
- return 1;
-
- kvm_rma_count = simple_strtoul(p, NULL, 0);
-
- return 0;
-}
-early_param("kvm_rma_count", early_parse_rma_count);
-
-struct kvmppc_linear_info *kvm_alloc_rma(void)
-{
- return kvm_alloc_linear(KVM_LINEAR_RMA);
+ struct page *page;
+ struct kvm_rma_info *ri;
+
+ ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
+ if (!ri)
+ return NULL;
+ page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
+ if (!page)
+ goto err_out;
+ atomic_set(&ri->use_count, 1);
+ ri->base_pfn = page_to_pfn(page);
+ return ri;
+err_out:
+ kfree(ri);
+ return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-void kvm_release_rma(struct kvmppc_linear_info *ri)
+void kvm_release_rma(struct kvm_rma_info *ri)
{
- kvm_release_linear(ri);
+ if (atomic_dec_and_test(&ri->use_count)) {
+ kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
+ kfree(ri);
+ }
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
-/*************** HPT *************/
-
-/*
- * This maintains a list of big linear HPT tables that contain the GVA->HPA
- * memory mappings. If we don't reserve those early on, we might not be able
- * to get a big (usually 16MB) linear memory region from the kernel anymore.
- */
-
-static unsigned long kvm_hpt_count;
-
-static int __init early_parse_hpt_count(char *p)
+static int __init early_parse_kvm_cma_resv(char *p)
{
+ pr_debug("%s(%s)\n", __func__, p);
if (!p)
- return 1;
-
- kvm_hpt_count = simple_strtoul(p, NULL, 0);
-
- return 0;
+ return -EINVAL;
+ return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
-early_param("kvm_hpt_count", early_parse_hpt_count);
+early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
-struct kvmppc_linear_info *kvm_alloc_hpt(void)
+struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
- return kvm_alloc_linear(KVM_LINEAR_HPT);
+ unsigned long align_pages = HPT_ALIGN_PAGES;
+
+ /* Old CPUs require HPT aligned on a multiple of its size */
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ align_pages = nr_pages;
+ return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
-void kvm_release_hpt(struct kvmppc_linear_info *li)
+void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
- kvm_release_linear(li);
+ kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
-/*************** generic *************/
-
-static LIST_HEAD(free_linears);
-static DEFINE_SPINLOCK(linear_lock);
-
-static void __init kvm_linear_init_one(ulong size, int count, int type)
-{
- unsigned long i;
- unsigned long j, npages;
- void *linear;
- struct page *pg;
- const char *typestr;
- struct kvmppc_linear_info *linear_info;
-
- if (!count)
- return;
-
- typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
-
- npages = size >> PAGE_SHIFT;
- linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
- for (i = 0; i < count; ++i) {
- linear = alloc_bootmem_align(size, size);
- pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
- size >> 20);
- linear_info[i].base_virt = linear;
- linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
- linear_info[i].npages = npages;
- linear_info[i].type = type;
- list_add_tail(&linear_info[i].list, &free_linears);
- atomic_set(&linear_info[i].use_count, 0);
-
- pg = pfn_to_page(linear_info[i].base_pfn);
- for (j = 0; j < npages; ++j) {
- atomic_inc(&pg->_count);
- ++pg;
- }
- }
-}
-
-static struct kvmppc_linear_info *kvm_alloc_linear(int type)
-{
- struct kvmppc_linear_info *ri, *ret;
-
- ret = NULL;
- spin_lock(&linear_lock);
- list_for_each_entry(ri, &free_linears, list) {
- if (ri->type != type)
- continue;
-
- list_del(&ri->list);
- atomic_inc(&ri->use_count);
- memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
- ret = ri;
- break;
- }
- spin_unlock(&linear_lock);
- return ret;
-}
-
-static void kvm_release_linear(struct kvmppc_linear_info *ri)
-{
- if (atomic_dec_and_test(&ri->use_count)) {
- spin_lock(&linear_lock);
- list_add_tail(&ri->list, &free_linears);
- spin_unlock(&linear_lock);
-
- }
-}
-
-/*
- * Called at boot time while the bootmem allocator is active,
- * to allocate contiguous physical memory for the hash page
- * tables for guests.
+/**
+ * kvm_cma_reserve() - reserve area for kvm hash pagetable
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
*/
-void __init kvm_linear_init(void)
+void __init kvm_cma_reserve(void)
{
- /* HPT */
- kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
-
- /* RMA */
- /* Only do this on PPC970 in HV mode */
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_201))
- return;
-
- if (!kvm_rma_size || !kvm_rma_count)
- return;
-
- /* Check that the requested size is one supported in hardware */
- if (lpcr_rmls(kvm_rma_size) < 0) {
- pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
- return;
+ unsigned long align_size;
+ struct memblock_region *reg;
+ phys_addr_t selected_size = 0;
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ selected_size += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ (unsigned long)selected_size / SZ_1M);
+ /*
+		 * Old CPUs require the HPT to be aligned on a multiple of its size,
+		 * so for them make the alignment the maximum size we could request.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ align_size = __rounddown_pow_of_two(selected_size);
+ else
+ align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
+
+ align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+ kvm_cma_declare_contiguous(selected_size, align_size);
}
-
- kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
}
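The sizing above is plain arithmetic: reserve kvm_cma_resv_ratio percent of memory (5% by default), aligned to the HPT requirement and to at least one RMA. A quick standalone check of those numbers on the newer-CPU (CPU_FTR_ARCH_206) path, with 16GB of RAM and 4K pages as illustrative assumptions:

    /* Sketch of the kvm_cma_reserve() sizing math; inputs are assumptions. */
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define HPT_ALIGN_PAGES ((1UL << 18) >> PAGE_SHIFT)     /* 256k */

    int main(void)
    {
        unsigned long kvm_rma_pages = (1UL << 27) >> PAGE_SHIFT;  /* 128MB */
        unsigned long total = 16UL << 30;                         /* 16GB RAM */
        unsigned long resv_ratio = 5;                             /* percent */

        unsigned long selected = total / 100 * resv_ratio;
        unsigned long align = HPT_ALIGN_PAGES << PAGE_SHIFT;

        if ((kvm_rma_pages << PAGE_SHIFT) > align)                /* max() */
            align = kvm_rma_pages << PAGE_SHIFT;

        printf("reserve %lu MiB, align %lu MiB\n", selected >> 20, align >> 20);
        return 0;
    }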
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
new file mode 100644
index 000000000000..d9d3d8553d51
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -0,0 +1,240 @@
+/*
+ * Contiguous Memory Allocator for ppc KVM hash pagetable, based on the CMA
+ * used by the DMA mapping framework
+ *
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ *
+ */
+#define pr_fmt(fmt) "kvm_cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <linux/memblock.h>
+#include <linux/mutex.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "book3s_hv_cma.h"
+
+struct kvm_cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+static DEFINE_MUTEX(kvm_cma_mutex);
+static struct kvm_cma kvm_cma_area;
+
+/**
+ * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
+ * for kvm hash pagetable
+ * @size: Size of the reserved memory.
+ * @alignment: Alignment for the contiguous memory area
+ *
+ * This function reserves memory for the kvm cma area. It should be
+ * called by arch code while the early allocator (memblock or bootmem)
+ * is still active.
+ */
+long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
+{
+ long base_pfn;
+ phys_addr_t addr;
+ struct kvm_cma *cma = &kvm_cma_area;
+
+ pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);
+
+ if (!size)
+ return -EINVAL;
+ /*
+ * Sanitise input arguments.
+ * We should be pageblock aligned for CMA.
+ */
+ alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
+ size = ALIGN(size, alignment);
+ /*
+ * Reserve memory
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ addr = __memblock_alloc_base(size, alignment, 0);
+ if (!addr) {
+ base_pfn = -ENOMEM;
+ goto err;
+ } else
+ base_pfn = PFN_DOWN(addr);
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ cma->base_pfn = base_pfn;
+ cma->count = size >> PAGE_SHIFT;
+ pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+ return base_pfn;
+}
+
+/**
+ * kvm_alloc_cma() - allocate pages from contiguous area
+ * @nr_pages: Requested number of pages.
+ * @align_pages: Requested alignment in number of pages
+ *
+ * This function allocates memory buffer for hash pagetable.
+ */
+struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
+{
+ int ret;
+ struct page *page = NULL;
+ struct kvm_cma *cma = &kvm_cma_area;
+ unsigned long chunk_count, nr_chunk;
+ unsigned long mask, pfn, pageno, start = 0;
+
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
+ (void *)cma, nr_pages, align_pages);
+
+ if (!nr_pages)
+ return NULL;
+ /*
+	 * Align the mask to the chunk size: each bitmap bit tracks one chunk of pages.
+ */
+ VM_BUG_ON(!is_power_of_2(align_pages));
+ mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+ BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
+
+ chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+ nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
+ mutex_lock(&kvm_cma_mutex);
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+ start, nr_chunk, mask);
+ if (pageno >= chunk_count)
+ break;
+
+ pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
+ ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, nr_chunk);
+ page = pfn_to_page(pfn);
+ memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
+ break;
+ } else if (ret != -EBUSY) {
+ break;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a bit different memory target */
+ start = pageno + mask + 1;
+ }
+ mutex_unlock(&kvm_cma_mutex);
+ pr_debug("%s(): returned %p\n", __func__, page);
+ return page;
+}
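The bitmap works in 256K chunks (KVM_CMA_CHUNK_ORDER = 18), so page counts and alignments are converted to chunk units first. Worked numbers for the default 16MB HPT, assuming a 4K page size:

    /* Chunk accounting from kvm_alloc_cma(), with illustrative inputs. */
    #include <stdio.h>

    #define PAGE_SHIFT          12      /* assumed 4K pages */
    #define KVM_CMA_CHUNK_ORDER 18

    int main(void)
    {
        unsigned long order = 24;                                /* 16MB HPT */
        unsigned long nr_pages = 1UL << (order - PAGE_SHIFT);
        unsigned long align_pages = (1UL << 18) >> PAGE_SHIFT;   /* HPT_ALIGN_PAGES */

        unsigned long nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
        unsigned long mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;

        /* 4096 pages -> 64 chunks; 64-page alignment -> mask 0 */
        printf("nr_pages=%lu nr_chunk=%lu mask=%lu\n", nr_pages, nr_chunk, mask);
        return 0;
    }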
+
+/**
+ * kvm_release_cma() - release allocated pages for hash pagetable
+ * @pages: Allocated pages.
+ * @nr_pages: Number of allocated pages.
+ *
+ * This function releases memory allocated by kvm_alloc_cma().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
+{
+ unsigned long pfn;
+ unsigned long nr_chunk;
+ struct kvm_cma *cma = &kvm_cma_area;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+ nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
+ mutex_lock(&kvm_cma_mutex);
+ bitmap_clear(cma->bitmap,
+ (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+ nr_chunk);
+ free_contig_range(pfn, nr_pages);
+ mutex_unlock(&kvm_cma_mutex);
+
+ return true;
+}
+
+static int __init kvm_cma_activate_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ /*
+ * alloc_contig_range requires the pfn range
+ * specified to be in the same zone. Make this
+ * simple by forcing the entire CMA resv range
+ * to be in the same zone.
+ */
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static int __init kvm_cma_init_reserved_areas(void)
+{
+ int bitmap_size, ret;
+ unsigned long chunk_count;
+ struct kvm_cma *cma = &kvm_cma_area;
+
+ pr_debug("%s()\n", __func__);
+ if (!cma->count)
+ return 0;
+ chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+ bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!cma->bitmap)
+ return -ENOMEM;
+
+ ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
+ if (ret)
+ goto error;
+ return 0;
+
+error:
+ kfree(cma->bitmap);
+ return ret;
+}
+core_initcall(kvm_cma_init_reserved_areas);
diff --git a/arch/powerpc/kvm/book3s_hv_cma.h b/arch/powerpc/kvm/book3s_hv_cma.h
new file mode 100644
index 000000000000..655144f75fa5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.h
@@ -0,0 +1,27 @@
+/*
+ * Contiguous Memory Allocator for ppc KVM hash pagetable, based on the CMA
+ * used by the DMA mapping framework
+ *
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_CMA_ALLOC_H__
+#define __POWERPC_KVM_CMA_ALLOC_H__
+/*
+ * Both RMA and hash page allocations will be a multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER 18
+
+extern struct page *kvm_alloc_cma(unsigned long nr_pages,
+ unsigned long align_pages);
+extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
+extern long kvm_cma_declare_contiguous(phys_addr_t size,
+ phys_addr_t alignment) __init;
+#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index c3785d4aeed7..9c515440ad1a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -387,6 +387,80 @@ static inline int try_lock_tlbie(unsigned int *lock)
return old == 0;
}
+/*
+ * tlbie/tlbiel is a bit different on the PPC970 compared to later
+ * processors such as POWER7; the large page bit is in the instruction
+ * not RB, and the top 16 bits and the bottom 12 bits of the VA
+ * in RB must be 0.
+ */
+static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
+ long npages, int global, bool need_sync)
+{
+ long i;
+
+ if (global) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i) {
+ unsigned long rb = rbvalues[i];
+
+ if (rb & 1) /* large page */
+ asm volatile("tlbie %0,1" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ else
+ asm volatile("tlbie %0,0" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i) {
+ unsigned long rb = rbvalues[i];
+
+ if (rb & 1) /* large page */
+ asm volatile("tlbiel %0,1" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ else
+ asm volatile("tlbiel %0,0" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ }
+ asm volatile("ptesync" : : : "memory");
+ }
+}
+
+static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+ long npages, int global, bool need_sync)
+{
+ long i;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970 tlbie instruction is a bit different */
+ do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
+ return;
+ }
+ if (global) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i)
+ asm volatile(PPC_TLBIE(%1,%0) : :
+ "r" (rbvalues[i]), "r" (kvm->arch.lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i)
+ asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+ asm volatile("ptesync" : : : "memory");
+ }
+}
+
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
unsigned long pte_index, unsigned long avpn,
unsigned long *hpret)
@@ -412,19 +486,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
if (v & HPTE_V_VALID) {
hpte[0] &= ~HPTE_V_VALID;
rb = compute_tlbie_rb(v, hpte[1], pte_index);
- if (global_invalidates(kvm, flags)) {
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- asm volatile("tlbiel %0" : : "r" (rb));
- asm volatile("ptesync" : : : "memory");
- }
+ do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/* Read PTE low word after tlbie to get final R/C values */
remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
}
@@ -452,12 +514,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
unsigned long *hp, *hptes[4], tlbrb[4];
long int i, j, k, n, found, indexes[4];
unsigned long flags, req, pte_index, rcbits;
- long int local = 0;
+ int global;
long int ret = H_SUCCESS;
struct revmap_entry *rev, *revs[4];
- if (atomic_read(&kvm->online_vcpus) == 1)
- local = 1;
+ global = global_invalidates(kvm, 0);
for (i = 0; i < 4 && ret == H_SUCCESS; ) {
n = 0;
for (; i < 4; ++i) {
@@ -533,22 +594,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
break;
/* Now that we've collected a batch, do the tlbies */
- if (!local) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- for (k = 0; k < n; ++k)
- asm volatile(PPC_TLBIE(%1,%0) : :
- "r" (tlbrb[k]),
- "r" (kvm->arch.lpid));
- asm volatile("eieio; tlbsync; ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- for (k = 0; k < n; ++k)
- asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
- asm volatile("ptesync" : : : "memory");
- }
+ do_tlbies(kvm, tlbrb, n, global, true);
/* Read PTE low words after tlbie to get final R/C values */
for (k = 0; k < n; ++k) {
@@ -607,19 +653,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
if (v & HPTE_V_VALID) {
rb = compute_tlbie_rb(v, r, pte_index);
hpte[0] = v & ~HPTE_V_VALID;
- if (global_invalidates(kvm, flags)) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- asm volatile("tlbiel %0" : : "r" (rb));
- asm volatile("ptesync" : : : "memory");
- }
+ do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
* If the host has this page as readonly but the guest
* wants to make it read/write, reduce the permissions.
@@ -690,13 +724,7 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
hptep[0] &= ~HPTE_V_VALID;
rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
+ do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
@@ -710,12 +738,7 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
/* modify only the second-last byte, which contains the ref bit */
*((char *)hptep + 14) = rbyte;
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
+ do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b93e3cd8bf2b..294b7af28cdd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1393,7 +1393,7 @@ hcall_try_real_mode:
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
LOAD_REG_ADDR(r4, hcall_real_table)
- lwzx r3,r3,r4
+ lwax r3,r3,r4
cmpwi r3,0
beq guest_exit_cont
add r3,r3,r4
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 48cbbf862958..17cfae5497a3 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -92,6 +92,11 @@ kvm_start_lightweight:
PPC_LL r3, VCPU_HFLAGS(r4)
rldicl r3, r3, 0, 63 /* r3 &= 1 */
stb r3, HSTATE_RESTORE_HID5(r13)
+
+ /* Load up guest SPRG3 value, since it's user readable */
+ ld r3, VCPU_SHARED(r4)
+ ld r3, VCPU_SHARED_SPRG3(r3)
+ mtspr SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */
PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
@@ -123,6 +128,15 @@ kvmppc_handler_highmem:
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Reload kernel SPRG3 value.
+ * No need to save guest value as usermode can't modify SPRG3.
+ */
+ ld r3, PACA_SPRG3(r13)
+ mtspr SPRN_SPRG3, r3
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
PPC_STL r14, VCPU_GPR(R14)(r7)
PPC_STL r15, VCPU_GPR(R15)(r7)
PPC_STL r16, VCPU_GPR(R16)(r7)
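SPRG3 is readable from user mode on these CPUs (the vDSO getcpu path relies on it), which is why the guest value must be installed before entry and the host value restored on exit, while the guest value never needs saving. A PowerPC-only sketch of the user-visible side (SPR 259 as the user-readable SPRG3 alias is an assumption worth checking against the ISA):

    /* PowerPC-only: read SPRG3 from user space via its user-readable
     * SPR number (assumed 259, per the server vDSO getcpu convention). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long sprg3;

        __asm__ volatile("mfspr %0, 259" : "=r" (sprg3));
        printf("SPRG3 = 0x%lx\n", sprg3);
        return 0;
    }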
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c6e13d9a9e15..27db1e665959 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -468,7 +468,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
* both the traditional FP registers and the added VSX
* registers into thread.fpr[].
*/
- giveup_fpu(current);
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
@@ -483,7 +484,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
- giveup_altivec(current);
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
vcpu->arch.vscr = t->vscr;
}
@@ -575,8 +577,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif
- current->thread.regs->msr |= msr;
-
if (msr & MSR_FP) {
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
@@ -598,12 +598,32 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#endif
}
+ current->thread.regs->msr |= msr;
vcpu->arch.guest_owned_ext |= msr;
kvmppc_recalc_shadow_msr(vcpu);
return RESUME_GUEST;
}
+/*
+ * Kernel code using FP or VMX could have flushed guest state to
+ * the thread_struct; if so, get it back now.
+ */
+static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
+{
+ unsigned long lost_ext;
+
+ lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
+ if (!lost_ext)
+ return;
+
+ if (lost_ext & MSR_FP)
+ kvmppc_load_up_fpu();
+ if (lost_ext & MSR_VEC)
+ kvmppc_load_up_altivec();
+ current->thread.regs->msr |= lost_ext;
+}
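lost_ext is a plain bit difference: facilities the guest owns minus facilities still live in the thread's MSR. A small demo with illustrative MSR bit positions (FP bit 13 and VEC bit 25, the 64-bit Book3S values, assumed here):

    /* Demo of the lost_ext mask logic; MSR bit positions are assumptions
     * matching 64-bit Book3S (MSR_FP = 1<<13, MSR_VEC = 1<<25). */
    #include <stdio.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)

    int main(void)
    {
        unsigned long guest_owned_ext = MSR_FP | MSR_VEC;
        unsigned long thread_msr = MSR_VEC;      /* kernel code flushed FP */
        unsigned long lost_ext = guest_owned_ext & ~thread_msr;

        printf("reload FP: %d, reload VEC: %d\n",
               !!(lost_ext & MSR_FP), !!(lost_ext & MSR_VEC));
        return 0;
    }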
+
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
@@ -772,7 +792,7 @@ program_interrupt:
}
case BOOK3S_INTERRUPT_SYSCALL:
if (vcpu->arch.papr_enabled &&
- (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
+ (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
!(vcpu->arch.shared->msr & MSR_PR)) {
/* SC 1 papr hypercalls */
ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -890,8 +910,9 @@ program_interrupt:
local_irq_enable();
r = s;
} else {
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
}
+ kvmppc_handle_lost_ext(vcpu);
}
trace_kvm_book3s_reenter(r, vcpu);
@@ -1162,7 +1183,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd46b83d..a3a5cb8ee7ea 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
+#include <asm/time.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index dcc94f016007..17722d82f1d1 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -674,8 +674,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
goto out;
}
- kvm_guest_enter();
-
#ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */
enable_kernel_fp();
@@ -698,7 +696,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
#endif
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -1168,7 +1166,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
local_irq_enable();
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
} else {
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
}
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6316ee336e88..07c0106fab76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -117,8 +117,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
kvm_guest_exit();
continue;
}
-
- trace_hardirqs_on();
#endif
kvm_guest_enter();
@@ -420,6 +418,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
return kvmppc_core_create_memslot(slot, npages);
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
@@ -823,39 +825,39 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
#endif
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC: {
- struct file *filp;
+ struct fd f;
struct kvm_device *dev;
r = -EBADF;
- filp = fget(cap->args[0]);
- if (!filp)
+ f = fdget(cap->args[0]);
+ if (!f.file)
break;
r = -EPERM;
- dev = kvm_device_from_filp(filp);
+ dev = kvm_device_from_filp(f.file);
if (dev)
r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
- fput(filp);
+ fdput(f);
break;
}
#endif
#ifdef CONFIG_KVM_XICS
case KVM_CAP_IRQ_XICS: {
- struct file *filp;
+ struct fd f;
struct kvm_device *dev;
r = -EBADF;
- filp = fget(cap->args[0]);
- if (!filp)
+ f = fdget(cap->args[0]);
+ if (!f.file)
break;
r = -EPERM;
- dev = kvm_device_from_filp(filp);
+ dev = kvm_device_from_filp(f.file);
if (dev)
r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
- fput(filp);
+ fdput(f);
break;
}
#endif /* CONFIG_KVM_XICS */
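
The fget()/fput() pairs above become fdget()/fdput(): struct fd bundles the struct file pointer with a flag recording whether a reference was actually taken, letting single-threaded processes skip the atomic refcount. The calling pattern, as in the two hunks above (use_fd is a hypothetical caller):

	static int use_fd(unsigned int fd)
	{
		struct fd f = fdget(fd);

		if (!f.file)
			return -EBADF;
		/* ... operate on f.file ... */
		fdput(f);	/* drops the ref only if fdget() took one */
		return 0;
	}
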
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 01e2db97a210..d47d3dab4870 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -52,7 +52,7 @@
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
-#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
+#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
#endif
#endif
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 4f51025f5b00..c77348c5d463 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -119,7 +119,7 @@ static void op_powerpc_stop(void)
model->global_stop();
}
-static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
+static int op_powerpc_create_files(struct dentry *root)
{
int i;
@@ -128,9 +128,9 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* There is one mmcr0, mmcr1 and mmcra for setting the events for
* all of the counters.
*/
- oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
- oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
- oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+ oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
+ oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
+ oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
#ifdef CONFIG_OPROFILE_CELL
/* create a file the user tool can check to see what level of profiling
 * support exists with this kernel. Initialize bit mask to indicate
@@ -142,7 +142,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* If the file does not exist, then the kernel only supports SPU
* cycle profiling, PPU event and cycle profiling.
*/
- oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+ oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
sys.cell_support = 0x1; /* Note, the user OProfile tool must check
* that this bit is set before attempting to
 * use SPU event profiling. Older kernels
@@ -160,11 +160,11 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
+ dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+ oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(dir, "count", &ctr[i].count);
/*
* Classic PowerPC doesn't support per-counter
@@ -173,14 +173,14 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* Book-E style performance monitors, we do
* support them.
*/
- oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
}
- oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
- oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
+ oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
+ oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
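
With this VFS cleanup the oprofilefs helpers no longer thread a struct super_block through every call; the parent dentry alone determines where a file lands. Assuming the new prototypes keep their remaining arguments, a backend's create_files callback reduces to (my_create_files is illustrative):

	/* Sketch of a create_files callback under the new API. */
	static int my_create_files(struct dentry *root)
	{
		oprofilefs_create_ulong(root, "enable_kernel",
					&sys.enable_kernel);
		oprofilefs_create_ulong(root, "enable_user",
					&sys.enable_user);
		return 0;
	}
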
diff --git a/arch/powerpc/perf/power7-events-list.h b/arch/powerpc/perf/power7-events-list.h
new file mode 100644
index 000000000000..687790a2c0b8
--- /dev/null
+++ b/arch/powerpc/perf/power7-events-list.h
@@ -0,0 +1,548 @@
+/*
+ * Performance counter support for POWER7 processors.
+ *
+ * Copyright 2013 Runzhen Wang, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898)
+EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0)
+EVENT(PM_PMC2_SAVED, 0x10022)
+EVENT(PM_CMPLU_STALL_DFU, 0x2003c)
+EVENT(PM_VSU0_16FLOP, 0x0a0a4)
+EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a)
+EVENT(PM_MRK_ST_CMPL, 0x10034)
+EVENT(PM_NEST_PAIR3_ADD, 0x40881)
+EVENT(PM_L2_ST_DISP, 0x46180)
+EVENT(PM_L2_CASTOUT_MOD, 0x16180)
+EVENT(PM_ISEG, 0x020a4)
+EVENT(PM_MRK_INST_TIMEO, 0x40034)
+EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282)
+EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6)
+EVENT(PM_IERAT_WR_64K, 0x040be)
+EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e)
+EVENT(PM_IERAT_MISS, 0x100f6)
+EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052)
+EVENT(PM_FLOP, 0x100f4)
+EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4)
+EVENT(PM_BR_PRED_TA, 0x040aa)
+EVENT(PM_CMPLU_STALL_FXU, 0x20014)
+EVENT(PM_EXT_INT, 0x200f8)
+EVENT(PM_VSU_FSQRT_FDIV, 0x0a888)
+EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e)
+EVENT(PM_LSU1_LDF, 0x0c086)
+EVENT(PM_IC_WRITE_ALL, 0x0488c)
+EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0)
+EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052)
+EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e)
+EVENT(PM_DATA_FROM_L21_MOD, 0x3c046)
+EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a)
+EVENT(PM_VSU0_8FLOP, 0x0a0a0)
+EVENT(PM_POWER_EVENT1, 0x1006e)
+EVENT(PM_DISP_CLB_HELD_BAL, 0x02092)
+EVENT(PM_VSU1_2FLOP, 0x0a09a)
+EVENT(PM_LWSYNC_HELD, 0x0209a)
+EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054)
+EVENT(PM_INST_FROM_L21_MOD, 0x34046)
+EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc)
+EVENT(PM_IC_REQ_ALL, 0x04888)
+EVENT(PM_DSLB_MISS, 0x0d090)
+EVENT(PM_L3_MISS, 0x1f082)
+EVENT(PM_LSU0_L1_PREF, 0x0d0b8)
+EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884)
+EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be)
+EVENT(PM_L2_INST, 0x36080)
+EVENT(PM_VSU0_FRSP, 0x0a0b4)
+EVENT(PM_FLUSH_DISP, 0x02082)
+EVENT(PM_PTEG_FROM_L2MISS, 0x4c058)
+EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a)
+EVENT(PM_CMPLU_STALL_LSU, 0x20012)
+EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a)
+EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0)
+EVENT(PM_PTEG_FROM_LMEM, 0x4c052)
+EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c)
+EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c)
+EVENT(PM_MEM0_PREFETCH_DISP, 0x20083)
+EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f)
+EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c)
+EVENT(PM_VSU_FRSP, 0x0a8b4)
+EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046)
+EVENT(PM_PMC1_OVERFLOW, 0x20010)
+EVENT(PM_VSU0_SINGLE, 0x0a0a8)
+EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058)
+EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056)
+EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090)
+EVENT(PM_VSU1_FEST, 0x0a0ba)
+EVENT(PM_MRK_INST_DISP, 0x20030)
+EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096)
+EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6)
+EVENT(PM_INST_CMPL, 0x00002)
+EVENT(PM_FXU_IDLE, 0x1000e)
+EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0)
+EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c)
+EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c)
+EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6)
+EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056)
+EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042)
+EVENT(PM_SHL_CREATED, 0x05082)
+EVENT(PM_L2_ST_HIT, 0x46182)
+EVENT(PM_DATA_FROM_DMEM, 0x1c04a)
+EVENT(PM_L3_LD_MISS, 0x2f082)
+EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e)
+EVENT(PM_DISP_CLB_HELD_RES, 0x02094)
+EVENT(PM_L2_SN_SX_I_DONE, 0x36382)
+EVENT(PM_GRP_CMPL, 0x30004)
+EVENT(PM_STCX_CMPL, 0x0c098)
+EVENT(PM_VSU0_2FLOP, 0x0a098)
+EVENT(PM_L3_PREF_MISS, 0x3f082)
+EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096)
+EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064)
+EVENT(PM_L1_ICACHE_MISS, 0x200fc)
+EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be)
+EVENT(PM_LD_REF_L1_LSU0, 0x0c080)
+EVENT(PM_VSU0_FEST, 0x0a0b8)
+EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890)
+EVENT(PM_FREQ_UP, 0x4000c)
+EVENT(PM_DATA_FROM_LMEM, 0x3c04a)
+EVENT(PM_LSU1_LDX, 0x0c08a)
+EVENT(PM_PMC3_OVERFLOW, 0x40010)
+EVENT(PM_MRK_BR_MPRED, 0x30036)
+EVENT(PM_SHL_MATCH, 0x05086)
+EVENT(PM_MRK_BR_TAKEN, 0x10036)
+EVENT(PM_CMPLU_STALL_BRU, 0x4004e)
+EVENT(PM_ISLB_MISS, 0x0d092)
+EVENT(PM_CYC, 0x0001e)
+EVENT(PM_DISP_HELD_THERMAL, 0x30006)
+EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054)
+EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2)
+EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a)
+EVENT(PM_1PLUS_PPC_CMPL, 0x100f2)
+EVENT(PM_PTEG_FROM_DMEM, 0x2c052)
+EVENT(PM_VSU_2FLOP, 0x0a898)
+EVENT(PM_GCT_FULL_CYC, 0x04086)
+EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020)
+EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d)
+EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c)
+EVENT(PM_BR_MPRED_TA, 0x040ae)
+EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058)
+EVENT(PM_DPU_HELD_POWER, 0x20006)
+EVENT(PM_RUN_INST_CMPL, 0x400fa)
+EVENT(PM_MRK_VSU_FIN, 0x30032)
+EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c)
+EVENT(PM_GCT_EMPTY_CYC, 0x20008)
+EVENT(PM_IOPS_DISP, 0x30014)
+EVENT(PM_RUN_SPURR, 0x10008)
+EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056)
+EVENT(PM_VSU0_1FLOP, 0x0a080)
+EVENT(PM_SNOOP_TLBIE, 0x0d0b2)
+EVENT(PM_DATA_FROM_L3MISS, 0x2c048)
+EVENT(PM_VSU_SINGLE, 0x0a8a8)
+EVENT(PM_DTLB_MISS_16G, 0x1c05e)
+EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c)
+EVENT(PM_FLUSH, 0x400f8)
+EVENT(PM_L2_LD_HIT, 0x36182)
+EVENT(PM_NEST_PAIR2_AND, 0x30883)
+EVENT(PM_VSU1_1FLOP, 0x0a082)
+EVENT(PM_IC_PREF_REQ, 0x0408a)
+EVENT(PM_L3_LD_HIT, 0x2f080)
+EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a)
+EVENT(PM_DISP_HELD, 0x10006)
+EVENT(PM_L2_LD, 0x16080)
+EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc)
+EVENT(PM_BC_PLUS_8_CONV, 0x040b8)
+EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026)
+EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a)
+EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282)
+EVENT(PM_TB_BIT_TRANS, 0x300f8)
+EVENT(PM_THERMAL_MAX, 0x40006)
+EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2)
+EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae)
+EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f)
+EVENT(PM_L3_CO_L31, 0x4f080)
+EVENT(PM_POWER_EVENT4, 0x4006e)
+EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e)
+EVENT(PM_BR_UNCOND, 0x0409e)
+EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa)
+EVENT(PM_PMC4_REWIND, 0x10020)
+EVENT(PM_L2_RCLD_DISP, 0x16280)
+EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2)
+EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058)
+EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098)
+EVENT(PM_LSU_DERAT_MISS, 0x200f6)
+EVENT(PM_IC_PREF_CANCEL_L2, 0x04094)
+EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d)
+EVENT(PM_BR_PRED_CCACHE, 0x040a0)
+EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c)
+EVENT(PM_MRK_ST_CMPL_INT, 0x30034)
+EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6)
+EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048)
+EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
+EVENT(PM_LSU_SET_MPRED, 0x0c0a8)
+EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a)
+EVENT(PM_VSU1_FCONV, 0x0a0b2)
+EVENT(PM_DERAT_MISS_16G, 0x4c05c)
+EVENT(PM_INST_FROM_LMEM, 0x3404a)
+EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a)
+EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018)
+EVENT(PM_INST_PTEG_FROM_L2, 0x1e050)
+EVENT(PM_PTEG_FROM_L2, 0x1c050)
+EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024)
+EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a)
+EVENT(PM_VSU0_FPSCR, 0x0b09c)
+EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082)
+EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052)
+EVENT(PM_MEM0_RQ_DISP, 0x10083)
+EVENT(PM_L2_LD_MISS, 0x26080)
+EVENT(PM_VMX_RESULT_SAT_1, 0x0b0a0)
+EVENT(PM_L1_PREF, 0x0d8b8)
+EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c)
+EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c)
+EVENT(PM_PB_NODE_PUMP, 0x10081)
+EVENT(PM_SHL_MERGED, 0x05084)
+EVENT(PM_NEST_PAIR1_ADD, 0x20881)
+EVENT(PM_DATA_FROM_L3, 0x1c048)
+EVENT(PM_LSU_FLUSH, 0x0208e)
+EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097)
+EVENT(PM_PMC2_OVERFLOW, 0x30010)
+EVENT(PM_LSU_LDF, 0x0c884)
+EVENT(PM_POWER_EVENT3, 0x3006e)
+EVENT(PM_DISP_WT, 0x30008)
+EVENT(PM_CMPLU_STALL_REJECT, 0x40016)
+EVENT(PM_IC_BANK_CONFLICT, 0x04082)
+EVENT(PM_BR_MPRED_CR_TA, 0x048ae)
+EVENT(PM_L2_INST_MISS, 0x36082)
+EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018)
+EVENT(PM_NEST_PAIR2_ADD, 0x30881)
+EVENT(PM_MRK_LSU_FLUSH, 0x0d08c)
+EVENT(PM_L2_LDST, 0x16880)
+EVENT(PM_INST_FROM_L31_SHR, 0x1404e)
+EVENT(PM_VSU0_FIN, 0x0a0bc)
+EVENT(PM_LARX_LSU, 0x0c894)
+EVENT(PM_INST_FROM_RMEM, 0x34042)
+EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096)
+EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e)
+EVENT(PM_BR_PRED_CR, 0x040a8)
+EVENT(PM_LSU_REJECT, 0x10064)
+EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e)
+EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028)
+EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4)
+EVENT(PM_VSU_FEST, 0x0a8b8)
+EVENT(PM_NEST_PAIR0_AND, 0x10883)
+EVENT(PM_PTEG_FROM_L3, 0x2c050)
+EVENT(PM_POWER_EVENT2, 0x2006e)
+EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090)
+EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088)
+EVENT(PM_MRK_GRP_CMPL, 0x40030)
+EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088)
+EVENT(PM_GRP_DISP, 0x3000a)
+EVENT(PM_LSU0_LDX, 0x0c088)
+EVENT(PM_DATA_FROM_L2, 0x1c040)
+EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042)
+EVENT(PM_LD_REF_L1, 0x0c880)
+EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080)
+EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e)
+EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6)
+EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba)
+EVENT(PM_BR_MPRED_CR, 0x040ac)
+EVENT(PM_L3_CO_MEM, 0x4f082)
+EVENT(PM_LD_MISS_L1, 0x400f0)
+EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042)
+EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a)
+EVENT(PM_TABLEWALK_CYC, 0x10026)
+EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052)
+EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0)
+EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052)
+EVENT(PM_FXU0_FIN, 0x10004)
+EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e)
+EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054)
+EVENT(PM_PMC5_OVERFLOW, 0x10024)
+EVENT(PM_LD_REF_L1_LSU1, 0x0c082)
+EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056)
+EVENT(PM_CMPLU_STALL_THRD, 0x1001c)
+EVENT(PM_DATA_FROM_RMEM, 0x3c042)
+EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084)
+EVENT(PM_BR_MPRED_LSTACK, 0x040a6)
+EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028)
+EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4)
+EVENT(PM_LSU_NCST, 0x0c090)
+EVENT(PM_BR_TAKEN, 0x20004)
+EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052)
+EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c)
+EVENT(PM_DTLB_MISS_4K, 0x2c05a)
+EVENT(PM_PMC4_SAVED, 0x30022)
+EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092)
+EVENT(PM_SLB_MISS, 0x0d890)
+EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba)
+EVENT(PM_DTLB_MISS, 0x300fc)
+EVENT(PM_VSU1_FRSP, 0x0a0b6)
+EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880)
+EVENT(PM_L2_CASTOUT_SHR, 0x16182)
+EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044)
+EVENT(PM_VSU1_STF, 0x0b08e)
+EVENT(PM_ST_FIN, 0x200f0)
+EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056)
+EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480)
+EVENT(PM_MRK_STCX_FAIL, 0x0d08e)
+EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac)
+EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092)
+EVENT(PM_L3_PREF_BUSY, 0x4f080)
+EVENT(PM_MRK_BRU_FIN, 0x2003a)
+EVENT(PM_LSU1_NCLD, 0x0c08e)
+EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054)
+EVENT(PM_LSU_NCLD, 0x0c88c)
+EVENT(PM_LSU_LDX, 0x0c888)
+EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480)
+EVENT(PM_THRESH_TIMEO, 0x10038)
+EVENT(PM_L3_PREF_ST, 0x0d0ae)
+EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098)
+EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894)
+EVENT(PM_VSU1_SINGLE, 0x0a0aa)
+EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a)
+EVENT(PM_L2_RC_ST_DONE, 0x36380)
+EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056)
+EVENT(PM_LARX_LSU1, 0x0c096)
+EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042)
+EVENT(PM_DISP_CLB_HELD, 0x02090)
+EVENT(PM_DERAT_MISS_4K, 0x1c05c)
+EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282)
+EVENT(PM_SEG_EXCEPTION, 0x028a4)
+EVENT(PM_FLUSH_DISP_SB, 0x0208c)
+EVENT(PM_L2_DC_INV, 0x26182)
+EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054)
+EVENT(PM_DSEG, 0x020a6)
+EVENT(PM_BR_PRED_LSTACK, 0x040a2)
+EVENT(PM_VSU0_STF, 0x0b08c)
+EVENT(PM_LSU_FX_FIN, 0x10066)
+EVENT(PM_DERAT_MISS_16M, 0x3c05c)
+EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054)
+EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2)
+EVENT(PM_INST_FROM_L3, 0x14048)
+EVENT(PM_MRK_IFU_FIN, 0x3003a)
+EVENT(PM_ITLB_MISS, 0x400fc)
+EVENT(PM_VSU_STF, 0x0b88c)
+EVENT(PM_LSU_FLUSH_UST, 0x0c8b4)
+EVENT(PM_L2_LDST_MISS, 0x26880)
+EVENT(PM_FXU1_FIN, 0x40004)
+EVENT(PM_SHL_DEALLOCATED, 0x05080)
+EVENT(PM_L2_SN_M_WR_DONE, 0x46382)
+EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8)
+EVENT(PM_L3_PREF_LD, 0x0d0ac)
+EVENT(PM_L2_SN_M_RD_DONE, 0x46380)
+EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c)
+EVENT(PM_VSU_FCONV, 0x0a8b0)
+EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa)
+EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4)
+EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082)
+EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e)
+EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020)
+EVENT(PM_INST_IMC_MATCH_DISP, 0x30016)
+EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c)
+EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094)
+EVENT(PM_CMPLU_STALL_DIV, 0x40014)
+EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054)
+EVENT(PM_VSU_FMA_DOUBLE, 0x0a890)
+EVENT(PM_VSU_4FLOP, 0x0a89c)
+EVENT(PM_VSU1_FIN, 0x0a0be)
+EVENT(PM_NEST_PAIR1_AND, 0x20883)
+EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052)
+EVENT(PM_RUN_CYC, 0x200f4)
+EVENT(PM_PTEG_FROM_RMEM, 0x3c052)
+EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e)
+EVENT(PM_LSU0_LDF, 0x0c084)
+EVENT(PM_FLUSH_COMPLETION, 0x30012)
+EVENT(PM_ST_MISS_L1, 0x300f0)
+EVENT(PM_L2_NODE_PUMP, 0x36480)
+EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044)
+EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e)
+EVENT(PM_VSU1_DENORM, 0x0a0ae)
+EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026)
+EVENT(PM_NEST_PAIR0_ADD, 0x10881)
+EVENT(PM_INST_FROM_L3MISS, 0x24048)
+EVENT(PM_EE_OFF_EXT_INT, 0x02080)
+EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052)
+EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c)
+EVENT(PM_PMC6_OVERFLOW, 0x30024)
+EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c)
+EVENT(PM_TLB_MISS, 0x20066)
+EVENT(PM_FXU_BUSY, 0x2000e)
+EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280)
+EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4)
+EVENT(PM_IC_RELOAD_SHR, 0x04096)
+EVENT(PM_GRP_MRK, 0x10031)
+EVENT(PM_MRK_ST_NEST, 0x20034)
+EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a)
+EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8)
+EVENT(PM_LARX_LSU0, 0x0c094)
+EVENT(PM_IBUF_FULL_CYC, 0x04084)
+EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a)
+EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8)
+EVENT(PM_GRP_MRK_CYC, 0x10030)
+EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028)
+EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482)
+EVENT(PM_LSU_REJECT_LHS, 0x0c8ac)
+EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a)
+EVENT(PM_INST_PTEG_FROM_L3, 0x2e050)
+EVENT(PM_FREQ_DOWN, 0x3000c)
+EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081)
+EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c)
+EVENT(PM_MRK_INST_ISSUED, 0x10032)
+EVENT(PM_PTEG_FROM_L3MISS, 0x2c058)
+EVENT(PM_RUN_PURR, 0x400f4)
+EVENT(PM_MRK_GRP_IC_MISS, 0x40038)
+EVENT(PM_MRK_DATA_FROM_L3, 0x1d048)
+EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016)
+EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054)
+EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8)
+EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c)
+EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054)
+EVENT(PM_L2_ST_MISS, 0x26082)
+EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056)
+EVENT(PM_LWSYNC, 0x0d094)
+EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc)
+EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088)
+EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0)
+EVENT(PM_NEST_PAIR3_AND, 0x40883)
+EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081)
+EVENT(PM_MRK_INST_FIN, 0x30030)
+EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054)
+EVENT(PM_INST_FROM_L31_MOD, 0x14044)
+EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e)
+EVENT(PM_LSU_FIN, 0x30066)
+EVENT(PM_MRK_LSU_REJECT, 0x40064)
+EVENT(PM_L2_CO_FAIL_BUSY, 0x16382)
+EVENT(PM_MEM0_WQ_DISP, 0x40083)
+EVENT(PM_DATA_FROM_L31_MOD, 0x1c044)
+EVENT(PM_THERMAL_WARN, 0x10016)
+EVENT(PM_VSU0_4FLOP, 0x0a09c)
+EVENT(PM_BR_MPRED_CCACHE, 0x040a4)
+EVENT(PM_CMPLU_STALL_IFU, 0x4004c)
+EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
+EVENT(PM_FLUSH_BR_MPRED, 0x02084)
+EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e)
+EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052)
+EVENT(PM_L2_RCST_DISP, 0x36280)
+EVENT(PM_CMPLU_STALL, 0x4000a)
+EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa)
+EVENT(PM_DISP_CLB_HELD_SB, 0x020a8)
+EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090)
+EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e)
+EVENT(PM_IC_DEMAND_CYC, 0x10018)
+EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e)
+EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086)
+EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058)
+EVENT(PM_VSU_DENORM, 0x0a8ac)
+EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080)
+EVENT(PM_INST_FROM_L21_SHR, 0x3404e)
+EVENT(PM_IC_PREF_WRITE, 0x0408e)
+EVENT(PM_BR_PRED, 0x0409c)
+EVENT(PM_INST_FROM_DMEM, 0x1404a)
+EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890)
+EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4)
+EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a)
+EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c)
+EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280)
+EVENT(PM_VSU1_DD_ISSUED, 0x0b098)
+EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056)
+EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e)
+EVENT(PM_LSU0_NCLD, 0x0c08c)
+EVENT(PM_VSU1_4FLOP, 0x0a09e)
+EVENT(PM_VSU1_8FLOP, 0x0a0a2)
+EVENT(PM_VSU_8FLOP, 0x0a8a0)
+EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e)
+EVENT(PM_DTLB_MISS_64K, 0x3c05e)
+EVENT(PM_THRD_CONC_RUN_INST, 0x300f4)
+EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050)
+EVENT(PM_PB_SYS_PUMP, 0x20081)
+EVENT(PM_VSU_FIN, 0x0a8bc)
+EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044)
+EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0)
+EVENT(PM_DERAT_MISS_64K, 0x2c05c)
+EVENT(PM_PMC2_REWIND, 0x30020)
+EVENT(PM_INST_FROM_L2, 0x14040)
+EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a)
+EVENT(PM_INST_DISP, 0x200f2)
+EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083)
+EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4)
+EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6)
+EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888)
+EVENT(PM_L3_PREF_HIT, 0x3f080)
+EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054)
+EVENT(PM_CMPLU_STALL_STORE, 0x2004a)
+EVENT(PM_MRK_FXU_FIN, 0x20038)
+EVENT(PM_PMC4_OVERFLOW, 0x10010)
+EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050)
+EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098)
+EVENT(PM_BTAC_HIT, 0x0508a)
+EVENT(PM_L3_RD_BUSY, 0x4f082)
+EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c)
+EVENT(PM_INST_FROM_L2MISS, 0x44048)
+EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8)
+EVENT(PM_L2_ST, 0x16082)
+EVENT(PM_VSU0_DENORM, 0x0a0ac)
+EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044)
+EVENT(PM_BR_PRED_CR_TA, 0x048aa)
+EVENT(PM_VSU0_FCONV, 0x0a0b0)
+EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084)
+EVENT(PM_BTAC_MISS, 0x05088)
+EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f)
+EVENT(PM_MRK_DATA_FROM_L2, 0x1d040)
+EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2)
+EVENT(PM_VSU_FMA, 0x0a884)
+EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc)
+EVENT(PM_LSU1_L1_PREF, 0x0d0ba)
+EVENT(PM_IOPS_CMPL, 0x10014)
+EVENT(PM_L2_SYS_PUMP, 0x36482)
+EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282)
+EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1)
+EVENT(PM_FLUSH_DISP_SYNC, 0x02088)
+EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a)
+EVENT(PM_L2_IC_INV, 0x26180)
+EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024)
+EVENT(PM_L3_PREF_LDST, 0x0d8ac)
+EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008)
+EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0)
+EVENT(PM_FLUSH_PARTIAL, 0x02086)
+EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092)
+EVENT(PM_1PLUS_PPC_DISP, 0x400f2)
+EVENT(PM_DATA_FROM_L2MISS, 0x200fe)
+EVENT(PM_SUSPENDED, 0x00000)
+EVENT(PM_VSU0_FMA, 0x0a084)
+EVENT(PM_CMPLU_STALL_SCALAR, 0x40012)
+EVENT(PM_STCX_FAIL, 0x0c09a)
+EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094)
+EVENT(PM_DC_PREF_DST, 0x0d0b0)
+EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086)
+EVENT(PM_L3_HIT, 0x1f080)
+EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482)
+EVENT(PM_MRK_DFU_FIN, 0x20032)
+EVENT(PM_INST_FROM_L1, 0x04080)
+EVENT(PM_BRU_FIN, 0x10068)
+EVENT(PM_IC_DEMAND_REQ, 0x04088)
+EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096)
+EVENT(PM_VSU1_FMA, 0x0a086)
+EVENT(PM_MRK_LD_MISS_L1, 0x20036)
+EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c)
+EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc)
+EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056)
+EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064)
+EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048)
+EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c)
+EVENT(PM_INST_FROM_PREF, 0x14046)
+EVENT(PM_VSU1_SQ, 0x0b09e)
+EVENT(PM_L2_LD_DISP, 0x36180)
+EVENT(PM_L2_DISP_ALL, 0x46080)
+EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012)
+EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894)
+EVENT(PM_BR_MPRED, 0x400f6)
+EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054)
+EVENT(PM_VSU_1FLOP, 0x0a880)
+EVENT(PM_HV_CYC, 0x2000a)
+EVENT(PM_MRK_LSU_FIN, 0x40032)
+EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c)
+EVENT(PM_DTLB_MISS_16M, 0x4c05e)
+EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a)
+EVENT(PM_IFU_FIN, 0x40066)
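
power7-events-list.h is an X-macro table: the header defines no symbols itself, and each consumer defines EVENT() to stamp out whatever it needs (enum constants, attribute definitions, pointer arrays) before including it, as the power7-pmu.c hunks below show. The pattern in miniature, with a hypothetical events.def holding lines such as EVENT(PM_CYC, 0x0001e):

	#define EVENT(name, code) name = code,
	enum power7_event {
	#include "events.def"
	};
	#undef EVENT

	#define EVENT(name, code) #name,
	static const char *power7_event_name[] = {
	#include "events.def"
	};
	#undef EVENT

One table, one point of maintenance: adding an event to the list updates every expansion at once.
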
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index d1821b8bbc4c..56c67bca2f75 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -53,37 +53,13 @@
/*
* Power7 event codes.
*/
-#define PME_PM_CYC 0x1e
-#define PME_PM_GCT_NOSLOT_CYC 0x100f8
-#define PME_PM_CMPLU_STALL 0x4000a
-#define PME_PM_INST_CMPL 0x2
-#define PME_PM_LD_REF_L1 0xc880
-#define PME_PM_LD_MISS_L1 0x400f0
-#define PME_PM_BRU_FIN 0x10068
-#define PME_PM_BR_MPRED 0x400f6
-
-#define PME_PM_CMPLU_STALL_FXU 0x20014
-#define PME_PM_CMPLU_STALL_DIV 0x40014
-#define PME_PM_CMPLU_STALL_SCALAR 0x40012
-#define PME_PM_CMPLU_STALL_SCALAR_LONG 0x20018
-#define PME_PM_CMPLU_STALL_VECTOR 0x2001c
-#define PME_PM_CMPLU_STALL_VECTOR_LONG 0x4004a
-#define PME_PM_CMPLU_STALL_LSU 0x20012
-#define PME_PM_CMPLU_STALL_REJECT 0x40016
-#define PME_PM_CMPLU_STALL_ERAT_MISS 0x40018
-#define PME_PM_CMPLU_STALL_DCACHE_MISS 0x20016
-#define PME_PM_CMPLU_STALL_STORE 0x2004a
-#define PME_PM_CMPLU_STALL_THRD 0x1001c
-#define PME_PM_CMPLU_STALL_IFU 0x4004c
-#define PME_PM_CMPLU_STALL_BRU 0x4004e
-#define PME_PM_GCT_NOSLOT_IC_MISS 0x2001a
-#define PME_PM_GCT_NOSLOT_BR_MPRED 0x4001a
-#define PME_PM_GCT_NOSLOT_BR_MPRED_IC_MISS 0x4001c
-#define PME_PM_GRP_CMPL 0x30004
-#define PME_PM_1PLUS_PPC_CMPL 0x100f2
-#define PME_PM_CMPLU_STALL_DFU 0x2003c
-#define PME_PM_RUN_CYC 0x200f4
-#define PME_PM_RUN_INST_CMPL 0x400fa
+#define EVENT(_name, _code) \
+ PME_##_name = _code,
+
+enum {
+#include "power7-events-list.h"
+};
+#undef EVENT
/*
* Layout of constraint bits:
@@ -398,96 +374,36 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
};
-GENERIC_EVENT_ATTR(cpu-cycles, CYC);
-GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
-GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
-GENERIC_EVENT_ATTR(instructions, INST_CMPL);
-GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
-GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
-GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
-GENERIC_EVENT_ATTR(branch-misses, BR_MPRED);
-
-POWER_EVENT_ATTR(CYC, CYC);
-POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
-POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
-POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
-POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
-POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
-POWER_EVENT_ATTR(BRU_FIN, BRU_FIN)
-POWER_EVENT_ATTR(BR_MPRED, BR_MPRED);
-
-POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU);
-POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV);
-POWER_EVENT_ATTR(CMPLU_STALL_SCALAR, CMPLU_STALL_SCALAR);
-POWER_EVENT_ATTR(CMPLU_STALL_SCALAR_LONG, CMPLU_STALL_SCALAR_LONG);
-POWER_EVENT_ATTR(CMPLU_STALL_VECTOR, CMPLU_STALL_VECTOR);
-POWER_EVENT_ATTR(CMPLU_STALL_VECTOR_LONG, CMPLU_STALL_VECTOR_LONG);
-POWER_EVENT_ATTR(CMPLU_STALL_LSU, CMPLU_STALL_LSU);
-POWER_EVENT_ATTR(CMPLU_STALL_REJECT, CMPLU_STALL_REJECT);
-
-POWER_EVENT_ATTR(CMPLU_STALL_ERAT_MISS, CMPLU_STALL_ERAT_MISS);
-POWER_EVENT_ATTR(CMPLU_STALL_DCACHE_MISS, CMPLU_STALL_DCACHE_MISS);
-POWER_EVENT_ATTR(CMPLU_STALL_STORE, CMPLU_STALL_STORE);
-POWER_EVENT_ATTR(CMPLU_STALL_THRD, CMPLU_STALL_THRD);
-POWER_EVENT_ATTR(CMPLU_STALL_IFU, CMPLU_STALL_IFU);
-POWER_EVENT_ATTR(CMPLU_STALL_BRU, CMPLU_STALL_BRU);
-POWER_EVENT_ATTR(GCT_NOSLOT_IC_MISS, GCT_NOSLOT_IC_MISS);
-
-POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED, GCT_NOSLOT_BR_MPRED);
-POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED_IC_MISS, GCT_NOSLOT_BR_MPRED_IC_MISS);
-POWER_EVENT_ATTR(GRP_CMPL, GRP_CMPL);
-POWER_EVENT_ATTR(1PLUS_PPC_CMPL, 1PLUS_PPC_CMPL);
-POWER_EVENT_ATTR(CMPLU_STALL_DFU, CMPLU_STALL_DFU);
-POWER_EVENT_ATTR(RUN_CYC, RUN_CYC);
-POWER_EVENT_ATTR(RUN_INST_CMPL, RUN_INST_CMPL);
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED);
+
+#define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name);
+#include "power7-events-list.h"
+#undef EVENT
+
+#define EVENT(_name, _code) POWER_EVENT_PTR(_name),
static struct attribute *power7_events_attr[] = {
- GENERIC_EVENT_PTR(CYC),
- GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
- GENERIC_EVENT_PTR(CMPLU_STALL),
- GENERIC_EVENT_PTR(INST_CMPL),
- GENERIC_EVENT_PTR(LD_REF_L1),
- GENERIC_EVENT_PTR(LD_MISS_L1),
- GENERIC_EVENT_PTR(BRU_FIN),
- GENERIC_EVENT_PTR(BR_MPRED),
-
- POWER_EVENT_PTR(CYC),
- POWER_EVENT_PTR(GCT_NOSLOT_CYC),
- POWER_EVENT_PTR(CMPLU_STALL),
- POWER_EVENT_PTR(INST_CMPL),
- POWER_EVENT_PTR(LD_REF_L1),
- POWER_EVENT_PTR(LD_MISS_L1),
- POWER_EVENT_PTR(BRU_FIN),
- POWER_EVENT_PTR(BR_MPRED),
-
- POWER_EVENT_PTR(CMPLU_STALL_FXU),
- POWER_EVENT_PTR(CMPLU_STALL_DIV),
- POWER_EVENT_PTR(CMPLU_STALL_SCALAR),
- POWER_EVENT_PTR(CMPLU_STALL_SCALAR_LONG),
- POWER_EVENT_PTR(CMPLU_STALL_VECTOR),
- POWER_EVENT_PTR(CMPLU_STALL_VECTOR_LONG),
- POWER_EVENT_PTR(CMPLU_STALL_LSU),
- POWER_EVENT_PTR(CMPLU_STALL_REJECT),
-
- POWER_EVENT_PTR(CMPLU_STALL_ERAT_MISS),
- POWER_EVENT_PTR(CMPLU_STALL_DCACHE_MISS),
- POWER_EVENT_PTR(CMPLU_STALL_STORE),
- POWER_EVENT_PTR(CMPLU_STALL_THRD),
- POWER_EVENT_PTR(CMPLU_STALL_IFU),
- POWER_EVENT_PTR(CMPLU_STALL_BRU),
- POWER_EVENT_PTR(GCT_NOSLOT_IC_MISS),
- POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED),
-
- POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED_IC_MISS),
- POWER_EVENT_PTR(GRP_CMPL),
- POWER_EVENT_PTR(1PLUS_PPC_CMPL),
- POWER_EVENT_PTR(CMPLU_STALL_DFU),
- POWER_EVENT_PTR(RUN_CYC),
- POWER_EVENT_PTR(RUN_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(PM_CMPLU_STALL),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ GENERIC_EVENT_PTR(PM_BRU_FIN),
+ GENERIC_EVENT_PTR(PM_BR_MPRED),
+
+ #include "power7-events-list.h"
+ #undef EVENT
NULL
};
-
static struct attribute_group power7_pmu_events_group = {
.name = "events",
.attrs = power7_events_attr,
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 4cfa49901c02..534574a97ec9 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
-#include <linux/of_i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index cba1e6be68e5..ce73ce865613 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -90,7 +90,7 @@ static int __init ps3_rtc_init(void)
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
module_init(ps3_rtc_init);
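
PTR_RET() was renamed PTR_ERR_OR_ZERO(), so the name now states the semantics: collapse an ERR_PTR-style pointer into an errno-or-zero int. Its behaviour is equivalent to:

	static inline int ptr_err_or_zero(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}
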
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 6a5f2b1f32ca..d276cd3edd8f 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -539,36 +539,6 @@ static int zip_oops(size_t text_len)
}
#ifdef CONFIG_PSTORE
-/* Derived from logfs_uncompress */
-int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
-{
- int err, ret;
-
- ret = -EIO;
- err = zlib_inflateInit(&stream);
- if (err != Z_OK)
- goto error;
-
- stream.next_in = in;
- stream.avail_in = inlen;
- stream.total_in = 0;
- stream.next_out = out;
- stream.avail_out = outlen;
- stream.total_out = 0;
-
- err = zlib_inflate(&stream, Z_FINISH);
- if (err != Z_STREAM_END)
- goto error;
-
- err = zlib_inflateEnd(&stream);
- if (err != Z_OK)
- goto error;
-
- ret = stream.total_out;
-error:
- return ret;
-}
-
static int nvram_pstore_open(struct pstore_info *psi)
{
/* Reset the iterator to start reading partitions again */
@@ -584,7 +554,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
 * @part: pstore writes data to the registered buffer in parts;
 * this argument gives the part number of the current chunk.
* @count: Indicates oops count
- * @hsize: Size of header added by pstore
+ * @compressed: Flag to indicate the log is compressed
* @size: number of bytes written to the registered buffer
* @psi: registered pstore_info structure
*
@@ -595,7 +565,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
static int nvram_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason,
u64 *id, unsigned int part, int count,
- size_t hsize, size_t size,
+ bool compressed, size_t size,
struct pstore_info *psi)
{
int rc;
@@ -611,30 +581,11 @@ static int nvram_pstore_write(enum pstore_type_id type,
oops_hdr->report_length = (u16) size;
oops_hdr->timestamp = get_seconds();
- if (big_oops_buf) {
- rc = zip_oops(size);
- /*
- * If compression fails copy recent log messages from
- * big_oops_buf to oops_data.
- */
- if (rc != 0) {
- size_t diff = size - oops_data_sz + hsize;
-
- if (size > oops_data_sz) {
- memcpy(oops_data, big_oops_buf, hsize);
- memcpy(oops_data + hsize, big_oops_buf + diff,
- oops_data_sz - hsize);
-
- oops_hdr->report_length = (u16) oops_data_sz;
- } else
- memcpy(oops_data, big_oops_buf, size);
- } else
- err_type = ERR_TYPE_KERNEL_PANIC_GZ;
- }
+ if (compressed)
+ err_type = ERR_TYPE_KERNEL_PANIC_GZ;
rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
- count);
+ (int) (sizeof(*oops_hdr) + size), err_type, count);
if (rc != 0)
return rc;
@@ -650,12 +601,12 @@ static int nvram_pstore_write(enum pstore_type_id type,
*/
static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time, char **buf,
- struct pstore_info *psi)
+ bool *compressed, struct pstore_info *psi)
{
struct oops_log_info *oops_hdr;
unsigned int err_type, id_no, size = 0;
struct nvram_os_partition *part = NULL;
- char *buff = NULL, *big_buff = NULL;
+ char *buff = NULL;
int sig = 0;
loff_t p;
@@ -719,8 +670,7 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
*id = id_no;
if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
- int length, unzipped_len;
- size_t hdr_size;
+ size_t length, hdr_size;
oops_hdr = (struct oops_log_info *)buff;
if (oops_hdr->version < OOPS_HDR_VERSION) {
@@ -741,23 +691,10 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
memcpy(*buf, buff + hdr_size, length);
kfree(buff);
- if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
- big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (!big_buff)
- return -ENOMEM;
-
- unzipped_len = nvram_decompress(*buf, big_buff,
- length, big_oops_buf_sz);
-
- if (unzipped_len < 0) {
- pr_err("nvram: decompression failed, returned "
- "rc %d\n", unzipped_len);
- kfree(big_buff);
- } else {
- *buf = big_buff;
- length = unzipped_len;
- }
- }
+ if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+ *compressed = true;
+ else
+ *compressed = false;
return length;
}
@@ -777,13 +714,8 @@ static int nvram_pstore_init(void)
{
int rc = 0;
- if (big_oops_buf) {
- nvram_pstore_info.buf = big_oops_buf;
- nvram_pstore_info.bufsize = big_oops_buf_sz;
- } else {
- nvram_pstore_info.buf = oops_data;
- nvram_pstore_info.bufsize = oops_data_sz;
- }
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
rc = pstore_register(&nvram_pstore_info);
if (rc != 0)
@@ -802,7 +734,6 @@ static int nvram_pstore_init(void)
static void __init nvram_init_oops_partition(int rtas_partition_exists)
{
int rc;
- size_t size;
rc = pseries_nvram_init_os_partition(&oops_log_partition);
if (rc != 0) {
@@ -823,6 +754,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
oops_data = oops_buf + sizeof(struct oops_log_info);
oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
+ rc = nvram_pstore_init();
+
+ if (!rc)
+ return;
+
/*
* Figure compression (preceded by elimination of each line's <n>
* severity prefix) will reduce the oops/panic report to at most
@@ -831,9 +767,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
- size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
- zlib_inflate_workspacesize());
- stream.workspace = kmalloc(size, GFP_KERNEL);
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",
@@ -847,11 +782,6 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
stream.workspace = NULL;
}
- rc = nvram_pstore_init();
-
- if (!rc)
- return;
-
rc = kmsg_dump_register(&nvram_kmsg_dumper);
if (rc != 0) {
pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index af79e1ea74b6..af0f9beddca9 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,7 +62,7 @@ static int __init add_rtc(void)
pd = platform_device_register_simple("rtc_cmos", -1,
&res[0], num_res);
- return PTR_RET(pd);
+ return PTR_ERR_OR_ZERO(pd);
}
fs_initcall(add_rtc);