-rw-r--r--  arch/powerpc/include/asm/imc-pmu.h          6
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c           4
-rw-r--r--  arch/powerpc/lib/code-patching.c            6
-rw-r--r--  arch/powerpc/mm/slice.c                    34
-rw-r--r--  arch/powerpc/perf/imc-pmu.c                27
-rw-r--r--  arch/powerpc/platforms/powernv/opal-imc.c  22
-rw-r--r--  arch/powerpc/platforms/powernv/vas.c        1
7 files changed, 66 insertions, 34 deletions
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 7f74c282710f..fad0e6ff460f 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -21,11 +21,6 @@
#include <asm/opal.h>
/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS 32
-
-/*
* Compatibility macros for IMC devices
*/
#define IMC_DTB_COMPAT "ibm,opal-in-memory-counters"
@@ -125,4 +120,5 @@ enum {
extern int init_imc_pmu(struct device_node *parent,
struct imc_pmu *pmu_ptr, int pmu_id);
extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
#endif /* __ASM_POWERPC_IMC_PMU_H */
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 602e0fde19b4..8bdc2f96c5d6 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -735,8 +735,8 @@ static __init void cpufeatures_cpu_quirks(void)
*/
if ((version & 0xffffff00) == 0x004e0100)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
- else if ((version & 0xffffefff) == 0x004e0200)
- cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
+ else if ((version & 0xffffefff) == 0x004e0201)
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
static void __init cpufeatures_setup_finished(void)
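
The new check masks out bit 0x1000 of the PVR before comparing, so a single compare matches the DD2.1 revision regardless of that bit. A quick worked example of the masking (the PVR values here are illustrative, not taken from this patch):

    0x004e0201 & 0xffffefff = 0x004e0201   -> match, CPU_FTR_POWER9_DD2_1 is set
    0x004e1201 & 0xffffefff = 0x004e0201   -> also a match; only bit 0x1000 differs
    0x004e0200 & 0xffffefff = 0x004e0200   -> no match, the DD2.1 feature bit stays clear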
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c9de03e0c1f1..d469224c4ada 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -21,6 +21,7 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
+#include <asm/setup.h>
static int __patch_instruction(unsigned int *addr, unsigned int instr)
{
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
* During early early boot patch_instruction is called
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
- * We use slab_is_available and per cpu read * via this_cpu_read
- * of text_poke_area. Per-CPU areas might not be up early
- * this can create problems with just using this_cpu_read()
*/
- if (!slab_is_available() || !this_cpu_read(text_poke_area))
+ if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
return __patch_instruction(addr, instr);
local_irq_save(flags);
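
Condensed, this hunk leaves patch_instruction() with the control flow sketched below. This is a sketch of the diff's logic only, not a drop-in copy of the file; the poke-area mapping body is elided:

    int patch_instruction(unsigned int *addr, unsigned int instr)
    {
            unsigned long flags;

            /*
             * Early in boot the per-CPU text_poke_area is still NULL, and the
             * kernel may not even be relocated yet, so the pointer is read
             * through PTRRELOC; in that case fall back to patching the
             * instruction directly.
             */
            if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
                    return __patch_instruction(addr, instr);

            local_irq_save(flags);
            /* ... map text_poke_area over the target page and patch via it ... */
            local_irq_restore(flags);
            return 0;
    }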
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 564fff06f5c1..23ec2c5e3b78 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
return !slice_area_is_free(mm, start, end - start);
}
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned long i;
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+ if (high_limit <= SLICE_LOW_TOP)
return;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices);
}
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned char *hpsizes;
int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
if (((lpsizes >> (i * 4)) & 0xf) == psize)
ret->low_slices |= 1u << i;
+ if (high_limit <= SLICE_LOW_TOP)
+ return;
+
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
struct slice_mask mask, struct slice_mask available)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+ /*
+ * Make sure we do the bit comparison only up to the max
+ * addr limit and not the full bitmap size.
+ */
unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* First make up a "good" mask of slices that have the right size
* already
*/
- slice_mask_for_size(mm, psize, &good_mask);
+ slice_mask_for_size(mm, psize, &good_mask, high_limit);
slice_print_mask(" good_mask", good_mask);
/*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
#ifdef CONFIG_PPC_64K_PAGES
/* If we support combo pages, we can allow 64k pages in 4k slices */
if (psize == MMU_PAGE_64K) {
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
if (fixed)
slice_or_mask(&good_mask, &compat_mask);
}
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
return newaddr;
}
}
-
- /* We don't fit in the good mask, check what other slices are
+ /*
+ * We don't fit in the good mask, check what other slices are
* empty and thus can be converted
*/
- slice_mask_for_free(mm, &potential_mask);
+ slice_mask_for_free(mm, &potential_mask, high_limit);
slice_or_mask(&potential_mask, &good_mask);
slice_print_mask(" potential", potential_mask);
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
{
struct slice_mask mask, available;
unsigned int psize = mm->context.user_psize;
+ unsigned long high_limit = mm->context.slb_addr_limit;
if (radix_enabled())
return 0;
slice_range_to_mask(addr, len, &mask);
- slice_mask_for_size(mm, psize, &available);
+ slice_mask_for_size(mm, psize, &available, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
/* We need to account for 4k slices too */
if (psize == MMU_PAGE_64K) {
struct slice_mask compat_mask;
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
slice_or_mask(&available, &compat_mask);
}
#endif
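
Passing high_limit explicitly means the helpers above only walk the slice masks up to the address limit actually being asked for, and return before touching high_slices at all when that limit fits below SLICE_LOW_TOP. As a rough illustration, assuming the usual book3s64 hash constants (SLICE_HIGH_SHIFT == 40, i.e. 1TB high slices, and SLICE_LOW_TOP at 4GB) rather than anything stated in this patch:

    GET_HIGH_SLICE_INDEX(128TB) = 128TB >> 40 = 128   /* default 128TB address space   */
    GET_HIGH_SLICE_INDEX(512TB) = 512TB >> 40 = 512   /* expanded address-space limit  */
    GET_HIGH_SLICE_INDEX(4GB)   = 0                   /* high_limit <= SLICE_LOW_TOP:
                                                         the new early return is taken */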
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 36344117c680..0ead3cd73caa 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -26,7 +26,7 @@
*/
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;
@@ -286,13 +286,14 @@ static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
struct imc_pmu **pn = per_nest_pmu_arr;
- int i;
if (old_cpu < 0 || new_cpu < 0)
return;
- for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+ while (*pn) {
perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+ pn++;
+ }
}
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
@@ -467,7 +468,7 @@ static int nest_imc_event_init(struct perf_event *event)
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
* Get the base memory address for this cpu.
*/
- chip_id = topology_physical_package_id(event->cpu);
+ chip_id = cpu_to_chip_id(event->cpu);
pcni = pmu->mem_info;
do {
if (pcni->id == chip_id) {
@@ -524,19 +525,19 @@ static int nest_imc_event_init(struct perf_event *event)
*/
static int core_imc_mem_init(int cpu, int size)
{
- int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+ int nid, rc = 0, core_id = (cpu / threads_per_core);
struct imc_mem_info *mem_info;
/*
* alloc_pages_node() will allocate memory for core in the
* local node only.
*/
- phys_id = topology_physical_package_id(cpu);
+ nid = cpu_to_node(cpu);
mem_info = &core_imc_pmu->mem_info[core_id];
mem_info->id = core_id;
/* We need only vbase for core counters */
- mem_info->vbase = page_address(alloc_pages_node(phys_id,
+ mem_info->vbase = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
@@ -797,14 +798,14 @@ static int core_imc_event_init(struct perf_event *event)
static int thread_imc_mem_alloc(int cpu_id, int size)
{
u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
- int phys_id = topology_physical_package_id(cpu_id);
+ int nid = cpu_to_node(cpu_id);
if (!local_mem) {
/*
* This case could happen only once at start, since we don't
* free the memory in cpu offline path.
*/
- local_mem = page_address(alloc_pages_node(phys_id,
+ local_mem = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!local_mem)
@@ -1194,6 +1195,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
kfree(pmu_ptr);
+ kfree(per_nest_pmu_arr);
return;
}
@@ -1218,6 +1220,13 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
return -ENOMEM;
/* Needed for hotplug/migration */
+ if (!per_nest_pmu_arr) {
+ per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+ sizeof(struct imc_pmu *),
+ GFP_KERNEL);
+ if (!per_nest_pmu_arr)
+ return -ENOMEM;
+ }
per_nest_pmu_arr[pmu_index] = pmu_ptr;
break;
case IMC_DOMAIN_CORE:
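
The allocation fixes in this file all follow one pattern: alloc_pages_node() expects a NUMA node id, which cpu_to_node() provides, whereas topology_physical_package_id() returns a package/chip id that need not be a valid node number. A minimal sketch of the corrected allocation, using the same calls and flags as the hunks above, just rearranged to check the page pointer before mapping it:

    /* Allocate the counter page on the memory node that owns @cpu. */
    int nid = cpu_to_node(cpu);
    struct page *page = alloc_pages_node(nid,
                                         GFP_KERNEL | __GFP_ZERO |
                                         __GFP_THISNODE | __GFP_NOWARN,
                                         get_order(size));
    if (!page)
            return -ENOMEM;
    mem_info->vbase = page_address(page);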
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 21f6531fae20..465ea105b771 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -153,6 +153,22 @@ static void disable_core_pmu_counters(void)
put_online_cpus();
}
+int get_max_nest_dev(void)
+{
+ struct device_node *node;
+ u32 pmu_units = 0, type;
+
+ for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+ if (of_property_read_u32(node, "type", &type))
+ continue;
+
+ if (type == IMC_TYPE_CHIP)
+ pmu_units++;
+ }
+
+ return pmu_units;
+}
+
static int opal_imc_counters_probe(struct platform_device *pdev)
{
struct device_node *imc_dev = pdev->dev.of_node;
@@ -191,8 +207,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
break;
}
- if (!imc_pmu_create(imc_dev, pmu_count, domain))
- pmu_count++;
+ if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+ if (domain == IMC_DOMAIN_NEST)
+ pmu_count++;
+ }
}
return 0;
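
get_max_nest_dev() simply counts the IMC_TYPE_CHIP units present in the device tree; its consumer is the imc_mem_init() hunk earlier in this diff, which uses the count to size per_nest_pmu_arr in place of the old static IMC_MAX_PMUS array. Condensed from that hunk, with comments added here for illustration:

    /* Size the nest PMU pointer array from the device tree on first use. */
    if (!per_nest_pmu_arr) {
            /*
             * The extra slot stays zeroed by kcalloc(), keeping the array
             * NULL-terminated for the while (*pn) walk in
             * nest_change_cpu_context().
             */
            per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
                                       sizeof(struct imc_pmu *),
                                       GFP_KERNEL);
            if (!per_nest_pmu_arr)
                    return -ENOMEM;
    }
    per_nest_pmu_arr[pmu_index] = pmu_ptr;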
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
index c488621dbec3..aebbe95c9230 100644
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -135,6 +135,7 @@ int chip_to_vas_id(int chipid)
}
return -1;
}
+EXPORT_SYMBOL(chip_to_vas_id);
static int vas_probe(struct platform_device *pdev)
{