Diffstat (limited to 'drivers/acpi/numa/hmat.c')
-rw-r--r--	drivers/acpi/numa/hmat.c	93
1 file changed, 72 insertions(+), 21 deletions(-)
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 134bcb40b2af..cb73a5d6ea76 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -63,7 +63,7 @@ struct memory_target {
 	unsigned int memory_pxm;
 	unsigned int processor_pxm;
 	struct resource memregions;
-	struct node_hmem_attrs hmem_attrs;
+	struct node_hmem_attrs hmem_attrs[2];
 	struct list_head caches;
 	struct node_cache_attrs cache_attrs;
 	bool registered;
@@ -72,6 +72,7 @@ struct memory_target {
 struct memory_initiator {
 	struct list_head node;
 	unsigned int processor_pxm;
+	bool has_cpu;
 };
 
 struct memory_locality {
@@ -115,6 +116,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
 		return;
 
 	initiator->processor_pxm = cpu_pxm;
+	initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
 	list_add_tail(&initiator->node, &initiators);
 }
@@ -222,28 +224,28 @@ static u32 hmat_normalize(u16 entry, u64 base, u8 type)
 }
 
 static void hmat_update_target_access(struct memory_target *target,
-				      u8 type, u32 value)
+				      u8 type, u32 value, int access)
 {
 	switch (type) {
 	case ACPI_HMAT_ACCESS_LATENCY:
-		target->hmem_attrs.read_latency = value;
-		target->hmem_attrs.write_latency = value;
+		target->hmem_attrs[access].read_latency = value;
+		target->hmem_attrs[access].write_latency = value;
 		break;
 	case ACPI_HMAT_READ_LATENCY:
-		target->hmem_attrs.read_latency = value;
+		target->hmem_attrs[access].read_latency = value;
 		break;
 	case ACPI_HMAT_WRITE_LATENCY:
-		target->hmem_attrs.write_latency = value;
+		target->hmem_attrs[access].write_latency = value;
 		break;
 	case ACPI_HMAT_ACCESS_BANDWIDTH:
-		target->hmem_attrs.read_bandwidth = value;
-		target->hmem_attrs.write_bandwidth = value;
+		target->hmem_attrs[access].read_bandwidth = value;
+		target->hmem_attrs[access].write_bandwidth = value;
 		break;
 	case ACPI_HMAT_READ_BANDWIDTH:
-		target->hmem_attrs.read_bandwidth = value;
+		target->hmem_attrs[access].read_bandwidth = value;
 		break;
 	case ACPI_HMAT_WRITE_BANDWIDTH:
-		target->hmem_attrs.write_bandwidth = value;
+		target->hmem_attrs[access].write_bandwidth = value;
 		break;
 	default:
 		break;
@@ -336,8 +338,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
 
 			if (mem_hier == ACPI_HMAT_MEMORY) {
 				target = find_mem_target(targs[targ]);
-				if (target && target->processor_pxm == inits[init])
-					hmat_update_target_access(target, type, value);
+				if (target && target->processor_pxm == inits[init]) {
+					hmat_update_target_access(target, type, value, 0);
+					/* If the node has a CPU, update access 1 */
+					if (node_state(pxm_to_node(inits[init]), N_CPU))
+						hmat_update_target_access(target, type, value, 1);
+				}
 			}
 		}
 	}
@@ -431,7 +437,8 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
 		pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
 			p->flags, p->processor_PD, p->memory_PD);
 
-	if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
+	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
+	    hmat_revision > 1) {
 		target = find_mem_target(p->memory_PD);
 		if (!target) {
 			pr_debug("HMAT: Memory Domain missing from SRAT\n");
@@ -573,6 +580,7 @@ static void hmat_register_target_initiators(struct memory_target *target)
 	unsigned int mem_nid, cpu_nid;
 	struct memory_locality *loc = NULL;
 	u32 best = 0;
+	bool access0done = false;
 	int i;
 
 	mem_nid = pxm_to_node(target->memory_pxm);
@@ -584,7 +592,11 @@ static void hmat_register_target_initiators(struct memory_target *target)
 	if (target->processor_pxm != PXM_INVAL) {
 		cpu_nid = pxm_to_node(target->processor_pxm);
 		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
-		return;
+		access0done = true;
+		if (node_state(cpu_nid, N_CPU)) {
+			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+			return;
+		}
 	}
 
 	if (list_empty(&localities))
@@ -598,6 +610,41 @@ static void hmat_register_target_initiators(struct memory_target *target)
 	 */
 	bitmap_zero(p_nodes, MAX_NUMNODES);
 	list_sort(p_nodes, &initiators, initiator_cmp);
+	if (!access0done) {
+		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
+			loc = localities_types[i];
+			if (!loc)
+				continue;
+
+			best = 0;
+			list_for_each_entry(initiator, &initiators, node) {
+				u32 value;
+
+				if (!test_bit(initiator->processor_pxm, p_nodes))
+					continue;
+
+				value = hmat_initiator_perf(target, initiator,
+							    loc->hmat_loc);
+				if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
+					bitmap_clear(p_nodes, 0, initiator->processor_pxm);
+				if (value != best)
+					clear_bit(initiator->processor_pxm, p_nodes);
+			}
+			if (best)
+				hmat_update_target_access(target, loc->hmat_loc->data_type,
+							  best, 0);
+		}
+
+		for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
+			cpu_nid = pxm_to_node(i);
+			register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+		}
+	}
+
+	/* Access 1 ignores Generic Initiators */
+	bitmap_zero(p_nodes, MAX_NUMNODES);
+	list_sort(p_nodes, &initiators, initiator_cmp);
+	best = 0;
 	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
 		loc = localities_types[i];
 		if (!loc)
@@ -607,6 +654,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
 		list_for_each_entry(initiator, &initiators, node) {
 			u32 value;
 
+			if (!initiator->has_cpu) {
+				clear_bit(initiator->processor_pxm, p_nodes);
+				continue;
+			}
 			if (!test_bit(initiator->processor_pxm, p_nodes))
 				continue;
 
@@ -617,12 +668,11 @@ static void hmat_register_target_initiators(struct memory_target *target)
 				clear_bit(initiator->processor_pxm, p_nodes);
 		}
 		if (best)
-			hmat_update_target_access(target, loc->hmat_loc->data_type, best);
+			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
 	}
-
 	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
 		cpu_nid = pxm_to_node(i);
-		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
 	}
 }
 
@@ -635,10 +685,10 @@ static void hmat_register_target_cache(struct memory_target *target)
 		node_add_cache(mem_nid, &tcache->cache_attrs);
 }
 
-static void hmat_register_target_perf(struct memory_target *target)
+static void hmat_register_target_perf(struct memory_target *target, int access)
 {
 	unsigned mem_nid = pxm_to_node(target->memory_pxm);
-	node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
+	node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
 }
 
 static void hmat_register_target_devices(struct memory_target *target)
@@ -653,7 +703,7 @@ static void hmat_register_target_devices(struct memory_target *target)
 		return;
 
 	for (res = target->memregions.child; res; res = res->sibling) {
-		int target_nid = acpi_map_pxm_to_node(target->memory_pxm);
+		int target_nid = pxm_to_node(target->memory_pxm);
 
 		hmem_register_device(target_nid, res);
 	}
@@ -683,7 +733,8 @@ static void hmat_register_target(struct memory_target *target)
 	if (!target->registered) {
 		hmat_register_target_initiators(target);
 		hmat_register_target_cache(target);
-		hmat_register_target_perf(target);
+		hmat_register_target_perf(target, 0);
+		hmat_register_target_perf(target, 1);
 		target->registered = true;
 	}
 	mutex_unlock(&target_lock);
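
The heart of the change is that each memory target now carries two sets of performance attributes: access class 0 is computed over every initiator, including Generic Initiators, while access class 1 only considers initiators that are CPU nodes, and hmat_register_target() exports each class separately via hmat_register_target_perf(target, 0) and hmat_register_target_perf(target, 1). Below is a minimal standalone sketch of that bookkeeping as userspace C, not kernel code; the names perf_attrs and update_access are illustrative, and only the hmem_attrs[2] layout and the node_state(nid, N_CPU) test come from the patch itself:

/*
 * Minimal sketch (not kernel code) of the two access-class bookkeeping
 * this patch introduces: class 0 tracks the best-performing initiator
 * of any kind, class 1 only initiators that are CPU nodes.
 */
#include <stdbool.h>
#include <stdio.h>

struct perf_attrs {
	unsigned int read_latency;
	unsigned int write_latency;
	unsigned int read_bandwidth;
	unsigned int write_bandwidth;
};

/* One slot per access class, mirroring hmem_attrs[2] in the patch. */
static struct perf_attrs attrs[2];

static void update_access(int access, unsigned int latency, unsigned int bandwidth)
{
	attrs[access].read_latency = latency;
	attrs[access].write_latency = latency;
	attrs[access].read_bandwidth = bandwidth;
	attrs[access].write_bandwidth = bandwidth;
}

int main(void)
{
	/* A Generic Initiator (no CPU) provides the best overall numbers,
	 * so it sets access class 0 only. */
	update_access(0, 80, 4000);

	/* The best CPU-backed initiator is slower; it alone may populate
	 * access class 1, gated on the equivalent of node_state(nid, N_CPU). */
	bool initiator_has_cpu = true;
	if (initiator_has_cpu)
		update_access(1, 120, 2500);

	printf("access0 read latency: %u\n", attrs[0].read_latency);
	printf("access1 read latency: %u\n", attrs[1].read_latency);
	return 0;
}

Keeping the two classes in separate array slots is what lets hmat_register_target() register the same target twice without the CPU-only view clobbering the best-of-all-initiators view.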