author     Babu Moger <Babu.Moger@amd.com>        2018-11-21 20:28:45 +0000
committer  Borislav Petkov <bp@suse.de>           2018-11-22 20:16:20 +0100
commit     4d05bf71f157d756932e77cdee16dc99e235d636 (patch)
tree       63c54a94cca33592a7b3e7b5ab7176dbed79c73f
parent     723f1a0dd8e26a7523ba068204bee11c95ded38d (diff)
download   linux-4d05bf71f157d756932e77cdee16dc99e235d636.tar.bz2
x86/resctrl: Introduce AMD QOS feature
Enable the QoS feature on AMD.
The following QoS sub-features are supported on AMD if the underlying
hardware supports them:
- L3 Cache allocation enforcement
- L3 Cache occupancy monitoring
- L3 Code-Data Prioritization support
- Memory Bandwidth Enforcement (Allocation)
The specification is available at:
https://developer.amd.com/wp-content/resources/56375.pdf
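
For illustration, here is a minimal user-space sketch of the enumeration
the patch performs in __rdt_get_mem_config_amd() below: it queries CPUID
leaf 0x80000020, sub-leaf 1, and derives the number of CLOSIDs from
EDX[15:0] (COS_MAX). The EDX field layout is an assumption inferred from
the kernel's reuse of cpuid_0x10_x_edx / edx.split.cos_max in the diff;
treat this as a sketch, not authoritative enumeration code.

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/*
		 * CPUID Fn8000_0020, sub-leaf 1 enumerates AMD MBA limits.
		 * EDX[15:0] is assumed to hold COS_MAX, mirroring the
		 * kernel's edx.split.cos_max + 1 in the diff below.
		 */
		if (!__get_cpuid_count(0x80000020, 1, &eax, &ebx, &ecx, &edx)) {
			puts("CPUID leaf 0x80000020 not supported");
			return 1;
		}

		printf("num_closid   = %u\n", (edx & 0xffff) + 1);
		/* The kernel hardcodes the maximum control value to 0x800 (2048). */
		printf("default_ctrl = %u\n", 0x800);
		return 0;
	}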
There are differences in the way some of the features are implemented.
Separate those functions out and add them as vendor-specific functions.
The major differences are in the MBA feature:
- AMD uses CPUID leaf 0x80000020 to initialize the MBA features.
- AMD uses direct bandwidth values instead of delay-based bandwidth values.
- The MSR base address for MBA is different.
- AMD allows non-contiguous L3 cache bit masks (see the validation sketch below).
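
The last point is visible in cbm_validate_amd() in the diff: unlike the
Intel validator, it performs only a range check and never tests bit
contiguity. Below is a minimal sketch of the contiguity condition that
Intel-style validation enforces; cbm_is_contiguous() is a hypothetical
helper for illustration, not part of the patch.

	#include <stdbool.h>

	/*
	 * Returns true if the set bits in val form one contiguous run,
	 * the condition Intel-style CBM validation enforces. The AMD
	 * validator in this patch skips this test, so a mask such as
	 * 0xf0f is accepted on AMD but rejected on Intel.
	 */
	static bool cbm_is_contiguous(unsigned long val)
	{
		unsigned long shifted;

		if (!val)
			return false;

		/* Strip trailing zeros, then check for a 0->1 transition. */
		shifted = val >> __builtin_ctzl(val);
		return (shifted & (shifted + 1)) == 0;
	}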
Signed-off-by: Babu Moger <babu.moger@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dmitry Safonov <dima@arista.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <linux-doc@vger.kernel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Pu Wen <puwen@hygon.cn>
Cc: <qianyue.zj@alibaba-inc.com>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Reinette Chatre <reinette.chatre@intel.com>
Cc: Rian Hunter <rian@alum.mit.edu>
Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: <xiaochen.shen@intel.com>
Link: https://lkml.kernel.org/r/20181121202811.4492-12-babu.moger@amd.com
-rw-r--r--  arch/x86/kernel/cpu/resctrl/core.c         | 69
-rw-r--r--  arch/x86/kernel/cpu/resctrl/ctrlmondata.c  | 71
-rw-r--r--  arch/x86/kernel/cpu/resctrl/internal.h     |  5
3 files changed, 142 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index ba5a5b8c4681..2ec252be4ed9 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -61,6 +61,9 @@ mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
 		struct rdt_resource *r);
 static void cat_wrmsr(struct rdt_domain *d, struct msr_param *m,
 		      struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+	      struct rdt_resource *r);
 
 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
 
@@ -255,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
 	return false;
 }
 
-static bool __get_mem_config(struct rdt_resource *r)
+static bool __get_mem_config_intel(struct rdt_resource *r)
 {
 	union cpuid_0x10_3_eax eax;
 	union cpuid_0x10_x_edx edx;
@@ -281,6 +284,30 @@ static bool __get_mem_config(struct rdt_resource *r)
 	return true;
 }
 
+static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+	union cpuid_0x10_3_eax eax;
+	union cpuid_0x10_x_edx edx;
+	u32 ebx, ecx;
+
+	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+	r->num_closid = edx.split.cos_max + 1;
+	r->default_ctrl = MAX_MBA_BW_AMD;
+
+	/* AMD does not use delay */
+	r->membw.delay_linear = false;
+
+	r->membw.min_bw = 0;
+	r->membw.bw_gran = 1;
+	/* Max value is 2048, Data width should be 4 in decimal */
+	r->data_width = 4;
+
+	r->alloc_capable = true;
+	r->alloc_enabled = true;
+
+	return true;
+}
+
 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
 {
 	union cpuid_0x10_1_eax eax;
@@ -340,6 +367,15 @@ static int get_cache_id(int cpu, int level)
 	return -1;
 }
 
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+	unsigned int i;
+
+	for (i = m->low; i < m->high; i++)
+		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
 /*
  * Map the memory b/w percentage value to delay values
  * that can be written to QOS_MSRs.
@@ -793,8 +829,13 @@ static bool __init rdt_cpu_has(int flag)
 
 static __init bool get_mem_config(void)
 {
-	if (rdt_cpu_has(X86_FEATURE_MBA))
-		return __get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	if (!rdt_cpu_has(X86_FEATURE_MBA))
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
 
 	return false;
 }
@@ -893,10 +934,32 @@ static __init void rdt_init_res_defs_intel(void)
 	}
 }
 
+static __init void rdt_init_res_defs_amd(void)
+{
+	struct rdt_resource *r;
+
+	for_each_rdt_resource(r) {
+		if (r->rid == RDT_RESOURCE_L3 ||
+		    r->rid == RDT_RESOURCE_L3DATA ||
+		    r->rid == RDT_RESOURCE_L3CODE ||
+		    r->rid == RDT_RESOURCE_L2 ||
+		    r->rid == RDT_RESOURCE_L2DATA ||
+		    r->rid == RDT_RESOURCE_L2CODE)
+			r->cbm_validate = cbm_validate_amd;
+		else if (r->rid == RDT_RESOURCE_MBA) {
+			r->msr_base = MSR_IA32_MBA_BW_BASE;
+			r->msr_update = mba_wrmsr_amd;
+			r->parse_ctrlval = parse_bw_amd;
+		}
+	}
+}
+
 static __init void rdt_init_res_defs(void)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 		rdt_init_res_defs_intel();
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		rdt_init_res_defs_amd();
 }
 
 static enum cpuhp_state rdt_online;
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index bfd7bdf8a156..43ee3cee6494 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -30,6 +30,53 @@
 
 /*
  * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by
+ * the hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+			    struct rdt_resource *r)
+{
+	unsigned long bw;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &bw);
+	if (ret) {
+		rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+		return false;
+	}
+
+	if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+		rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+				    r->membw.min_bw, r->default_ctrl);
+		return false;
+	}
+
+	*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+	return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d)
+{
+	unsigned long bw_val;
+
+	if (d->have_new_ctrl) {
+		rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+		return -EINVAL;
+	}
+
+	if (!bw_validate_amd(data->buf, &bw_val, r))
+		return -EINVAL;
+
+	d->new_ctrl = bw_val;
+	d->have_new_ctrl = true;
+
+	return 0;
+}
+
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
  * checked against the minimum and max bandwidth values specified by the
  * hardware. The allocated bandwidth percentage is rounded to the next
  * control step available on the hardware.
@@ -124,6 +171,30 @@ bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
 }
 
 /*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 16, &val);
+	if (ret) {
+		rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
+		return false;
+	}
+
+	if (val > r->default_ctrl) {
+		rdt_last_cmd_puts("Mask out of range\n");
+		return false;
+	}
+
+	*data = val;
+	return true;
+}
+
+/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 599cad34a6a8..822b7db634ee 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -11,6 +11,7 @@
 #define MSR_IA32_L3_CBM_BASE		0xc90
 #define MSR_IA32_L2_CBM_BASE		0xd10
 #define MSR_IA32_MBA_THRTL_BASE		0xd50
+#define MSR_IA32_MBA_BW_BASE		0xc0000200
 
 #define MSR_IA32_QM_CTR			0x0c8e
 #define MSR_IA32_QM_EVTSEL		0x0c8d
@@ -34,6 +35,7 @@
 #define MAX_MBA_BW			100u
 #define MBA_IS_LINEAR			0x4
 #define MBA_MAX_MBPS			U32_MAX
+#define MAX_MBA_BW_AMD			0x800
 
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)
@@ -448,6 +450,8 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
 	      struct rdt_domain *d);
 int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
 		   struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+		 struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
@@ -579,5 +583,6 @@ void cqm_handle_limbo(struct work_struct *work);
 bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
 void __check_limbo(struct rdt_domain *d, bool force_free);
 bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
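
For context on how the new parse_bw_amd() path is exercised from user
space: on AMD, the MB value written to a resctrl schemata file is a
direct bandwidth control value in [0, 2048] with a granularity of 1,
rather than an Intel-style percentage. A minimal sketch follows; the
mount point and group name (/sys/fs/resctrl/grp0) are illustrative and
assume resctrl is already mounted with a group created.

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Illustrative path; assumes a mounted resctrl hierarchy. */
		const char *path = "/sys/fs/resctrl/grp0/schemata";
		FILE *f = fopen(path, "w");

		if (!f) {
			fprintf(stderr, "open %s: %s\n", path, strerror(errno));
			return 1;
		}

		/*
		 * On AMD this line is parsed by parse_bw_amd(): a direct
		 * bandwidth value (0..2048, step 1) for domain 0, not a
		 * percentage as on Intel.
		 */
		if (fputs("MB:0=2048\n", f) == EOF)
			fprintf(stderr, "write to %s failed\n", path);

		fclose(f);
		return 0;
	}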