author     Linus Torvalds <torvalds@linux-foundation.org>  2021-04-26 09:25:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-04-26 09:25:47 -0700
commit     ea5bc7b977fc7cd2be4065ef41824adc976c807f
tree       4519a12c4792b161a7679368034829ca83e53bd1  /arch/x86/events
parent     90e66ce9e817df307045abe40ff64ecf60e3c6ee
parent     df448cdfc01ffc117702a494ef302e7fb76df78a
Merge tag 'x86_cleanups_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull misc x86 cleanups from Borislav Petkov:
"Trivial cleanups and fixes all over the place"
* tag 'x86_cleanups_for_v5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
MAINTAINERS: Remove me from IDE/ATAPI section
x86/pat: Do not compile stubbed functions when X86_PAT is off
x86/asm: Ensure asm/proto.h can be included stand-alone
x86/platform/intel/quark: Fix incorrect kernel-doc comment syntax in files
x86/msr: Make locally used functions static
x86/cacheinfo: Remove unneeded dead-store initialization
x86/process/64: Move cpu_current_top_of_stack out of TSS
tools/turbostat: Unmark non-kernel-doc comment
x86/syscalls: Fix -Wmissing-prototypes warnings from COND_SYSCALL()
x86/fpu/math-emu: Fix function cast warning
x86/msr: Fix wr/rdmsr_safe_regs_on_cpu() prototypes
x86: Fix various typos in comments, take #2
x86: Remove unusual Unicode characters from comments
x86/kaslr: Return boolean values from a function returning bool
x86: Fix various typos in comments
x86/setup: Remove unused RESERVE_BRK_ARRAY()
stacktrace: Move documentation for arch_stack_walk_reliable() to header
x86: Remove duplicate TSC DEADLINE MSR definitions
Diffstat (limited to 'arch/x86/events')
-rw-r--r--  arch/x86/events/amd/core.c            |  2
-rw-r--r--  arch/x86/events/amd/iommu.h           |  2
-rw-r--r--  arch/x86/events/core.c                |  2
-rw-r--r--  arch/x86/events/intel/bts.c           |  2
-rw-r--r--  arch/x86/events/intel/core.c          | 14
-rw-r--r--  arch/x86/events/intel/ds.c            |  2
-rw-r--r--  arch/x86/events/intel/lbr.c           |  2
-rw-r--r--  arch/x86/events/intel/p4.c            |  6
-rw-r--r--  arch/x86/events/intel/pt.c            |  2
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c  | 12
-rw-r--r--  arch/x86/events/zhaoxin/core.c        |  2

11 files changed, 24 insertions, 24 deletions
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 2c1791c4a518..9687a8aef01c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -623,7 +623,7 @@ static void amd_pmu_disable_all(void)
 	/*
 	 * Check each counter for overflow and wait for it to be reset by the
 	 * NMI if it has overflowed. This relies on the fact that all active
-	 * counters are always enabled when this function is caled and
+	 * counters are always enabled when this function is called and
 	 * ARCH_PERFMON_EVENTSEL_INT is always set.
 	 */
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h
index 0e5c036fd7be..e6493a67f1c6 100644
--- a/arch/x86/events/amd/iommu.h
+++ b/arch/x86/events/amd/iommu.h
@@ -17,7 +17,7 @@
 #define IOMMU_PC_DEVID_MATCH_REG		0x20
 #define IOMMU_PC_COUNTER_REPORT_REG		0x28
 
-/* maximun specified bank/counters */
+/* maximum specified bank/counters */
 #define PC_MAX_SPEC_BNKS			64
 #define PC_MAX_SPEC_CNTRS			16
 
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 18df17129695..4c31cae4707e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -765,7 +765,7 @@ struct perf_sched {
 };
 
 /*
- * Initialize interator that runs through all events and counters.
+ * Initialize iterator that runs through all events and counters.
  */
 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
 			    int num, int wmin, int wmax, int gpmax)
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 731dd8d0dbb1..6320d2cfd9d3 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -594,7 +594,7 @@ static __init int bts_init(void)
 	 * we cannot use the user mapping since it will not be available
 	 * if we're not running the owning process.
 	 *
-	 * With PTI we can't use the kernal map either, because its not
+	 * With PTI we can't use the kernel map either, because its not
 	 * there when we run userspace.
 	 *
 	 * For now, disable this driver when using PTI.
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c57ec8e27907..3fd69bd5fa6e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -137,7 +137,7 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
-	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */
+	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
 	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
 	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
@@ -2186,7 +2186,7 @@ static void intel_pmu_enable_all(int added)
  * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
  * in sequence on the same PMC or on different PMCs.
  *
- * In practise it appears some of these events do in fact count, and
+ * In practice it appears some of these events do in fact count, and
  * we need to program all 4 events.
  */
 static void intel_pmu_nhm_workaround(void)
@@ -2435,7 +2435,7 @@ static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
 
 	/*
 	 * The metric is reported as an 8bit integer fraction
-	 * suming up to 0xff.
+	 * summing up to 0xff.
 	 * slots-in-metric = (Metric / 0xff) * slots
 	 */
 	val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
@@ -2776,7 +2776,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 	 * processing loop coming after that the function, otherwise
 	 * phony regular samples may be generated in the sampling buffer
 	 * not marked with the EXACT tag. Another possibility is to have
-	 * one PEBS event and at least one non-PEBS event whic hoverflows
+	 * one PEBS event and at least one non-PEBS event which overflows
 	 * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
 	 * not be set, yet the overflow status bit for the PEBS counter will
 	 * be on Skylake.
@@ -2824,7 +2824,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 	}
 
 	/*
-	 * Intel Perf mertrics
+	 * Intel Perf metrics
 	 */
 	if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
 		handled++;
@@ -4594,7 +4594,7 @@ static bool check_msr(unsigned long msr, u64 mask)
 
 	/*
 	 * Disable the check for real HW, so we don't
-	 * mess with potentionaly enabled registers:
+	 * mess with potentially enabled registers:
 	 */
 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return true;
@@ -4659,7 +4659,7 @@ static __init void intel_arch_events_quirk(void)
 {
 	int bit;
 
-	/* disable event that reported as not presend by cpuid */
+	/* disable event that reported as not present by cpuid */
 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
 		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
 		pr_warn("CPUID marked event: \'%s\' unavailable\n",
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index d32b302719fe..5aabb0e2964a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1805,7 +1805,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 	 *
 	 *   [-period, 0]
 	 *
-	 * the difference between two consequtive reads is:
+	 * the difference between two consecutive reads is:
 	 *
 	 *   A) value2 - value1;
 	 *      when no overflows have happened in between,
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 21890dacfcfe..acb04ef3da3f 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1198,7 +1198,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 	/*
 	 * The LBR logs any address in the IP, even if the IP just
 	 * faulted. This means userspace can control the from address.
-	 * Ensure we don't blindy read any address by validating it is
+	 * Ensure we don't blindly read any address by validating it is
 	 * a known text address.
 	 */
 	if (kernel_text_address(from)) {
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index a4cc66005ce8..971dffe0b77d 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -24,7 +24,7 @@ struct p4_event_bind {
 	unsigned int escr_msr[2];		/* ESCR MSR for this event */
 	unsigned int escr_emask;		/* valid ESCR EventMask bits */
 	unsigned int shared;			/* event is shared across threads */
-	char cntr[2][P4_CNTR_LIMIT];		/* counter index (offset), -1 on abscence */
+	char cntr[2][P4_CNTR_LIMIT];		/* counter index (offset), -1 on absence */
 };
 
 struct p4_pebs_bind {
@@ -45,7 +45,7 @@ struct p4_pebs_bind {
  * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
  * event configuration to find out which values are to be
  * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
- * resgisters
+ * registers
  */
 static struct p4_pebs_bind p4_pebs_bind_map[] = {
 	P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired,	0x0000001, 0x0000001),
@@ -1313,7 +1313,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 	.get_event_constraints	= x86_get_event_constraints,
 	/*
 	 * IF HT disabled we may need to use all
-	 * ARCH_P4_MAX_CCCR counters simulaneously
+	 * ARCH_P4_MAX_CCCR counters simultaneously
 	 * though leave it restricted at moment assuming
 	 * HT is on
 	 */
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index e94af4a54d0d..915847655c06 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -362,7 +362,7 @@ static bool pt_event_valid(struct perf_event *event)
 
 	/*
 	 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
-	 * clears the assomption that BranchEn must always be enabled,
+	 * clears the assumption that BranchEn must always be enabled,
 	 * as was the case with the first implementation of PT.
 	 * If this bit is not set, the legacy behavior is preserved
 	 * for compatibility with the older userspace.
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 9b8937631838..4bba0491068c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -280,17 +280,17 @@
  * | [63]  | 00h | VALID - When set, indicates the CPU bus
  *                 numbers have been initialized. (RO)
  * |[62:48]| --- | Reserved
- * |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned
+ * |[47:40]| 00h | BUS_NUM_5 - Return the bus number BIOS assigned
  *                 CPUBUSNO(5). (RO)
- * |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned
+ * |[39:32]| 00h | BUS_NUM_4 - Return the bus number BIOS assigned
  *                 CPUBUSNO(4). (RO)
- * |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned
+ * |[31:24]| 00h | BUS_NUM_3 - Return the bus number BIOS assigned
  *                 CPUBUSNO(3). (RO)
- * |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned
+ * |[23:16]| 00h | BUS_NUM_2 - Return the bus number BIOS assigned
  *                 CPUBUSNO(2). (RO)
- * |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned
+ * |[15:8] | 00h | BUS_NUM_1 - Return the bus number BIOS assigned
  *                 CPUBUSNO(1). (RO)
- * | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned
+ * | [7:0] | 00h | BUS_NUM_0 - Return the bus number BIOS assigned
  *                 CPUBUSNO(0). (RO)
  */
 #define SKX_MSR_CPU_BUS_NUMBER		0x300
diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
index e68827e604ad..949d845c922b 100644
--- a/arch/x86/events/zhaoxin/core.c
+++ b/arch/x86/events/zhaoxin/core.c
@@ -494,7 +494,7 @@ static __init void zhaoxin_arch_events_quirk(void)
 {
 	int bit;
 
-	/* disable event that reported as not presend by cpuid */
+	/* disable event that reported as not present by cpuid */
 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {
 		zx_pmon_event_map[zx_arch_events_map[bit].id] = 0;
 		pr_warn("CPUID marked event: \'%s\' unavailable\n",
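
Aside: the icl_get_metrics_event_value() hunk above documents the scaling the kernel comment describes: each topdown metric is reported as an 8-bit fraction of 0xff, so slots-in-metric = (Metric / 0xff) * slots. Below is a minimal, standalone C sketch of that arithmetic only; it is not kernel code, and the base index, function name, and example values are hypothetical stand-ins for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for INTEL_PMC_IDX_METRIC_BASE; value chosen for illustration. */
#define METRIC_IDX_BASE 48

static uint64_t slots_in_metric(uint64_t metrics, uint64_t slots, int idx)
{
	/* Pick the 8-bit fraction for this metric out of the packed value. */
	uint64_t val = (metrics >> ((idx - METRIC_IDX_BASE) * 8)) & 0xff;

	/* Scale: (val / 0xff) * slots, done in integer arithmetic. */
	return slots * val / 0xff;
}

int main(void)
{
	/* 0x80 is roughly half of 0xff, so expect about half of the slots (501 of 1000). */
	uint64_t metrics = 0x80;	/* fraction for the metric at METRIC_IDX_BASE */
	uint64_t slots   = 1000;

	printf("%llu\n", (unsigned long long)slots_in_metric(metrics, slots, METRIC_IDX_BASE));
	return 0;
}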