From 1631ba1259d6d7f49b6028f2a1a0fa02be1c522a Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Thu, 7 Jul 2022 01:15:35 +0200 Subject: riscv: Add support for non-coherent devices using zicbom extension The Zicbom ISA extension was ratified in November 2021 and introduces instructions for dcache invalidate, clean and flush operations. Implement cache management operations for non-coherent devices based on them. Of course not all cores will support this, so implement an alternative-based mechanism that patches the Zicbom cache operations in over nop placeholder instructions at runtime. As discussed in previous versions, assume the platform is coherent by default, so that non-coherent devices need to be marked accordingly by firmware. Reviewed-by: Christoph Hellwig Signed-off-by: Heiko Stuebner Reviewed-by: Guo Ren Link: https://lore.kernel.org/r/20220706231536.2041855-4-heiko@sntech.de Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cache.h | 4 ++++ arch/riscv/include/asm/cacheflush.h | 10 ++++++++++ arch/riscv/include/asm/errata_list.h | 19 ++++++++++++++++++- arch/riscv/include/asm/hwcap.h | 1 + 4 files changed, 33 insertions(+), 1 deletion(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h index 9b58b104559e..d3036df23ccb 100644 --- a/arch/riscv/include/asm/cache.h +++ b/arch/riscv/include/asm/cache.h @@ -11,6 +11,10 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#endif + /* * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that * the flat loader aligns it accordingly. diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h index 23ff70350992..a60acaecfeda 100644 --- a/arch/riscv/include/asm/cacheflush.h +++ b/arch/riscv/include/asm/cacheflush.h @@ -42,6 +42,16 @@ void flush_icache_mm(struct mm_struct *mm, bool local); #endif /* CONFIG_SMP */ +#ifdef CONFIG_RISCV_ISA_ZICBOM +void riscv_init_cbom_blocksize(void); +#else +static inline void riscv_init_cbom_blocksize(void) { } +#endif + +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +void riscv_noncoherent_supported(void); +#endif + /* * Bits in sys_riscv_flush_icache()'s flags argument. */ diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 9e2888dbb5b1..f7c015005847 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -20,7 +20,8 @@ #endif #define CPUFEATURE_SVPBMT 0 -#define CPUFEATURE_NUMBER 1 +#define CPUFEATURE_ZICBOM 1 +#define CPUFEATURE_NUMBER 2 #ifdef __ASSEMBLY__ @@ -93,6 +94,22 @@ asm volatile(ALTERNATIVE( \ #define ALT_THEAD_PMA(_val) #endif +#define ALT_CMO_OP(_op, _start, _size, _cachesize) \ +asm volatile(ALTERNATIVE( \ + __nops(5), \ + "mv a0, %1\n\t" \ + "j 2f\n\t" \ + "3:\n\t" \ + "cbo." 
__stringify(_op) " (a0)\n\t" \ + "add a0, a0, %0\n\t" \ + "2:\n\t" \ + "bltu a0, %2, 3b\n\t", 0, \ + CPUFEATURE_ZICBOM, CONFIG_RISCV_ISA_ZICBOM) \ + : : "r"(_cachesize), \ + "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \ + "r"((unsigned long)(_start) + (_size)) \ + : "a0") + #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index e48eebdd2631..ed4045e70f7a 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -54,6 +54,7 @@ extern unsigned long elf_hwcap; enum riscv_isa_ext_id { RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, RISCV_ISA_EXT_SVPBMT, + RISCV_ISA_EXT_ZICBOM, RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, }; -- cgit v1.2.3 From d20ec7529236a2fcdb2d856fc0bd80b409a217fc Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Thu, 7 Jul 2022 01:15:36 +0200 Subject: riscv: implement cache-management errata for T-Head SoCs The T-Head C906 and C910 implement a scheme for handling cache operations different from the generic Zicbom extension. Add an errata for it next to the generic dma coherency ops. Reviewed-by: Samuel Holland Tested-by: Samuel Holland Reviewed-by: Guo Ren Signed-off-by: Heiko Stuebner Link: https://lore.kernel.org/r/20220706231536.2041855-5-heiko@sntech.de Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig.erratas | 11 +++++++++ arch/riscv/errata/thead/errata.c | 20 +++++++++++++++ arch/riscv/include/asm/errata_list.h | 48 ++++++++++++++++++++++++++++++++---- 3 files changed, 74 insertions(+), 5 deletions(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas index 457ac72c9b36..3223e533fd87 100644 --- a/arch/riscv/Kconfig.erratas +++ b/arch/riscv/Kconfig.erratas @@ -55,4 +55,15 @@ config ERRATA_THEAD_PBMT If you don't know what to do here, say "Y". +config ERRATA_THEAD_CMO + bool "Apply T-Head cache management errata" + depends on ERRATA_THEAD + select RISCV_DMA_NONCOHERENT + default y + help + This will apply the cache management errata to handle the + non-standard handling on non-coherent operations on T-Head SoCs. + + If you don't know what to do here, say "Y". 
+ endmenu diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c index b37b6fedd53b..202c83f677b2 100644 --- a/arch/riscv/errata/thead/errata.c +++ b/arch/riscv/errata/thead/errata.c @@ -27,6 +27,23 @@ static bool errata_probe_pbmt(unsigned int stage, return false; } +static bool errata_probe_cmo(unsigned int stage, + unsigned long arch_id, unsigned long impid) +{ +#ifdef CONFIG_ERRATA_THEAD_CMO + if (arch_id != 0 || impid != 0) + return false; + + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return false; + + riscv_noncoherent_supported(); + return true; +#else + return false; +#endif +} + static u32 thead_errata_probe(unsigned int stage, unsigned long archid, unsigned long impid) { @@ -35,6 +52,9 @@ static u32 thead_errata_probe(unsigned int stage, if (errata_probe_pbmt(stage, archid, impid)) cpu_req_errata |= (1U << ERRATA_THEAD_PBMT); + if (errata_probe_cmo(stage, archid, impid)) + cpu_req_errata |= (1U << ERRATA_THEAD_CMO); + return cpu_req_errata; } diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index f7c015005847..0f66e368e351 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -16,7 +16,8 @@ #ifdef CONFIG_ERRATA_THEAD #define ERRATA_THEAD_PBMT 0 -#define ERRATA_THEAD_NUMBER 1 +#define ERRATA_THEAD_CMO 1 +#define ERRATA_THEAD_NUMBER 2 #endif #define CPUFEATURE_SVPBMT 0 @@ -94,17 +95,54 @@ asm volatile(ALTERNATIVE( \ #define ALT_THEAD_PMA(_val) #endif +/* + * dcache.ipa rs1 (invalidate, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01010 rs1 000 00000 0001011 + * dache.iva rs1 (invalida, virtual address) + * 0000001 00110 rs1 000 00000 0001011 + * + * dcache.cpa rs1 (clean, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01001 rs1 000 00000 0001011 + * dcache.cva rs1 (clean, virtual address) + * 0000001 00100 rs1 000 00000 0001011 + * + * dcache.cipa rs1 (clean then invalidate, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01011 rs1 000 00000 0001011 + * dcache.civa rs1 (... virtual address) + * 0000001 00111 rs1 000 00000 0001011 + * + * sync.s (make sure all cache operations finished) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000000 11001 00000 000 00000 0001011 + */ +#define THEAD_inval_A0 ".long 0x0265000b" +#define THEAD_clean_A0 ".long 0x0245000b" +#define THEAD_flush_A0 ".long 0x0275000b" +#define THEAD_SYNC_S ".long 0x0190000b" + #define ALT_CMO_OP(_op, _start, _size, _cachesize) \ -asm volatile(ALTERNATIVE( \ - __nops(5), \ +asm volatile(ALTERNATIVE_2( \ + __nops(6), \ "mv a0, %1\n\t" \ "j 2f\n\t" \ "3:\n\t" \ "cbo." 
__stringify(_op) " (a0)\n\t" \ "add a0, a0, %0\n\t" \ "2:\n\t" \ - "bltu a0, %2, 3b\n\t", 0, \ - CPUFEATURE_ZICBOM, CONFIG_RISCV_ISA_ZICBOM) \ + "bltu a0, %2, 3b\n\t" \ + "nop", 0, CPUFEATURE_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \ + "mv a0, %1\n\t" \ + "j 2f\n\t" \ + "3:\n\t" \ + THEAD_##_op##_A0 "\n\t" \ + "add a0, a0, %0\n\t" \ + "2:\n\t" \ + "bltu a0, %2, 3b\n\t" \ + THEAD_SYNC_S, THEAD_VENDOR_ID, \ + ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \ : : "r"(_cachesize), \ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \ "r"((unsigned long)(_start) + (_size)) \ -- cgit v1.2.3 From 8eb060e10185cfc97ef0200d197ec246ba0f9f8c Mon Sep 17 00:00:00 2001 From: Dao Lu Date: Mon, 20 Jun 2022 13:15:25 -0700 Subject: arch/riscv: add Zihintpause support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement support for the ZiHintPause extension. The PAUSE instruction is a HINT that indicates the current hart’s rate of instruction retirement should be temporarily reduced or paused. Reviewed-by: Heiko Stuebner Tested-by: Heiko Stuebner Signed-off-by: Dao Lu [Palmer: Some minor merge conflicts.] Link: https://lore.kernel.org/all/20220620201530.3929352-1-daolu@rivosinc.com/ Link: https://lore.kernel.org/all/20220811053356.17375-1-palmer@rivosinc.com/ Signed-off-by: Palmer Dabbelt --- arch/riscv/Makefile | 4 ++++ arch/riscv/include/asm/hwcap.h | 5 +++++ arch/riscv/include/asm/vdso/processor.h | 21 ++++++++++++++++++--- arch/riscv/kernel/cpu.c | 1 + arch/riscv/kernel/cpufeature.c | 1 + 5 files changed, 29 insertions(+), 3 deletions(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 42d7ff8730aa..3fa8ef336822 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -60,6 +60,10 @@ riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei toolchain-supports-zicbom := $(call cc-option-yn, -march=$(riscv-march-y)_zicbom) riscv-march-$(toolchain-supports-zicbom) := $(riscv-march-y)_zicbom +# Check if the toolchain supports Zihintpause extension +toolchain-supports-zihintpause := $(call cc-option-yn, -march=$(riscv-march-y)_zihintpause) +riscv-march-$(toolchain-supports-zihintpause) := $(riscv-march-y)_zihintpause + KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y)) KBUILD_AFLAGS += -march=$(riscv-march-y) diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index ed4045e70f7a..3c8a5ca95c72 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -8,6 +8,7 @@ #ifndef _ASM_RISCV_HWCAP_H #define _ASM_RISCV_HWCAP_H +#include #include #include @@ -55,6 +56,7 @@ enum riscv_isa_ext_id { RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, RISCV_ISA_EXT_SVPBMT, RISCV_ISA_EXT_ZICBOM, + RISCV_ISA_EXT_ZIHINTPAUSE, RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, }; @@ -65,6 +67,7 @@ enum riscv_isa_ext_id { */ enum riscv_isa_ext_key { RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */ + RISCV_ISA_EXT_KEY_ZIHINTPAUSE, RISCV_ISA_EXT_KEY_MAX, }; @@ -84,6 +87,8 @@ static __always_inline int riscv_isa_ext2key(int num) return RISCV_ISA_EXT_KEY_FPU; case RISCV_ISA_EXT_d: return RISCV_ISA_EXT_KEY_FPU; + case RISCV_ISA_EXT_ZIHINTPAUSE: + return RISCV_ISA_EXT_KEY_ZIHINTPAUSE; default: return -EINVAL; } diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h index 134388cbaaa1..1e4f8b4aef79 100644 --- a/arch/riscv/include/asm/vdso/processor.h +++ b/arch/riscv/include/asm/vdso/processor.h @@ -4,15 +4,30 @@ #ifndef __ASSEMBLY__ +#include #include 
+#include static inline void cpu_relax(void) { + if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) { #ifdef __riscv_muldiv - int dummy; - /* In lieu of a halt instruction, induce a long-latency stall. */ - __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); + int dummy; + /* In lieu of a halt instruction, induce a long-latency stall. */ + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); #endif + } else { + /* + * Reduce instruction retirement. + * This assumes the PC changes. + */ +#ifdef __riscv_zihintpause + __asm__ __volatile__ ("pause"); +#else + /* Encoding of the pause instruction */ + __asm__ __volatile__ (".4byte 0x100000F"); +#endif + } barrier(); } diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index 76a2a225e3d9..a77c380703c5 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -94,6 +94,7 @@ static struct riscv_isa_ext_data isa_ext_arr[] = { __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), + __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX), }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index f914e8da157a..c233fbc5b873 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -202,6 +202,7 @@ void __init riscv_fill_hwcap(void) SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF); SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT); SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM); + SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE); } #undef SET_ISA_EXT_MAP } -- cgit v1.2.3 From da6d2128e56a50a0d497c8e41ca1d33d88bcc0aa Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Thu, 14 Jul 2022 08:18:11 +0100 Subject: RISC-V: Declare cpu_ops_spinwait in The cpu_ops_spinwait is used in a couple of places in arch/riscv and is causing a sparse warning due to no declaration. Add this to with the others to fix the following: arch/riscv/kernel/cpu_ops_spinwait.c:16:29: warning: symbol 'cpu_ops_spinwait' was not declared. Should it be static? 
Signed-off-by: Ben Dooks Link: https://lore.kernel.org/r/20220714071811.187491-1-ben.dooks@sifive.com [Palmer: Drop the extern from cpu_ops.c] Fixes: 2ffc48fc7071 ("RISC-V: Move spinwait booting method to its own config") Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpu_ops.h | 1 + arch/riscv/kernel/cpu_ops.c | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h index 134590f1b843..aa128466c4d4 100644 --- a/arch/riscv/include/asm/cpu_ops.h +++ b/arch/riscv/include/asm/cpu_ops.h @@ -38,6 +38,7 @@ struct cpu_operations { #endif }; +extern const struct cpu_operations cpu_ops_spinwait; extern const struct cpu_operations *cpu_ops[NR_CPUS]; void __init cpu_set_ops(int cpu); diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c index 170d07e57721..f92c0e6eddb1 100644 --- a/arch/riscv/kernel/cpu_ops.c +++ b/arch/riscv/kernel/cpu_ops.c @@ -15,9 +15,7 @@ const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; extern const struct cpu_operations cpu_ops_sbi; -#ifdef CONFIG_RISCV_BOOT_SPINWAIT -extern const struct cpu_operations cpu_ops_spinwait; -#else +#ifndef CONFIG_RISCV_BOOT_SPINWAIT const struct cpu_operations cpu_ops_spinwait = { .name = "", .cpu_prepare = NULL, -- cgit v1.2.3 From 87df2b5cbc84554df49ccfd9170103729d5a0ab4 Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Thu, 14 Jul 2022 09:02:36 +0100 Subject: riscv: ensure cpu_ops_sbi is declared Sparse complains that cpu_ops_sbi is used undeclared: arch/riscv/kernel/cpu_ops_sbi.c:17:29: warning: symbol 'cpu_ops_sbi' was not declared. Should it be static? Fix the warning by adding cpu_ops_sbi to cpu_ops_sbi.h & including that where used. Signed-off-by: Conor Dooley Link: https://lore.kernel.org/r/20220714080235.3853374-1-conor.dooley@microchip.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpu_ops_sbi.h | 2 ++ arch/riscv/kernel/cpu_ops.c | 1 + 2 files changed, 3 insertions(+) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h index 56e4b76d09ff..d6e4665b3195 100644 --- a/arch/riscv/include/asm/cpu_ops_sbi.h +++ b/arch/riscv/include/asm/cpu_ops_sbi.h @@ -10,6 +10,8 @@ #include #include +extern const struct cpu_operations cpu_ops_sbi; + /** * struct sbi_hart_boot_data - Hart specific boot used during booting and * cpu hotplug. diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c index f92c0e6eddb1..8275f237a59d 100644 --- a/arch/riscv/kernel/cpu_ops.c +++ b/arch/riscv/kernel/cpu_ops.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From bf952a290f7a9d818204b9b68e861655f8b15a65 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 22 Jul 2022 09:50:44 -0700 Subject: RISC-V: Add SSTC extension CSR details This patch just introduces the required CSR fields related to the SSTC extension. 
Reviewed-by: Anup Patel Signed-off-by: Atish Patra Link: https://lore.kernel.org/r/20220722165047.519994-2-atishp@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/csr.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 6d85655e7edf..34f4eaf326c4 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -235,6 +235,9 @@ #define CSR_SIP 0x144 #define CSR_SATP 0x180 +#define CSR_STIMECMP 0x14D +#define CSR_STIMECMPH 0x15D + #define CSR_VSSTATUS 0x200 #define CSR_VSIE 0x204 #define CSR_VSTVEC 0x205 @@ -244,6 +247,8 @@ #define CSR_VSTVAL 0x243 #define CSR_VSIP 0x244 #define CSR_VSATP 0x280 +#define CSR_VSTIMECMP 0x24D +#define CSR_VSTIMECMPH 0x25D #define CSR_HSTATUS 0x600 #define CSR_HEDELEG 0x602 -- cgit v1.2.3 From 464b0187ff94fcc629fe7cd350e16a3b9e80ed9e Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 22 Jul 2022 09:50:45 -0700 Subject: RISC-V: Enable sstc extension parsing from DT The ISA extension framework now allows parsing any multi-letter ISA extension. Enable that for the sstc extension. Reviewed-by: Anup Patel Signed-off-by: Atish Patra Link: https://lore.kernel.org/r/20220722165047.519994-3-atishp@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/hwcap.h | 1 + arch/riscv/kernel/cpu.c | 1 + arch/riscv/kernel/cpufeature.c | 1 + 3 files changed, 3 insertions(+) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 4e2486881840..b186fff75198 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -53,6 +53,7 @@ extern unsigned long elf_hwcap; enum riscv_isa_ext_id { RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, RISCV_ISA_EXT_SVPBMT, + RISCV_ISA_EXT_SSTC, RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index fba9e9f46a8c..0016d9337fe0 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -89,6 +89,7 @@ int riscv_of_parent_hartid(struct device_node *node) static struct riscv_isa_ext_data isa_ext_arr[] = { __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), + __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX), }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index a6f62a6d1edd..d1d83cd9fd4b 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -199,6 +199,7 @@ void __init riscv_fill_hwcap(void) } else { SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF); SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT); + SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC); } #undef SET_ISA_EXT_MAP } -- cgit v1.2.3 From 63ba67ebdfd403ef53aa0fefde3a42e505516e8c Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Mon, 11 Jul 2022 10:46:31 -0700 Subject: RISC-V: Move counter info definition to sbi header file The counter info encoding format is defined by the SBI specification. The KVM implementation of the SBI PMU extension will also leverage this definition. Move the definition from the SBI PMU driver to the common SBI header file. 
Signed-off-by: Atish Patra Link: https://lore.kernel.org/r/20220711174632.4186047-5-atishp@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/sbi.h | 14 ++++++++++++++ drivers/perf/riscv_pmu_sbi.c | 14 -------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 9e3c2cf1edaf..d633ac0f5a32 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -122,6 +122,20 @@ enum sbi_ext_pmu_fid { SBI_EXT_PMU_COUNTER_FW_READ, }; +union sbi_pmu_ctr_info { + unsigned long value; + struct { + unsigned long csr:12; + unsigned long width:6; +#if __riscv_xlen == 32 + unsigned long reserved:13; +#else + unsigned long reserved:45; +#endif + unsigned long type:1; + }; +}; + #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(55, 0) #define RISCV_PMU_RAW_EVENT_IDX 0x20000 diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index bae614c73b14..24124546844c 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -21,20 +21,6 @@ #include #include -union sbi_pmu_ctr_info { - unsigned long value; - struct { - unsigned long csr:12; - unsigned long width:6; +#if __riscv_xlen == 32 - unsigned long reserved:13; -#else - unsigned long reserved:45; -#endif - unsigned long type:1; - }; -}; - /* * RISC-V doesn't have hetergenous harts yet. This need to be part of * per_cpu in case of harts with different pmu counters -- cgit v1.2.3 From f829ee7595b5e9a9e5e1cc802224391c61072b38 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Mon, 11 Jul 2022 10:46:32 -0700 Subject: RISC-V: Improve SBI definitions Fixed a few typos and bit fields that were not aligned with the spec. Define other related macros that will be useful in the future. 
Signed-off-by: Atish Patra Link: https://lore.kernel.org/r/20220711174632.4186047-6-atishp@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/sbi.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index d633ac0f5a32..2a0ef738695e 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -136,7 +136,7 @@ union sbi_pmu_ctr_info { }; }; -#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(55, 0) +#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0) #define RISCV_PMU_RAW_EVENT_IDX 0x20000 /** General pmu event codes specified in SBI PMU extension */ @@ -203,12 +203,26 @@ enum sbi_pmu_ctr_type { SBI_PMU_CTR_TYPE_FW, }; +/* Helper macros to decode event idx */ +#define SBI_PMU_EVENT_IDX_OFFSET 20 +#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF +#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF +#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000 +#define SBI_PMU_EVENT_RAW_IDX 0x20000 +#define SBI_PMU_FIXED_CTR_MASK 0x07 + +#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8 +#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06 +#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01 + +#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF + /* Flags defined for config matching function */ #define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0) #define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1) #define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2) #define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3) -#define SBI_PMU_CFG_FLAG_SET_VSNH (1 << 4) +#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4) #define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5) #define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6) #define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7) -- cgit v1.2.3 From 8f5cb44b1bae8520c0705ce348b30ffb1fdda43a Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Fri, 22 Jul 2022 09:50:47 -0700 Subject: RISC-V: KVM: Support sstc extension Sstc extension allows the guest to program the vstimecmp CSR directly instead of making an SBI call to the hypervisor to program the next event. The timer interrupt is also directly injected to the guest by the hardware in this case. To maintain backward compatibility, the hypervisors also update the vstimecmp in an SBI set_time call if the hardware supports it. Thus, the older kernels in guest also take advantage of the sstc extension. 
Reviewed-by: Anup Patel Signed-off-by: Atish Patra Acked-by: Anup Patel Link: https://lore.kernel.org/all/CAAhSdy2mb6wyqy0NAn9BcTWKMYEc0Z4zU3s3j7oNqBz6eDQ9sg@mail.gmail.com/ Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/kvm_vcpu_timer.h | 7 ++ arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/vcpu.c | 8 +- arch/riscv/kvm/vcpu_timer.c | 144 ++++++++++++++++++++++++++++++-- 4 files changed, 153 insertions(+), 7 deletions(-) (limited to 'arch/riscv/include/asm') diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h index 50138e2eb91b..0d8fdb8ec63a 100644 --- a/arch/riscv/include/asm/kvm_vcpu_timer.h +++ b/arch/riscv/include/asm/kvm_vcpu_timer.h @@ -28,6 +28,11 @@ struct kvm_vcpu_timer { u64 next_cycles; /* Underlying hrtimer instance */ struct hrtimer hrt; + + /* Flag to check if sstc is enabled or not */ + bool sstc_enabled; + /* A function pointer to switch between stimecmp or hrtimer at runtime */ + int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles); }; int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles); @@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu); int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu); void kvm_riscv_guest_timer_init(struct kvm *kvm); +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu); #endif diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 24b2a6e27698..7351417afd62 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -97,6 +97,7 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_I, KVM_RISCV_ISA_EXT_M, KVM_RISCV_ISA_EXT_SVPBMT, + KVM_RISCV_ISA_EXT_SSTC, KVM_RISCV_ISA_EXT_MAX, }; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 5d271b597613..d0f08d5b4282 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -52,6 +52,7 @@ static const unsigned long kvm_isa_ext_arr[] = { RISCV_ISA_EXT_i, RISCV_ISA_EXT_m, RISCV_ISA_EXT_SVPBMT, + RISCV_ISA_EXT_SSTC, }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) @@ -85,6 +86,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_C: case KVM_RISCV_ISA_EXT_I: case KVM_RISCV_ISA_EXT_M: + case KVM_RISCV_ISA_EXT_SSTC: return false; default: break; @@ -203,7 +205,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER); + return kvm_riscv_vcpu_timer_pending(vcpu); } void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) @@ -785,6 +787,8 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa) if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT)) henvcfg |= ENVCFG_PBMTE; + if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC)) + henvcfg |= ENVCFG_STCE; csr_write(CSR_HENVCFG, henvcfg); #ifdef CONFIG_32BIT csr_write(CSR_HENVCFGH, henvcfg >> 32); @@ -828,6 +832,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->arch.isa); kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); + kvm_riscv_vcpu_timer_save(vcpu); + csr->vsstatus = csr_read(CSR_VSSTATUS); csr->vsie = csr_read(CSR_VSIE); csr->vstvec = csr_read(CSR_VSTVEC); diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c index 595043857049..16f50c46ba39 100644 --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ 
-69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t) return 0; } -int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles) +static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles) +{ +#if defined(CONFIG_32BIT) + csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF); + csr_write(CSR_VSTIMECMPH, ncycles >> 32); +#else + csr_write(CSR_VSTIMECMP, ncycles); +#endif + return 0; +} + +static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles) { struct kvm_vcpu_timer *t = &vcpu->arch.timer; struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; @@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles) return 0; } +int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + + return t->timer_next_event(vcpu, ncycles); +} + +static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h) +{ + u64 delta_ns; + struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt); + struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer); + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + + if (kvm_riscv_current_cycles(gt) < t->next_cycles) { + delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); + hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns)); + return HRTIMER_RESTART; + } + + t->next_set = false; + kvm_vcpu_kick(vcpu); + + return HRTIMER_NORESTART; +} + +bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + + if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) || + kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER)) + return true; + else + return false; +} + +static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; + u64 delta_ns; + + if (!t->init_done) + return; + + delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); + if (delta_ns) { + hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL); + t->next_set = true; + } +} + +static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu) +{ + kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer); +} + int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { @@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu) return -EINVAL; hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - t->hrt.function = kvm_riscv_vcpu_hrtimer_expired; t->init_done = true; t->next_set = false; + /* Enable sstc for every vcpu if available in hardware */ + if (riscv_isa_extension_available(NULL, SSTC)) { + t->sstc_enabled = true; + t->hrt.function = kvm_riscv_vcpu_vstimer_expired; + t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp; + } else { + t->sstc_enabled = false; + t->hrt.function = kvm_riscv_vcpu_hrtimer_expired; + t->timer_next_event = kvm_riscv_vcpu_update_hrtimer; + } + return 0; } @@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu) int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu) { + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + + t->next_cycles = -1ULL; return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer); } -void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu) +static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu) { struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer; -#ifdef CONFIG_64BIT - 
csr_write(CSR_HTIMEDELTA, gt->time_delta); -#else +#if defined(CONFIG_32BIT) csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta)); csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32)); +#else + csr_write(CSR_HTIMEDELTA, gt->time_delta); #endif } +void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_csr *csr; + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + + kvm_riscv_vcpu_update_timedelta(vcpu); + + if (!t->sstc_enabled) + return; + + csr = &vcpu->arch.guest_csr; +#if defined(CONFIG_32BIT) + csr_write(CSR_VSTIMECMP, (u32)t->next_cycles); + csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32)); +#else + csr_write(CSR_VSTIMECMP, t->next_cycles); +#endif + + /* timer should be enabled for the remaining operations */ + if (unlikely(!t->init_done)) + return; + + kvm_riscv_vcpu_timer_unblocking(vcpu); +} + +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_csr *csr; + struct kvm_vcpu_timer *t = &vcpu->arch.timer; + + if (!t->sstc_enabled) + return; + + csr = &vcpu->arch.guest_csr; + t = &vcpu->arch.timer; +#if defined(CONFIG_32BIT) + t->next_cycles = csr_read(CSR_VSTIMECMP); + t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32; +#else + t->next_cycles = csr_read(CSR_VSTIMECMP); +#endif + /* timer should be enabled for the remaining operations */ + if (unlikely(!t->init_done)) + return; + + if (kvm_vcpu_is_blocking(vcpu)) + kvm_riscv_vcpu_timer_blocking(vcpu); +} + void kvm_riscv_guest_timer_init(struct kvm *kvm) { struct kvm_guest_timer *gt = &kvm->arch.timer; -- cgit v1.2.3
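For context, the actual consumers of the ALT_CMO_OP() helper added above live in arch/riscv/mm/dma-noncoherent.c, which falls outside the 'arch/riscv/include/asm' listings shown in this series. The sketch below illustrates how the generic DMA sync hooks are expected to drive the macro; the riscv_cbom_block_size variable name and the exact direction-to-operation mapping are illustrative assumptions, not a verbatim copy of that file.

/*
 * Illustrative sketch only -- not part of the patches above.  Assumes the
 * block size has been discovered (presumably by riscv_init_cbom_blocksize())
 * and defaults to L1_CACHE_BYTES until then.
 */
#include <linux/cache.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/errata_list.h>

static unsigned int riscv_cbom_block_size = L1_CACHE_BYTES;

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
	case DMA_FROM_DEVICE:
		/* Write back dirty lines before the device accesses the buffer:
		 * "clean" pastes into cbo.clean or the T-Head dcache.cva encoding. */
		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
		break;
	case DMA_BIDIRECTIONAL:
		/* "flush" pastes into cbo.flush or the T-Head dcache.civa encoding. */
		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
		break;
	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	/* Drop stale lines so the CPU observes the data written by the device. */
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}

Note that only inval, clean and flush are usable as the _op argument: the same token must expand into both the standard "cbo." mnemonic and one of the THEAD_inval_A0/THEAD_clean_A0/THEAD_flush_A0 encodings selected by the alternative.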