From 278294798ac9118412c9624a801d3f20f2279363 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Mon, 26 Sep 2022 14:57:10 -0700 Subject: PCI: Allow drivers to request exclusive config regions PCI config space access from user space has traditionally been unrestricted, with writes being an understood risk for device operation. Unfortunately, device breakage or odd behavior from config writes lacks indicators that can leave driver writers confused when evaluating failures. This is especially true with the new PCIe Data Object Exchange (DOE) mailbox protocol, where backdoor shenanigans from user space through things such as vendor defined protocols may affect device operation without complete breakage. A prior proposal restricted reads and writes completely.[1] Greg and Bjorn pointed out that proposal is flawed for a couple of reasons. First, lspci should always be allowed and should not interfere with any device operation. Second, setpci is a valuable tool that is sometimes necessary and it should not be completely restricted.[2] Finally, methods exist to fully lock down device access if required. Even though access should not be restricted, it would be nice for driver writers to be able to flag critical parts of the config space such that interference from user space can be detected. Introduce pci_request_config_region_exclusive() to mark exclusive config regions. Such regions trigger a warning and kernel taint if accessed via user space. Create pci_warn_once() to keep the warning from spamming the log. [1] https://lore.kernel.org/all/161663543465.1867664.5674061943008380442.stgit@dwillia2-desk3.amr.corp.intel.com/ [2] https://lore.kernel.org/all/YF8NGeGv9vYcMfTV@kroah.com/ Cc: Bjorn Helgaas Cc: Greg Kroah-Hartman Reviewed-by: Jonathan Cameron Suggested-by: Dan Williams Signed-off-by: Ira Weiny Acked-by: Greg Kroah-Hartman Acked-by: Bjorn Helgaas Link: https://lore.kernel.org/r/20220926215711.2893286-2-ira.weiny@intel.com Signed-off-by: Dan Williams --- drivers/pci/pci-sysfs.c | 7 +++++++ drivers/pci/probe.c | 6 ++++++ include/linux/ioport.h | 2 ++ include/linux/pci.h | 17 +++++++++++++++++ kernel/resource.c | 13 ++++++++----- 5 files changed, 40 insertions(+), 5 deletions(-) diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 0a2eeb82cebd..6c250eb214e8 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -756,6 +756,13 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj, if (ret) return ret; + if (resource_is_exclusive(&dev->driver_exclusive_resource, off, + count)) { + pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx", + current->comm, off); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + } + if (off > dev->cfg_size) return 0; if (off + count > dev->cfg_size) { diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b66fa42c4b1f..2f4e88a44e8b 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2307,6 +2307,12 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus) INIT_LIST_HEAD(&dev->bus_list); dev->dev.type = &pci_dev_type; dev->bus = pci_bus_get(bus); + dev->driver_exclusive_resource = (struct resource) { + .name = "PCI Exclusive", + .start = 0, + .end = -1, + }; + #ifdef CONFIG_PCI_MSI raw_spin_lock_init(&dev->msi_lock); #endif diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 27642ca15d93..4ae3c541ea6f 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -318,6 +318,8 @@ extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t
start, resource_size_t n); extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); extern bool iomem_is_exclusive(u64 addr); +extern bool resource_is_exclusive(struct resource *resource, u64 addr, + resource_size_t size); extern int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, diff --git a/include/linux/pci.h b/include/linux/pci.h index 2bda4a4e47e8..575849a100a3 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -409,6 +409,7 @@ struct pci_dev { */ unsigned int irq; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + struct resource driver_exclusive_resource; /* driver exclusive resource ranges */ bool match_driver; /* Skip attaching driver */ @@ -1407,6 +1408,21 @@ int pci_request_selected_regions(struct pci_dev *, int, const char *); int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); +static inline __must_check struct resource * +pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset, + unsigned int len, const char *name) +{ + return __request_region(&pdev->driver_exclusive_resource, offset, len, + name, IORESOURCE_EXCLUSIVE); +} + +static inline void pci_release_config_region(struct pci_dev *pdev, + unsigned int offset, + unsigned int len) +{ + __release_region(&pdev->driver_exclusive_resource, offset, len); +} + /* drivers/pci/bus.c */ void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, @@ -2484,6 +2500,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg) #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) diff --git a/kernel/resource.c b/kernel/resource.c index 4c5e80b92f2f..82ed54cd1f0d 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1707,18 +1707,15 @@ static int strict_iomem_checks; * * Returns true if exclusive to the kernel, otherwise returns false. 
*/ -bool iomem_is_exclusive(u64 addr) +bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size) { const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM | IORESOURCE_EXCLUSIVE; bool skip_children = false, err = false; - int size = PAGE_SIZE; struct resource *p; - addr = addr & PAGE_MASK; - read_lock(&resource_lock); - for_each_resource(&iomem_resource, p, skip_children) { + for_each_resource(root, p, skip_children) { if (p->start >= addr + size) break; if (p->end < addr) { @@ -1757,6 +1754,12 @@ bool iomem_is_exclusive(u64 addr) return err; } +bool iomem_is_exclusive(u64 addr) +{ + return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK, + PAGE_SIZE); +} + struct resource_entry *resource_list_create_entry(struct resource *res, size_t extra_size) { -- cgit v1.2.3 From 487d828d751d90cf9ca594f45b02dd0e0d712b64 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Mon, 26 Sep 2022 14:57:11 -0700 Subject: cxl/doe: Request exclusive DOE access The PCIe Data Object Exchange (DOE) mailbox is a protocol run over configuration cycles. It assumes one initiator at a time. While the kernel has control of the mailbox, user space writes could interfere with kernel access. Mark DOE mailbox config space exclusive when iterated by the CXL driver. Signed-off-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/20220926215711.2893286-3-ira.weiny@intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 5 +++++ include/uapi/linux/pci_regs.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index faeb5d9d7a7a..621a0522b554 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -418,6 +418,11 @@ static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds) continue; } + if (!pci_request_config_region_exclusive(pdev, off, + PCI_DOE_CAP_SIZEOF, + dev_name(dev))) + pci_err(pdev, "Failed to exclude DOE registers\n"); + if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) { dev_err(dev, "xa_insert failed to insert MB @ %x\n", off); diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 57b8e2ffb1dd..82a03ea954af 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -1119,6 +1119,7 @@ #define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */ #define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */ #define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */ +#define PCI_DOE_CAP_SIZEOF 0x18 /* Size of DOE register block */ /* DOE Data Object - note not actually registers */ #define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff -- cgit v1.2.3 From 1156b4418db01b1d5a332bc399817d029acd2ec8 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Fri, 28 Oct 2022 11:34:04 -0700 Subject: memregion: Add cpu_cache_invalidate_memregion() interface With CXL security features and CXL dynamic provisioning, the global CPU cache flushing requirements of nvdimm are no longer specific to that subsystem, even beyond the scope of security_ops. CXL will need such semantics for features not necessarily limited to persistent memory. The functionality being enabled is the ability to secure-erase potentially terabytes of memory at once, where the kernel needs to be sure that none of the data from before the erase is still present in the cache. It is also used when unlocking a memory device where speculative reads and firmware accesses could have cached poison from before the device was unlocked.
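The calling convention, also adopted by the nfit/intel.c conversion below, is "detect, then invalidate". A minimal sketch of a hypothetical driver path (example_dev and example_send_erase_command() are placeholders; only the cpu_cache_*() calls and the IORES_DESC_PERSISTENT_MEMORY descriptor come from this series):

    static int example_secure_erase(struct example_dev *dev)
    {
            /* e.g. in a VM there is no tolerable wbinvd(), so bail out */
            if (!cpu_cache_has_invalidate_memregion())
                    return -EINVAL;

            example_send_erase_command(dev); /* hypothetical device command */

            /* drop any CPU-cached data from before the erase */
            return cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
    }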
Lastly, this facility is used when mapping new devices, or new capacity, into an established physical address range. I.e. when the driver switches DeviceA mapping AddressX to DeviceB mapping AddressX, then any cached data from DeviceA:AddressX needs to be invalidated. This capability is typically only used once per boot (for unlock), or once per bare metal provisioning event (secure erase), like when handing off the system to another tenant or decommissioning a device. It may also be used for dynamic CXL region provisioning. Users must first call cpu_cache_has_invalidate_memregion() to know whether this functionality is available on the architecture. On x86 this respects the constraints of when wbinvd() is tolerable. It is already the case that wbinvd() is problematic to allow in VMs due to its global performance impact, and KVM, for example, has been known to just trap and ignore the call. With confidential computing, guest execution of wbinvd() may even trigger an exception. Given that guests should not be messing with the bare metal address map via CXL configuration changes, cpu_cache_has_invalidate_memregion() returns false in VMs. While this global cache invalidation facility is exported to modules (since NVDIMM and CXL support can be built as modules), it is not for general use. The intent is that this facility is not available outside of specific "device-memory" use cases. To make that expectation as clear as possible, the API is scoped to a new "DEVMEM" module namespace that only the NVDIMM and CXL subsystems are expected to import. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: x86@kernel.org Cc: "H. Peter Anvin" Cc: Andy Lutomirski Cc: Peter Zijlstra Tested-by: Dave Jiang Signed-off-by: Davidlohr Bueso Acked-by: Dave Hansen Co-developed-by: Dan Williams Signed-off-by: Dan Williams --- arch/x86/Kconfig | 1 + arch/x86/mm/pat/set_memory.c | 18 ++++++++++++++++++ drivers/acpi/nfit/intel.c | 43 ++++++++++++++++++++----------------------- include/linux/memregion.h | 38 ++++++++++++++++++++++++++++++++++++++ lib/Kconfig | 3 +++ 5 files changed, 80 insertions(+), 23 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 67745ceab0db..e16b2b15d67e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -69,6 +69,7 @@ config X86 select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_CACHE_LINE_SIZE + select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 2e5a045731de..ef34ba21aa92 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -330,6 +331,23 @@ void arch_invalidate_pmem(void *addr, size_t size) EXPORT_SYMBOL_GPL(arch_invalidate_pmem); #endif +#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION +bool cpu_cache_has_invalidate_memregion(void) +{ + return !cpu_feature_enabled(X86_FEATURE_HYPERVISOR); +} +EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, DEVMEM); + +int cpu_cache_invalidate_memregion(int res_desc) +{ + if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion())) + return -ENXIO; + wbinvd_on_all_cpus(); + return 0; +} +EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, DEVMEM); +#endif + static void __cpa_flush_all(void *arg) { unsigned long cache = (unsigned long)arg; diff --git
a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 8dd792a55730..fa0e57e35162 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "intel.h" #include "nfit.h" @@ -190,8 +191,6 @@ static int intel_security_change_key(struct nvdimm *nvdimm, } } -static void nvdimm_invalidate_cache(void); - static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, const struct nvdimm_key_data *key_data) { @@ -213,6 +212,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask)) return -ENOTTY; + if (!cpu_cache_has_invalidate_memregion()) + return -EINVAL; + memcpy(nd_cmd.cmd.passphrase, key_data->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -228,7 +230,7 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, } /* DIMM unlocked, invalidate all CPU caches before we read it */ - nvdimm_invalidate_cache(); + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -297,8 +299,11 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, if (!test_bit(cmd, &nfit_mem->dsm_mask)) return -ENOTTY; + if (!cpu_cache_has_invalidate_memregion()) + return -EINVAL; + /* flush all cache before we erase DIMM */ - nvdimm_invalidate_cache(); + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); memcpy(nd_cmd.cmd.passphrase, key->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -318,7 +323,7 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, } /* DIMM erased, invalidate all CPU caches before we read it */ - nvdimm_invalidate_cache(); + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -341,6 +346,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask)) return -ENOTTY; + if (!cpu_cache_has_invalidate_memregion()) + return -EINVAL; + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); if (rc < 0) return rc; @@ -355,7 +363,7 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) } /* flush all cache before we make the nvdimms available */ - nvdimm_invalidate_cache(); + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -380,8 +388,11 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask)) return -ENOTTY; + if (!cpu_cache_has_invalidate_memregion()) + return -EINVAL; + /* flush all cache before we erase DIMM */ - nvdimm_invalidate_cache(); + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); memcpy(nd_cmd.cmd.passphrase, nkey->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -401,22 +412,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, } } -/* - * TODO: define a cross arch wbinvd equivalent when/if - * NVDIMM_FAMILY_INTEL command support arrives on another arch. 
- */ -#ifdef CONFIG_X86 -static void nvdimm_invalidate_cache(void) -{ - wbinvd_on_all_cpus(); -} -#else -static void nvdimm_invalidate_cache(void) -{ - WARN_ON_ONCE("cache invalidation required after unlock\n"); -} -#endif - static const struct nvdimm_security_ops __intel_security_ops = { .get_flags = intel_security_flags, .freeze = intel_security_freeze, @@ -775,3 +770,5 @@ static const struct nvdimm_fw_ops __intel_fw_ops = { }; const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops; + +MODULE_IMPORT_NS(DEVMEM); diff --git a/include/linux/memregion.h b/include/linux/memregion.h index c04c4fd2e209..bf83363807ac 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h @@ -3,6 +3,7 @@ #define _MEMREGION_H_ #include #include +#include struct memregion_info { int target_node; @@ -20,4 +21,41 @@ static inline void memregion_free(int id) { } #endif + +/** + * cpu_cache_invalidate_memregion - drop any CPU cached data for + * memregions described by @res_desc + * @res_desc: one of the IORES_DESC_* types + * + * Perform cache maintenance after a memory event / operation that + * changes the contents of physical memory in a cache-incoherent manner. + * For example, device memory technologies like NVDIMM and CXL have + * device secure erase, and dynamic region provision that can replace + * the memory mapped to a given physical address. + * + * Limit the functionality to architectures that have an efficient way + * to writeback and invalidate potentially terabytes of address space at + * once. Note that this routine may or may not write back any dirty + * contents while performing the invalidation. It is only exported for + * the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM' + * symbol namespace on bare platforms. + * + * Returns 0 on success or negative error code on a failure to perform + * the cache maintenance. + */ +#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION +int cpu_cache_invalidate_memregion(int res_desc); +bool cpu_cache_has_invalidate_memregion(void); +#else +static inline bool cpu_cache_has_invalidate_memregion(void) +{ + return false; +} + +static inline int cpu_cache_invalidate_memregion(int res_desc) +{ + WARN_ON_ONCE("CPU cache invalidation required"); + return -ENXIO; +} +#endif #endif /* _MEMREGION_H_ */ diff --git a/lib/Kconfig b/lib/Kconfig index 9bbf8a4b2108..9eb514abcdec 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -672,6 +672,9 @@ config ARCH_HAS_PMEM_API config MEMREGION bool +config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION + bool + config ARCH_HAS_MEMREMAP_COMPAT_ALIGN bool -- cgit v1.2.3 From fa89248e669d589cdb895517e75cdead8d8ba5c0 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 18 Oct 2022 15:23:29 +0200 Subject: cxl/core: Remove duplicate declaration of devm_cxl_iomap_block() The function devm_cxl_iomap_block() is only used in the core code. It is declared in two header files: drivers/cxl/core/core.h and drivers/cxl/cxl.h. Remove its unused declaration in drivers/cxl/cxl.h. Also fix the build error in regs.c found by the kernel test robot by including "core.h" there.
Signed-off-by: Robert Richter Reported-by: kernel test robot Reviewed-by: Jonathan Cameron Reviewed-by: Davidlohr Bueso Reviewed-by: Dan Williams Link: https://lore.kernel.org/r/20221018132341.76259-2-rrichter@amd.com Signed-off-by: Dan Williams --- drivers/cxl/core/regs.c | 2 ++ drivers/cxl/cxl.h | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 39a129c57d40..6522931df3f7 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -7,6 +7,8 @@ #include #include +#include "core.h" + /** * DOC: cxl registers * diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index ac75554b5d76..a3a4f18b25f2 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -218,8 +218,6 @@ int cxl_map_device_regs(struct pci_dev *pdev, enum cxl_regloc_type; int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, struct cxl_register_map *map); -void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, - resource_size_t length); #define CXL_RESOURCE_NONE ((resource_size_t) -1) #define CXL_TARGET_STRLEN 20 -- cgit v1.2.3 From 3bb80da51b1c1dbf31af2226f57cbc258f5e994b Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 18 Oct 2022 15:23:30 +0200 Subject: cxl/core: Check physical address before mapping it in devm_cxl_iomap_block() The physical base address of a CXL range can be invalid and is then set to CXL_RESOURCE_NONE. In general software shall prevent such situations, but it is hard to prove that they can never happen. E.g. in add_port_attach_ep() there is the following: component_reg_phys = find_component_registers(uport_dev); port = devm_cxl_add_port(&parent_port->dev, uport_dev, component_reg_phys, parent_dport); find_component_registers() and subsequent functions (e.g. cxl_regmap_to_base()) may return CXL_RESOURCE_NONE. But it is written to port without any further check in cxl_port_alloc(): port->component_reg_phys = component_reg_phys; It is then later directly used in devm_cxl_setup_hdm() to map io ranges with devm_cxl_iomap_block(). This is just one example. Check for this condition. Also, do not fail silently as an ioremap() failure would; use a WARN_ON_ONCE() for it. Signed-off-by: Robert Richter Link: https://lore.kernel.org/r/20221018132341.76259-3-rrichter@amd.com Signed-off-by: Dan Williams --- drivers/cxl/core/regs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 6522931df3f7..ec178e69b18f 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -167,6 +167,9 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, void __iomem *ret_val; struct resource *res; + if (WARN_ON_ONCE(addr == CXL_RESOURCE_NONE)) + return NULL; + res = devm_request_mem_region(dev, addr, length, dev_name(dev)); if (!res) { resource_size_t end = addr + length - 1; -- cgit v1.2.3 From f3cd264c4ec1ab9b8918f3b083cfc13c5e7c26b7 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 18 Oct 2022 15:23:31 +0200 Subject: cxl: Unify debug messages when calling devm_cxl_add_port() CXL ports are added in a couple of code paths using devm_cxl_add_port(). Debug messages are individually generated, but are incomplete and inconsistent. Change this by moving their generation into devm_cxl_add_port(). This unifies the messages and reduces code duplication. Also, generate messages on failure. Use a __devm_cxl_add_port() wrapper to keep the readability of the error exits.
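Reduced to a sketch, the wrapper pattern looks like this (foo is a placeholder type and foo_register() a hypothetical helper, not the actual CXL code; the real conversion follows in the diff):

    /* __foo_add() keeps its goto-style error exits untouched */
    static struct foo *__foo_add(struct device *host)
    {
            struct foo *f;
            int rc;

            f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (!f)
                    return ERR_PTR(-ENOMEM);
            rc = foo_register(f, host); /* hypothetical helper */
            if (rc)
                    goto err;
            return f;
    err:
            kfree(f);
            return ERR_PTR(rc);
    }

    /* foo_add() wraps it solely to emit one consistent message */
    struct foo *foo_add(struct device *host)
    {
            struct foo *f = __foo_add(host);

            if (IS_ERR(f))
                    dev_dbg(host, "failed to add: %ld\n", PTR_ERR(f));
            else
                    dev_dbg(host, "added\n");
            return f;
    }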
Signed-off-by: Robert Richter Link: https://lore.kernel.org/r/20221018132341.76259-4-rrichter@amd.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 2 -- drivers/cxl/core/port.c | 51 +++++++++++++++++++++++++++++++++++++------------ 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index fb649683dd3a..767a91f44221 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -220,7 +220,6 @@ static int add_host_bridge_uport(struct device *match, void *arg) port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport); if (IS_ERR(port)) return PTR_ERR(port); - dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev)); return 0; } @@ -466,7 +465,6 @@ static int cxl_acpi_probe(struct platform_device *pdev) root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); if (IS_ERR(root_port)) return PTR_ERR(root_port); - dev_dbg(host, "add: %s\n", dev_name(&root_port->dev)); rc = bus_for_each_dev(adev->dev.bus, NULL, root_port, add_host_bridge_dport); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index e7556864ea80..93560d749aed 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -655,16 +655,10 @@ err: return ERR_PTR(rc); } -/** - * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy - * @host: host device for devm operations - * @uport: "physical" device implementing this upstream port - * @component_reg_phys: (optional) for configurable cxl_port instances - * @parent_dport: next hop up in the CXL memory decode hierarchy - */ -struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, - resource_size_t component_reg_phys, - struct cxl_dport *parent_dport) +static struct cxl_port *__devm_cxl_add_port(struct device *host, + struct device *uport, + resource_size_t component_reg_phys, + struct cxl_dport *parent_dport) { struct cxl_port *port; struct device *dev; @@ -702,6 +696,41 @@ err: put_device(dev); return ERR_PTR(rc); } + +/** + * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy + * @host: host device for devm operations + * @uport: "physical" device implementing this upstream port + * @component_reg_phys: (optional) for configurable cxl_port instances + * @parent_dport: next hop up in the CXL memory decode hierarchy + */ +struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, + resource_size_t component_reg_phys, + struct cxl_dport *parent_dport) +{ + struct cxl_port *port, *parent_port; + + port = __devm_cxl_add_port(host, uport, component_reg_phys, + parent_dport); + + parent_port = parent_dport ? parent_dport->port : NULL; + if (IS_ERR(port)) { + dev_dbg(uport, "Failed to add %s%s%s%s: %ld\n", + dev_name(&port->dev), + parent_port ? " to " : "", + parent_port ? dev_name(&parent_port->dev) : "", + parent_port ? "" : " (root port)", + PTR_ERR(port)); + } else { + dev_dbg(uport, "%s added%s%s%s\n", + dev_name(&port->dev), + parent_port ? " to " : "", + parent_port ? dev_name(&parent_port->dev) : "", + parent_port ? 
"" : " (root port)"); + } + + return port; +} EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) @@ -1147,8 +1176,6 @@ int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, if (IS_ERR(endpoint)) return PTR_ERR(endpoint); - dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev)); - rc = cxl_endpoint_autoremove(cxlmd, endpoint); if (rc) return rc; -- cgit v1.2.3 From 58eef878fcd1f44612445b5cdde598f04b940da8 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 18 Oct 2022 15:23:32 +0200 Subject: cxl: Unify debug messages when calling devm_cxl_add_dport() CXL dports are added in a couple of code paths using devm_cxl_add_dport(). Debug messages are individually generated, but are incomplete and inconsistent. Change this by moving its generation to devm_cxl_add_dport(). This unifies the messages and reduces code duplication. Also, generate messages on failure. Use a __devm_cxl_add_dport() wrapper to keep the readability of the error exits. Signed-off-by: Robert Richter Link: https://lore.kernel.org/r/20221018132341.76259-5-rrichter@amd.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 7 ++----- drivers/cxl/core/pci.c | 2 -- drivers/cxl/core/port.c | 48 +++++++++++++++++++++++++++++++------------- tools/testing/cxl/test/cxl.c | 9 +-------- 4 files changed, 37 insertions(+), 29 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 767a91f44221..31e104f0210f 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -282,12 +282,9 @@ static int add_host_bridge_dport(struct device *match, void *arg) } dport = devm_cxl_add_dport(root_port, match, uid, ctx.chbcr); - if (IS_ERR(dport)) { - dev_err(host, "failed to add downstream port: %s\n", - dev_name(match)); + if (IS_ERR(dport)) return PTR_ERR(dport); - } - dev_dbg(host, "add dport%llu: %s\n", uid, dev_name(match)); + return 0; } diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 9240df53ed87..0dbbe8d39b07 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -62,8 +62,6 @@ static int match_add_dports(struct pci_dev *pdev, void *data) } ctx->count++; - dev_dbg(&port->dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev)); - return 0; } diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 93560d749aed..0d2f5eaaca7d 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -899,20 +899,10 @@ static void cxl_dport_unlink(void *data) sysfs_remove_link(&port->dev.kobj, link_name); } -/** - * devm_cxl_add_dport - append downstream port data to a cxl_port - * @port: the cxl_port that references this dport - * @dport_dev: firmware or PCI device representing the dport - * @port_id: identifier for this dport in a decoder's target list - * @component_reg_phys: optional location of CXL component registers - * - * Note that dports are appended to the devm release action's of the - * either the port's host (for root ports), or the port itself (for - * switch ports) - */ -struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, - struct device *dport_dev, int port_id, - resource_size_t component_reg_phys) +static struct cxl_dport *__devm_cxl_add_dport(struct cxl_port *port, + struct device *dport_dev, + int port_id, + resource_size_t component_reg_phys) { char link_name[CXL_TARGET_STRLEN]; struct cxl_dport *dport; @@ -964,6 +954,36 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, return dport; } + +/** + * devm_cxl_add_dport - append downstream port data to a cxl_port + * @port: the cxl_port that 
references this dport + * @dport_dev: firmware or PCI device representing the dport + * @port_id: identifier for this dport in a decoder's target list + * @component_reg_phys: optional location of CXL component registers + * + * Note that dports are appended to the devm release action's of the + * either the port's host (for root ports), or the port itself (for + * switch ports) + */ +struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, + struct device *dport_dev, int port_id, + resource_size_t component_reg_phys) +{ + struct cxl_dport *dport; + + dport = __devm_cxl_add_dport(port, dport_dev, port_id, + component_reg_phys); + if (IS_ERR(dport)) { + dev_dbg(dport_dev, "failed to add dport to %s: %ld\n", + dev_name(&port->dev), PTR_ERR(dport)); + } else { + dev_dbg(dport_dev, "dport added to %s\n", + dev_name(&port->dev)); + } + + return dport; +} EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL); static int add_ep(struct cxl_ep *new) diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 7edce12fd2ce..a5146d80ecc4 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -634,7 +634,6 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) static int mock_cxl_port_enumerate_dports(struct cxl_port *port) { - struct device *dev = &port->dev; struct platform_device **array; int i, array_size; @@ -684,14 +683,8 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port) dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id, CXL_RESOURCE_NONE); - if (IS_ERR(dport)) { - dev_err(dev, "failed to add dport: %s (%ld)\n", - dev_name(&pdev->dev), PTR_ERR(dport)); + if (IS_ERR(dport)) return PTR_ERR(dport); - } - - dev_dbg(dev, "add dport%d: %s\n", pdev->id, - dev_name(&pdev->dev)); } return 0; -- cgit v1.2.3 From 76f191108419424fef1bb713d8d160c3b49bedc4 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 14 Nov 2022 10:29:52 -0800 Subject: tools/testing/cxl: Add bridge mocking support In preparation for cxl_acpi walking pci_root->bus->bridge, add that association to the mock pci_root instances. Note that the missing 3rd entry in mock_pci_root[] was not noticed until now given that the test version of to_cxl_host_bridge() (tools/testing/cxl/mock_acpi.c), obviated the need for that entry. However, "cxl/acpi: Improve debug messages in cxl_acpi_probe()" [1] needs pci_root->bus->bridge to be populated. 
Link: https://lore.kernel.org/r/20221018132341.76259-6-rrichter@amd.com [1] Cc: Robert Richter Signed-off-by: Dan Williams --- tools/testing/cxl/test/cxl.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index a5146d80ecc4..facfcd11cb67 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -439,14 +439,18 @@ mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, return AE_OK; } -static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES]; -static struct acpi_pci_root mock_pci_root[NR_CXL_HOST_BRIDGES] = { +static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST]; +static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = { [0] = { .bus = &mock_pci_bus[0], }, [1] = { .bus = &mock_pci_bus[1], }, + [2] = { + .bus = &mock_pci_bus[2], + }, + }; static bool is_mock_bus(struct pci_bus *bus) @@ -744,6 +748,7 @@ static __init int cxl_single_init(void) } cxl_hb_single[i] = pdev; + mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev; rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj, "physical_node"); if (rc) @@ -910,6 +915,7 @@ static __init int cxl_test_init(void) } cxl_host_bridge[i] = pdev; + mock_pci_bus[i].bridge = &pdev->dev; rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj, "physical_node"); if (rc) -- cgit v1.2.3 From b51d76752120a267397276121619fca80f767e62 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 18 Oct 2022 15:23:33 +0200 Subject: cxl/acpi: Improve debug messages in cxl_acpi_probe() In cxl_acpi_probe() the iterator bus_for_each_dev() walks through all CXL hosts. Since all dev_*() debug messages point to the ACPI0017 device which is the CXL root for all hosts, the device information is pointless as it is always the same device. Change this to use the host device for this instead. Also, add additional host specific information such as CXL support, UID and CHBCR. 
This is an example log:

  acpi ACPI0016:00: UID found: 4
  acpi ACPI0016:00: CHBCR found: 0x28090000000
  acpi ACPI0016:00: dport added to root0
  acpi ACPI0016:00: host-bridge: ACPI0016:00
  pci0000:7f: host supports CXL

Signed-off-by: Robert Richter Link: https://lore.kernel.org/r/20221018132341.76259-6-rrichter@amd.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 31e104f0210f..fb9f72813067 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -221,6 +221,8 @@ static int add_host_bridge_uport(struct device *match, void *arg) if (IS_ERR(port)) return PTR_ERR(port); + dev_info(pci_root->bus->bridge, "host supports CXL\n"); + return 0; } @@ -264,11 +266,12 @@ static int add_host_bridge_dport(struct device *match, void *arg) status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL, &uid); if (status != AE_OK) { - dev_err(host, "unable to retrieve _UID of %s\n", - dev_name(match)); + dev_err(match, "unable to retrieve _UID\n"); return -ENODEV; } + dev_dbg(match, "UID found: %lld\n", uid); + ctx = (struct cxl_chbs_context) { .dev = host, .uid = uid, @@ -276,11 +279,12 @@ static int add_host_bridge_dport(struct device *match, void *arg) acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx); if (ctx.chbcr == 0) { - dev_warn(host, "No CHBS found for Host Bridge: %s\n", - dev_name(match)); + dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n", uid); return 0; } + dev_dbg(match, "CHBCR found: 0x%08llx\n", (u64)ctx.chbcr); + dport = devm_cxl_add_dport(root_port, match, uid, ctx.chbcr); if (IS_ERR(dport)) return PTR_ERR(dport); -- cgit v1.2.3 From 3b39fd6cf12ceda2a2582dcb9b9ee9f4d197b857 Mon Sep 17 00:00:00 2001 From: Adam Manzanares Date: Mon, 29 Aug 2022 22:03:14 +0000 Subject: cxl: Replace HDM decoder granularity magic numbers When reviewing the CFMWS parsing code that deals with the HDM decoders, I noticed a couple of magic numbers. Replace these magic numbers with constants defined by the CXL 3.0 specification.
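The encoding behind these constants: the interleave granularity (ig) field is a power-of-2 exponent relative to a 256 byte minimum, so the decode is granularity = 256 << ig and the encode is ig = ilog2(granularity) - 8. A worked example using only values from the diff below:

    unsigned int granularity = 256 << 6; /* ig = 6 (the max) -> 16384, i.e. SZ_16K */
    u16 ig = ilog2(16384) - 8;           /* back to 6, i.e. CXL_DECODER_MAX_ENCODED_IG */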
v2: - Change references to CXL 3.0 specification (David) - CXL_DECODER_MAX_GRANULARITY_ORDER -> CXL_DECODER_MAX_ENCODED_IG (Dan) Signed-off-by: Adam Manzanares Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/20220829220249.243888-1-a.manzanares@samsung.com Signed-off-by: Dan Williams --- drivers/cxl/cxl.h | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index a3a4f18b25f2..7d07127eade3 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -61,6 +61,10 @@ #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i) #define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i) +/* HDM decoder control register constants CXL 3.0 8.2.5.19.7 */ +#define CXL_DECODER_MIN_GRANULARITY 256 +#define CXL_DECODER_MAX_ENCODED_IG 6 + static inline int cxl_hdm_decoder_count(u32 cap_hdr) { int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr); @@ -71,9 +75,9 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr) /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */ static inline int cxl_to_granularity(u16 ig, unsigned int *val) { - if (ig > 6) + if (ig > CXL_DECODER_MAX_ENCODED_IG) return -EINVAL; - *val = 256 << ig; + *val = CXL_DECODER_MIN_GRANULARITY << ig; return 0; } @@ -96,7 +100,7 @@ static inline int cxl_to_ways(u8 eniw, unsigned int *val) static inline int granularity_to_cxl(int g, u16 *ig) { - if (g > SZ_16K || g < 256 || !is_power_of_2(g)) + if (g > SZ_16K || g < CXL_DECODER_MIN_GRANULARITY || !is_power_of_2(g)) return -EINVAL; *ig = ilog2(g) - 8; return 0; @@ -246,7 +250,6 @@ enum cxl_decoder_type { */ #define CXL_DECODER_MAX_INTERLEAVE 16 -#define CXL_DECODER_MIN_GRANULARITY 256 /** * struct cxl_decoder - Common CXL HDM Decoder Attributes -- cgit v1.2.3 From 328281155539b44539e12d62803e09310d86d11f Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:21:36 -0700 Subject: cxl/pmem: Introduce nvdimm_security_ops with ->get_flags() operation Add nvdimm_security_ops support for CXL memory device with the introduction of the ->get_flags() callback function. This is part of the "Persistent Memory Data-at-rest Security" command set for CXL memory device support. The ->get_flags() function provides the security state of the persistent memory device defined by the CXL 3.0 spec section 8.2.9.8.6.1. 
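To make the flag translation concrete, a worked decode using only the CXL_PMEM_SEC_STATE_* defines added below, for a device reporting, say, sec_out = 0x05:

    u32 sec_out = 0x05; /* example "Get Security State" output */

    bool user_set = sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET; /* 0x01: true */
    bool locked = sec_out & CXL_PMEM_SEC_STATE_LOCKED;          /* 0x04: true */
    bool frozen = sec_out & CXL_PMEM_SEC_STATE_FROZEN;          /* 0x08: false */

For a user-scope query, the new ->get_flags() callback maps this combination to NVDIMM_SECURITY_LOCKED.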
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983609611.2734609.13231854299523325319.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/Makefile | 2 +- drivers/cxl/core/mbox.c | 1 + drivers/cxl/cxlmem.h | 8 +++++++ drivers/cxl/pmem.c | 6 +++-- drivers/cxl/security.c | 56 ++++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/cxl_mem.h | 1 + tools/testing/cxl/Kbuild | 1 + 7 files changed, 72 insertions(+), 3 deletions(-) create mode 100644 drivers/cxl/security.c diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index a78270794150..db321f48ba52 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -9,5 +9,5 @@ obj-$(CONFIG_CXL_PORT) += cxl_port.o cxl_mem-y := mem.o cxl_pci-y := pci.o cxl_acpi-y := acpi.o -cxl_pmem-y := pmem.o +cxl_pmem-y := pmem.o security.o cxl_port-y := port.o diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 0c90f13870a4..6907ee1f43e0 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -65,6 +65,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), + CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0), }; /* diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 88e3a8e54b6a..25d1d8fa7d1e 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -273,6 +273,7 @@ enum cxl_opcode { CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303, CXL_MBOX_OP_SCAN_MEDIA = 0x4304, CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, + CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500, CXL_MBOX_OP_MAX = 0x10000 }; @@ -372,6 +373,13 @@ struct cxl_mem_command { #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) }; +#define CXL_PMEM_SEC_STATE_USER_PASS_SET 0x01 +#define CXL_PMEM_SEC_STATE_MASTER_PASS_SET 0x02 +#define CXL_PMEM_SEC_STATE_LOCKED 0x04 +#define CXL_PMEM_SEC_STATE_FROZEN 0x08 +#define CXL_PMEM_SEC_STATE_USER_PLIMIT 0x10 +#define CXL_PMEM_SEC_STATE_MASTER_PLIMIT 0x20 + int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, size_t in_size, void *out, size_t out_size); int cxl_dev_state_identify(struct cxl_dev_state *cxlds); diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 4c627d67281a..efffc731c2ec 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -11,6 +11,8 @@ #include "cxlmem.h" #include "cxl.h" +extern const struct nvdimm_security_ops *cxl_security_ops; + /* * Ordered workqueue for cxl nvdimm device arrival and departure * to coordinate bus rescans when a bridge arrives and trigger remove @@ -78,8 +80,8 @@ static int cxl_nvdimm_probe(struct device *dev) set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); - nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, - cmd_mask, 0, NULL); + nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, + cmd_mask, 0, NULL, NULL, cxl_security_ops, NULL); if (!nvdimm) { rc = -ENOMEM; goto out; diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c new file mode 100644 index 000000000000..806173084216 --- /dev/null +++ b/drivers/cxl/security.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation. All rights reserved. 
*/ +#include +#include +#include +#include +#include +#include "cxlmem.h" +#include "cxl.h" + +static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm, + enum nvdimm_passphrase_type ptype) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + unsigned long security_flags = 0; + u32 sec_out; + int rc; + + rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0, + &sec_out, sizeof(sec_out)); + if (rc < 0) + return 0; + + if (ptype == NVDIMM_MASTER) { + if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) + set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags); + else + set_bit(NVDIMM_SECURITY_DISABLED, &security_flags); + if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) + set_bit(NVDIMM_SECURITY_FROZEN, &security_flags); + return security_flags; + } + + if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET) { + if (sec_out & CXL_PMEM_SEC_STATE_FROZEN || + sec_out & CXL_PMEM_SEC_STATE_USER_PLIMIT) + set_bit(NVDIMM_SECURITY_FROZEN, &security_flags); + + if (sec_out & CXL_PMEM_SEC_STATE_LOCKED) + set_bit(NVDIMM_SECURITY_LOCKED, &security_flags); + else + set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags); + } else { + set_bit(NVDIMM_SECURITY_DISABLED, &security_flags); + } + + return security_flags; +} + +static const struct nvdimm_security_ops __cxl_security_ops = { + .get_flags = cxl_pmem_get_security_flags, +}; + +const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index c71021a2a9ed..cdc6049683ce 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -41,6 +41,7 @@ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ ___C(SCAN_MEDIA, "Scan Media"), \ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ + ___C(GET_SECURITY_STATE, "Get Security State"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 500be85729cc..e4048a05b6ab 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -26,6 +26,7 @@ cxl_acpi-y += config_check.o obj-m += cxl_pmem.o cxl_pmem-y := $(CXL_SRC)/pmem.o +cxl_pmem-y += $(CXL_SRC)/security.o cxl_pmem-y += config_check.o obj-m += cxl_port.o -- cgit v1.2.3 From 1fa99be35ab3591fbee99b652c10ac8af131d6ee Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:21:41 -0700 Subject: tools/testing/cxl: Add "Get Security State" opcode support Add the emulation support for handling "Get Security State" opcode for a CXL memory device for the cxl_test. The function will copy back device security state bitmask to the output payload. The security state data is added as platform_data for the mock mem device. 
Reviewed-by: Davidlohr Bueso Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983610177.2734609.4953959949148428755.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 44 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index aa2df3a15051..d67fc04bf0cf 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -65,6 +65,11 @@ static struct { }, }; +struct cxl_mockmem_data { + void *lsa; + u32 security_state; +}; + static int mock_gsl(struct cxl_mbox_cmd *cmd) { if (cmd->size_out < sizeof(mock_gsl_payload)) @@ -137,10 +142,27 @@ static int mock_partition_info(struct cxl_dev_state *cxlds, return 0; } +static int mock_get_security_state(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *cmd) +{ + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); + + if (cmd->size_in) + return -EINVAL; + + if (cmd->size_out != sizeof(u32)) + return -EINVAL; + + memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32)); + + return 0; +} + static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; - void *lsa = dev_get_drvdata(cxlds->dev); + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); + void *lsa = mdata->lsa; u32 offset, length; if (sizeof(*get_lsa) > cmd->size_in) @@ -159,7 +181,8 @@ static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in; - void *lsa = dev_get_drvdata(cxlds->dev); + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); + void *lsa = mdata->lsa; u32 offset, length; if (sizeof(*set_lsa) > cmd->size_in) @@ -230,6 +253,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_GET_HEALTH_INFO: rc = mock_health_info(cxlds, cmd); break; + case CXL_MBOX_OP_GET_SECURITY_STATE: + rc = mock_get_security_state(cxlds, cmd); + break; default: break; } @@ -250,16 +276,20 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct cxl_memdev *cxlmd; struct cxl_dev_state *cxlds; - void *lsa; + struct cxl_mockmem_data *mdata; int rc; - lsa = vmalloc(LSA_SIZE); - if (!lsa) + mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL); + if (!mdata) + return -ENOMEM; + dev_set_drvdata(dev, mdata); + + mdata->lsa = vmalloc(LSA_SIZE); + if (!mdata->lsa) return -ENOMEM; - rc = devm_add_action_or_reset(dev, label_area_release, lsa); + rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa); if (rc) return rc; - dev_set_drvdata(dev, lsa); cxlds = cxl_dev_state_create(dev); if (IS_ERR(cxlds)) -- cgit v1.2.3 From 997469407f266250040f20ec73aecc77ad277145 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:21:47 -0700 Subject: cxl/pmem: Add "Set Passphrase" security command support Create callback function to support the nvdimm_security_ops ->change_key() callback. Translate the operation to send "Set Passphrase" security command for CXL memory device. The operation supports setting a passphrase for the CXL persistent memory device. It also supports the changing of the currently set passphrase. The operation allows manipulation of a user passphrase or a master passphrase. See CXL rev3.0 spec section 8.2.9.8.6.2 for reference. 
However, the spec leaves a gap with regard to master passphrase usage. It does not define a way to query whether master passphrase support is available on a device, nor do the commands that use a master passphrase return a specific error indicating that the master passphrase is unsupported. If a command is issued with a master passphrase to a device that does not support one, the error returned by the device will be ambiguous. Reviewed-by: Davidlohr Bueso Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983610751.2734609.4445075071552032091.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 1 + drivers/cxl/cxlmem.h | 15 +++++++++++++++ drivers/cxl/security.c | 22 ++++++++++++++++++++++ include/uapi/linux/cxl_mem.h | 1 + 4 files changed, 39 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 6907ee1f43e0..2fdafa697e6a 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -66,6 +66,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0), + CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0), }; /* diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 25d1d8fa7d1e..725b08148524 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -274,6 +274,7 @@ enum cxl_opcode { CXL_MBOX_OP_SCAN_MEDIA = 0x4304, CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500, + CXL_MBOX_OP_SET_PASSPHRASE = 0x4501, CXL_MBOX_OP_MAX = 0x10000 }; @@ -380,6 +381,20 @@ struct cxl_mem_command { #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) }; +/* set passphrase input payload */ +struct cxl_set_pass { + u8 type; + u8 reserved[31]; + /* CXL field using NVDIMM define, same length */ + u8 old_pass[NVDIMM_PASSPHRASE_LEN]; + u8 new_pass[NVDIMM_PASSPHRASE_LEN]; +} __packed; + +enum { + CXL_PMEM_SEC_PASS_MASTER = 0, + CXL_PMEM_SEC_PASS_USER, +}; + int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, size_t in_size, void *out, size_t out_size); int cxl_dev_state_identify(struct cxl_dev_state *cxlds); diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 806173084216..5365646230c3 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -49,8 +49,30 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm, return security_flags; } +static int cxl_pmem_security_change_key(struct nvdimm *nvdimm, + const struct nvdimm_key_data *old_data, + const struct nvdimm_key_data *new_data, + enum nvdimm_passphrase_type ptype) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_set_pass set_pass; + int rc; + + set_pass.type = ptype == NVDIMM_MASTER ?
+ CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; + memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN); + memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN); + + rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE, + &set_pass, sizeof(set_pass), NULL, 0); + return rc; +} + static const struct nvdimm_security_ops __cxl_security_ops = { .get_flags = cxl_pmem_get_security_flags, + .change_key = cxl_pmem_security_change_key, }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index cdc6049683ce..9da047e9b038 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -42,6 +42,7 @@ ___C(SCAN_MEDIA, "Scan Media"), \ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ___C(GET_SECURITY_STATE, "Get Security State"), \ + ___C(SET_PASSPHRASE, "Set Passphrase"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a -- cgit v1.2.3 From 53d2ce6e7869d369d8bbed3bd8a3d2c9758cf7c7 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:21:53 -0700 Subject: tools/testing/cxl: Add "Set Passphrase" opcode support Add support to emulate a CXL mem device supporting the "Set Passphrase" operation. The operation supports setting of either a user or a master passphrase. Reviewed-by: Jonathan Cameron Reviewed-by: Davidlohr Bueso Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983611314.2734609.12996309794483934484.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 88 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index d67fc04bf0cf..33ae7953f3f1 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -65,9 +65,16 @@ static struct { }, }; +#define PASS_TRY_LIMIT 3 + struct cxl_mockmem_data { void *lsa; u32 security_state; + u8 user_pass[NVDIMM_PASSPHRASE_LEN]; + u8 master_pass[NVDIMM_PASSPHRASE_LEN]; + int user_limit; + int master_limit; + }; static int mock_gsl(struct cxl_mbox_cmd *cmd) @@ -158,6 +165,84 @@ static int mock_get_security_state(struct cxl_dev_state *cxlds, return 0; } +static void master_plimit_check(struct cxl_mockmem_data *mdata) +{ + if (mdata->master_limit == PASS_TRY_LIMIT) + return; + mdata->master_limit++; + if (mdata->master_limit == PASS_TRY_LIMIT) + mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT; +} + +static void user_plimit_check(struct cxl_mockmem_data *mdata) +{ + if (mdata->user_limit == PASS_TRY_LIMIT) + return; + mdata->user_limit++; + if (mdata->user_limit == PASS_TRY_LIMIT) + mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; +} + +static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); + struct cxl_set_pass *set_pass; + + if (cmd->size_in != sizeof(*set_pass)) + return -EINVAL; + + if (cmd->size_out != 0) + return -EINVAL; + + if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + set_pass = cmd->payload_in; + switch (set_pass->type) { + case CXL_PMEM_SEC_PASS_MASTER: + if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + /* + * CXL spec rev3.0 8.2.9.8.6.2, The master pasphrase shall only be set in + * the security disabled state when the user 
passphrase is not set. + */ + if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) { + master_plimit_check(mdata); + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN); + mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET; + return 0; + + case CXL_PMEM_SEC_PASS_USER: + if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) { + user_plimit_check(mdata); + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN); + mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET; + return 0; + + default: + cmd->return_code = CXL_MBOX_CMD_RC_INPUT; + } + return -EINVAL; +} + static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; @@ -256,6 +341,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_GET_SECURITY_STATE: rc = mock_get_security_state(cxlds, cmd); break; + case CXL_MBOX_OP_SET_PASSPHRASE: + rc = mock_set_passphrase(cxlds, cmd); + break; default: break; } -- cgit v1.2.3 From c4ef680d0b72815003a76074ca1cd872a2fecfc3 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:21:58 -0700 Subject: cxl/pmem: Add Disable Passphrase security command support Create callback function to support the nvdimm_security_ops ->disable() callback. Translate the operation to send "Disable Passphrase" security command for CXL memory device. The operation supports disabling a passphrase for the CXL persistent memory device. In the original implementation of nvdimm_security_ops, this operation only supports disabling of the user passphrase. This is because the NFIT version of disable passphrase only supported disabling of the user passphrase. The CXL spec allows disabling of the master passphrase as well, which nvdimm_security_ops does not support yet. In this commit, the callback function only supports the user passphrase. See CXL rev3.0 spec section 8.2.9.8.6.3 for reference.
Reviewed-by: Davidlohr Bueso Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983611878.2734609.10602135274526390127.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 1 + drivers/cxl/cxlmem.h | 8 ++++++++ drivers/cxl/security.c | 18 ++++++++++++++++++ include/uapi/linux/cxl_mem.h | 1 + 4 files changed, 28 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 2fdafa697e6a..890db291c6bf 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -67,6 +67,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0), CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0), + CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0), }; /* diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 725b08148524..9ad92f975b78 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -275,6 +275,7 @@ enum cxl_opcode { CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500, CXL_MBOX_OP_SET_PASSPHRASE = 0x4501, + CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502, CXL_MBOX_OP_MAX = 0x10000 }; @@ -390,6 +391,13 @@ struct cxl_set_pass { u8 new_pass[NVDIMM_PASSPHRASE_LEN]; } __packed; +/* disable passphrase input payload */ +struct cxl_disable_pass { + u8 type; + u8 reserved[31]; + u8 pass[NVDIMM_PASSPHRASE_LEN]; +} __packed; + enum { CXL_PMEM_SEC_PASS_MASTER = 0, CXL_PMEM_SEC_PASS_USER, diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 5365646230c3..5a8e852ecadb 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -70,9 +70,27 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm, return rc; } +static int cxl_pmem_security_disable(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_disable_pass dis_pass; + int rc; + + dis_pass.type = CXL_PMEM_SEC_PASS_USER; + memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN); + + rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE, + &dis_pass, sizeof(dis_pass), NULL, 0); + return rc; +} + static const struct nvdimm_security_ops __cxl_security_ops = { .get_flags = cxl_pmem_get_security_flags, .change_key = cxl_pmem_security_change_key, + .disable = cxl_pmem_security_disable, }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 9da047e9b038..f6d383a80f22 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -43,6 +43,7 @@ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ___C(GET_SECURITY_STATE, "Get Security State"), \ ___C(SET_PASSPHRASE, "Set Passphrase"), \ + ___C(DISABLE_PASSPHRASE, "Disable Passphrase"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a -- cgit v1.2.3 From abf0c8380a487d050da58e344635f4e96dd3c467 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:04 -0700 Subject: tools/testing/cxl: Add "Disable" security opcode support Add support to emulate a CXL mem device supporting the "Disable Passphrase" operation. The operation supports disabling of either a user or a master passphrase; the emulation provides support for both.
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983612447.2734609.2767804273351656413.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 74 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 33ae7953f3f1..77774a951a81 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -243,6 +243,77 @@ static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd return -EINVAL; } +static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + struct cxl_disable_pass *dis_pass; + + if (cmd->size_in != sizeof(*dis_pass)) + return -EINVAL; + + if (cmd->size_out != 0) + return -EINVAL; + + if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + dis_pass = cmd->payload_in; + switch (dis_pass->type) { + case CXL_PMEM_SEC_PASS_MASTER: + if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) { + master_plimit_check(mdata); + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + + mdata->master_limit = 0; + memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN); + mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET; + return 0; + + case CXL_PMEM_SEC_PASS_USER: + if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) { + user_plimit_check(mdata); + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + + mdata->user_limit = 0; + memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); + mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET | + CXL_PMEM_SEC_STATE_LOCKED); + return 0; + + default: + cmd->return_code = CXL_MBOX_CMD_RC_INPUT; + return -EINVAL; + } + + return 0; +} + static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; @@ -344,6 +415,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_SET_PASSPHRASE: rc = mock_set_passphrase(cxlds, cmd); break; + case CXL_MBOX_OP_DISABLE_PASSPHRASE: + rc = mock_disable_passphrase(cxlds, cmd); + break; default: break; } -- cgit v1.2.3 From a072f7b7972fd85bdefefa1d6febec483438d420 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:10 -0700 Subject: cxl/pmem: Add "Freeze Security State" security command support Create callback function to support the nvdimm_security_ops() ->freeze() callback. Translate the operation to send "Freeze Security State" security command for CXL memory device. See CXL rev3.0 spec section 8.2.9.8.6.5 for reference. 
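The freeze operation carries no payload: once it completes, the device rejects further security-state changes until the next device reset. A hedged userspace sketch of driving it through the same sysfs attribute (the device path is illustrative):

#include <fcntl.h>
#include <unistd.h>

/* Sketch: freeze the security state of nmem0 until the next device reset */
int freeze_nmem0(void)
{
	int fd = open("/sys/bus/nd/devices/nmem0/security", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "freeze", 6);	/* "freeze" takes no key id */
	close(fd);
	return n == 6 ? 0 : -1;
}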
Reviewed-by: Davidlohr Bueso Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983613019.2734609.10645754779802492122.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 1 + drivers/cxl/cxlmem.h | 1 + drivers/cxl/security.c | 10 ++++++++++ include/uapi/linux/cxl_mem.h | 1 + 4 files changed, 13 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 890db291c6bf..20bceb9e78bc 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -68,6 +68,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0), CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0), CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0), + CXL_CMD(FREEZE_SECURITY, 0, 0, 0), }; /* diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 9ad92f975b78..9007158969fe 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -276,6 +276,7 @@ enum cxl_opcode { CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500, CXL_MBOX_OP_SET_PASSPHRASE = 0x4501, CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502, + CXL_MBOX_OP_FREEZE_SECURITY = 0x4504, CXL_MBOX_OP_MAX = 0x10000 }; diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 5a8e852ecadb..f323a1593cfc 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -87,10 +87,20 @@ static int cxl_pmem_security_disable(struct nvdimm *nvdimm, return rc; } +static int cxl_pmem_security_freeze(struct nvdimm *nvdimm) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0); +} + static const struct nvdimm_security_ops __cxl_security_ops = { .get_flags = cxl_pmem_get_security_flags, .change_key = cxl_pmem_security_change_key, .disable = cxl_pmem_security_disable, + .freeze = cxl_pmem_security_freeze, }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index f6d383a80f22..7c0adcd68f4c 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -44,6 +44,7 @@ ___C(GET_SECURITY_STATE, "Get Security State"), \ ___C(SET_PASSPHRASE, "Set Passphrase"), \ ___C(DISABLE_PASSPHRASE, "Disable Passphrase"), \ + ___C(FREEZE_SECURITY, "Freeze Security"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a -- cgit v1.2.3 From 410926e9d79b4aba516a6677f88e0c35cbbd5b04 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:16 -0700 Subject: tools/testing/cxl: Add "Freeze Security State" security opcode support Add support to emulate a CXL mem device supporting the "Freeze Security State" operation.
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983613604.2734609.1960672960407811362.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 77774a951a81..45c6e6d3cfbb 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -314,6 +314,23 @@ static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_ return 0; } +static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + + if (cmd->size_in != 0) + return -EINVAL; + + if (cmd->size_out != 0) + return -EINVAL; + + if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) + return 0; + + mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN; + return 0; +} + static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; @@ -418,6 +435,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_DISABLE_PASSPHRASE: rc = mock_disable_passphrase(cxlds, cmd); break; + case CXL_MBOX_OP_FREEZE_SECURITY: + rc = mock_freeze_security(cxlds, cmd); + break; default: break; } -- cgit v1.2.3 From 2bb692f7a6cd0a7b2c29d8d5029c4469c4ec02dd Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:21 -0700 Subject: cxl/pmem: Add "Unlock" security command support Create callback function to support the nvdimm_security_ops() ->unlock() callback. Translate the operation to send "Unlock" security command for CXL mem device. When the mem device is unlocked, cpu_cache_invalidate_memregion() is called in order to invalidate all CPU caches before attempting to access the mem device. See CXL rev3.0 spec section 8.2.9.8.6.4 for reference. 
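The ordering in the callback below is load-bearing: any CPU cache lines filled while the device was still locked do not hold the real media contents, so invalidation must happen after the unlock succeeds and before reads are serviced. Condensed from the diff that follows:

	/* condensed from cxl_pmem_security_unlock() below */
	if (!cpu_cache_has_invalidate_memregion())
		return -EINVAL;			/* cannot make cached reads safe */

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK,
			       pass, NVDIMM_PASSPHRASE_LEN, NULL, 0);
	if (rc < 0)
		return rc;			/* still locked, nothing to invalidate */

	/* DIMM unlocked, drop any stale lines before data is read */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);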
Reviewed-by: Davidlohr Bueso Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983614167.2734609.15124543712487741176.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 1 + drivers/cxl/cxlmem.h | 1 + drivers/cxl/security.c | 27 +++++++++++++++++++++++++++ include/uapi/linux/cxl_mem.h | 1 + 4 files changed, 30 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 890db291c6bf..4f84d3962fb1 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -69,6 +69,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0), CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0), CXL_CMD(FREEZE_SECURITY, 0, 0, 0), + CXL_CMD(UNLOCK, 0x20, 0, 0), }; /* diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 9007158969fe..4e6897e8eb7d 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -276,6 +276,7 @@ enum cxl_opcode { CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500, CXL_MBOX_OP_SET_PASSPHRASE = 0x4501, CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502, + CXL_MBOX_OP_UNLOCK = 0x4503, CXL_MBOX_OP_FREEZE_SECURITY = 0x4504, CXL_MBOX_OP_MAX = 0x10000 }; diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index f323a1593cfc..32b9e279e74b 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -5,6 +5,7 @@ #include #include #include +#include <linux/memregion.h> #include "cxlmem.h" #include "cxl.h" @@ -96,11 +97,37 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm) return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0); } +static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + u8 pass[NVDIMM_PASSPHRASE_LEN]; + int rc; + + if (!cpu_cache_has_invalidate_memregion()) + return -EINVAL; + + memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN); + rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK, + pass, NVDIMM_PASSPHRASE_LEN, NULL, 0); + if (rc < 0) + return rc; + + /* DIMM unlocked, invalidate all CPU caches before we read it */ + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); + return 0; +} + static const struct nvdimm_security_ops __cxl_security_ops = { .get_flags = cxl_pmem_get_security_flags, .change_key = cxl_pmem_security_change_key, .disable = cxl_pmem_security_disable, .freeze = cxl_pmem_security_freeze, + .unlock = cxl_pmem_security_unlock, }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; + +MODULE_IMPORT_NS(DEVMEM); diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 7c0adcd68f4c..95dca8d4584f 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -45,6 +45,7 @@ ___C(SET_PASSPHRASE, "Set Passphrase"), \ ___C(DISABLE_PASSPHRASE, "Disable Passphrase"), \ ___C(FREEZE_SECURITY, "Freeze Security"), \ + ___C(UNLOCK, "Unlock"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a -- cgit v1.2.3 From 8e80b18664934621c742f17cdac3c4fe00728612 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:27 -0700 Subject: tools/testing/cxl: Add "Unlock" security opcode support Add support to emulate a CXL mem device supporting the "Unlock" operation.
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983614730.2734609.2280484207184754073.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 45 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 45c6e6d3cfbb..ddd4a17e5564 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -331,6 +331,48 @@ static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd return 0; } +static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + + if (cmd->size_in != NVDIMM_PASSPHRASE_LEN) + return -EINVAL; + + if (cmd->size_out != 0) + return -EINVAL; + + if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) { + if (++mdata->user_limit == PASS_TRY_LIMIT) + mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + + mdata->user_limit = 0; + mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED; + return 0; +} + static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; @@ -438,6 +480,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_FREEZE_SECURITY: rc = mock_freeze_security(cxlds, cmd); break; + case CXL_MBOX_OP_UNLOCK: + rc = mock_unlock_security(cxlds, cmd); + break; default: break; } -- cgit v1.2.3 From 3b502e886d01c2f96b2774176be4c7bceef2516b Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:32 -0700 Subject: cxl/pmem: Add "Passphrase Secure Erase" security command support Create callback function to support the nvdimm_security_ops() ->erase() callback. Translate the operation to send "Passphrase Secure Erase" security command for CXL memory device. When the mem device is secure erased, cpu_cache_invalidate_memregion() is called in order to invalidate all CPU caches before attempting to access the mem device again. See CXL 3.0 spec section 8.2.9.8.6.6 for reference. 
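Note that the callback below performs cache invalidation on both sides of the mailbox command: once before the erase, presumably so dirty cache lines cannot be written back over freshly erased media, and once after, so subsequent reads are not satisfied from stale cache. Condensed from the diff that follows:

	/* condensed from cxl_pmem_security_passphrase_erase() below */
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); /* pre-erase */
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE,
			       &erase, sizeof(erase), NULL, 0);
	if (rc < 0)
		return rc;
	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); /* post-erase */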
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983615293.2734609.10358657600295932156.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 1 + drivers/cxl/cxlmem.h | 8 ++++++++ drivers/cxl/security.c | 29 +++++++++++++++++++++++++++++ include/uapi/linux/cxl_mem.h | 1 + 4 files changed, 39 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 4f84d3962fb1..8747db329087 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -70,6 +70,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0), CXL_CMD(FREEZE_SECURITY, 0, 0, 0), CXL_CMD(UNLOCK, 0x20, 0, 0), + CXL_CMD(PASSPHRASE_SECURE_ERASE, 0x40, 0, 0), }; /* diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 4e6897e8eb7d..75baeb0bbe57 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -278,6 +278,7 @@ enum cxl_opcode { CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502, CXL_MBOX_OP_UNLOCK = 0x4503, CXL_MBOX_OP_FREEZE_SECURITY = 0x4504, + CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE = 0x4505, CXL_MBOX_OP_MAX = 0x10000 }; @@ -400,6 +401,13 @@ struct cxl_disable_pass { u8 pass[NVDIMM_PASSPHRASE_LEN]; } __packed; +/* passphrase secure erase payload */ +struct cxl_pass_erase { + u8 type; + u8 reserved[31]; + u8 pass[NVDIMM_PASSPHRASE_LEN]; +} __packed; + enum { CXL_PMEM_SEC_PASS_MASTER = 0, CXL_PMEM_SEC_PASS_USER, diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 32b9e279e74b..4a8132559a96 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -120,12 +120,41 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, return 0; } +static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key, + enum nvdimm_passphrase_type ptype) +{ + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_pass_erase erase; + int rc; + + if (!cpu_cache_has_invalidate_memregion()) + return -EINVAL; + + erase.type = ptype == NVDIMM_MASTER ? 
+ CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; + memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN); + /* Flush all cache before we erase mem device */ + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); + rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE, + &erase, sizeof(erase), NULL, 0); + if (rc < 0) + return rc; + + /* mem device erased, invalidate all CPU caches before data is read */ + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); + return 0; +} + static const struct nvdimm_security_ops __cxl_security_ops = { .get_flags = cxl_pmem_get_security_flags, .change_key = cxl_pmem_security_change_key, .disable = cxl_pmem_security_disable, .freeze = cxl_pmem_security_freeze, .unlock = cxl_pmem_security_unlock, + .erase = cxl_pmem_security_passphrase_erase, }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 95dca8d4584f..82bdad4ce5de 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -46,6 +46,7 @@ ___C(DISABLE_PASSPHRASE, "Disable Passphrase"), \ ___C(FREEZE_SECURITY, "Freeze Security"), \ ___C(UNLOCK, "Unlock"), \ + ___C(PASSPHRASE_SECURE_ERASE, "Passphrase Secure Erase"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a -- cgit v1.2.3 From 9f01733387460ee373eb8da4292062ffc5fa340b Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:38 -0700 Subject: tools/testing/cxl: Add "passphrase secure erase" opcode support Add support to emulate a CXL mem device supporting the "passphrase secure erase" operation. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983615879.2734609.5177049043677443736.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 102 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index ddd4a17e5564..1008ee2e1e31 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -373,6 +373,105 @@ static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd return 0; } +static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *cmd) +{ + struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + struct cxl_pass_erase *erase; + + if (cmd->size_in != sizeof(*erase)) + return -EINVAL; + + if (cmd->size_out != 0) + return -EINVAL; + + erase = cmd->payload_in; + if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT && + erase->type == CXL_PMEM_SEC_PASS_USER) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT && + erase->type == CXL_PMEM_SEC_PASS_MASTER) { + cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; + return -ENXIO; + } + + switch (erase->type) { + case CXL_PMEM_SEC_PASS_MASTER: + /* + * The spec does not clearly define the behavior of the scenario + * where a master passphrase is passed in while the master + * passphrase is not set and user passphrase is not set. The + * code will take the assumption that it will behave the same + * as a CXL secure erase command without passphrase (0x4401).
+ */ + if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) { + if (memcmp(mdata->master_pass, erase->pass, + NVDIMM_PASSPHRASE_LEN)) { + master_plimit_check(mdata); + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + mdata->master_limit = 0; + mdata->user_limit = 0; + mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; + memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); + mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED; + } else { + /* + * CXL rev3 8.2.9.8.6.3 Disable Passphrase + * When master passphrase is disabled, the device shall + * return Invalid Input for the Passphrase Secure Erase + * command with master passphrase. + */ + return -EINVAL; + } + /* Scramble encryption keys so that data is effectively erased */ + break; + case CXL_PMEM_SEC_PASS_USER: + /* + * The spec does not clearly define the behavior of the scenario + * where a user passphrase is passed in while the user + * passphrase is not set. The code will take the assumption that + * it will behave the same as a CXL secure erase command without + * passphrase (0x4401). + */ + if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { + if (memcmp(mdata->user_pass, erase->pass, + NVDIMM_PASSPHRASE_LEN)) { + user_plimit_check(mdata); + cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; + return -ENXIO; + } + mdata->user_limit = 0; + mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; + memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); + } + + /* + * CXL rev3 Table 8-118 + * If user passphrase is not set or supported by device, current + * passphrase value is ignored. Will make the assumption that + * the operation will proceed as secure erase w/o passphrase + * since spec is not explicit. + */ + + /* Scramble encryption keys so that data is effectively erased */ + break; + default: + return -EINVAL; + } + + return 0; +} + static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; @@ -483,6 +582,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_UNLOCK: rc = mock_unlock_security(cxlds, cmd); break; + case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: + rc = mock_passphrase_secure_erase(cxlds, cmd); + break; default: break; } -- cgit v1.2.3 From dcedadfae28562ad04bc351cabfbc0c65b810847 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:44 -0700 Subject: nvdimm/cxl/pmem: Add support for master passphrase disable security command The original nvdimm_security_ops ->disable() only supports user passphrase for security disable. The CXL spec introduced the disabling of master passphrase. Add a ->disable_master() callback to support this new operation while leaving the old ->disable() mechanism alone. A "disable_master" command is added for the sysfs attribute in order to allow the command to be issued from userspace. ndctl will need enabling in order to utilize this new operation.
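With the sysfs plumbing below in place, disabling the master passphrase from userspace mirrors the existing user-passphrase flow with only the command name changed. A minimal sketch (device path and key id are illustrative):

#include <stdio.h>

/* Sketch: disable the master passphrase of nmem0 using the key behind id 42 */
int disable_master_nmem0(void)
{
	FILE *f = fopen("/sys/bus/nd/devices/nmem0/security", "w");

	if (!f)
		return -1;
	fprintf(f, "disable_master 42");	/* command added by this patch */
	return fclose(f);
}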
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983616454.2734609.14204031148234398086.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/security.c | 21 ++++++++++++++++++--- drivers/nvdimm/security.c | 33 ++++++++++++++++++++++++++------- include/linux/libnvdimm.h | 2 ++ 3 files changed, 46 insertions(+), 10 deletions(-) diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 4a8132559a96..cbd005ceb091 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -71,8 +71,9 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm, return rc; } -static int cxl_pmem_security_disable(struct nvdimm *nvdimm, - const struct nvdimm_key_data *key_data) +static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data, + enum nvdimm_passphrase_type ptype) { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; @@ -80,7 +81,8 @@ static int cxl_pmem_security_disable(struct nvdimm *nvdimm, struct cxl_disable_pass dis_pass; int rc; - dis_pass.type = CXL_PMEM_SEC_PASS_USER; + dis_pass.type = ptype == NVDIMM_MASTER ? + CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN); rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE, @@ -88,6 +90,18 @@ static int cxl_pmem_security_disable(struct nvdimm *nvdimm, return rc; } +static int cxl_pmem_security_disable(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data) +{ + return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_USER); +} + +static int cxl_pmem_security_disable_master(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data) +{ + return __cxl_pmem_security_disable(nvdimm, key_data, NVDIMM_MASTER); +} + static int cxl_pmem_security_freeze(struct nvdimm *nvdimm) { struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); @@ -155,6 +169,7 @@ static const struct nvdimm_security_ops __cxl_security_ops = { .freeze = cxl_pmem_security_freeze, .unlock = cxl_pmem_security_unlock, .erase = cxl_pmem_security_passphrase_erase, + .disable_master = cxl_pmem_security_disable_master, }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 8aefb60c42ff..92af4c3ca0d3 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -239,7 +239,8 @@ static int check_security_state(struct nvdimm *nvdimm) return 0; } -static int security_disable(struct nvdimm *nvdimm, unsigned int keyid) +static int security_disable(struct nvdimm *nvdimm, unsigned int keyid, + enum nvdimm_passphrase_type pass_type) { struct device *dev = &nvdimm->dev; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); @@ -250,8 +251,13 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid) /* The bus lock should be held at the top level of the call stack */ lockdep_assert_held(&nvdimm_bus->reconfig_mutex); - if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable - || !nvdimm->sec.flags) + if (!nvdimm->sec.ops || !nvdimm->sec.flags) + return -EOPNOTSUPP; + + if (pass_type == NVDIMM_USER && !nvdimm->sec.ops->disable) + return -EOPNOTSUPP; + + if (pass_type == NVDIMM_MASTER && !nvdimm->sec.ops->disable_master) return -EOPNOTSUPP; rc = check_security_state(nvdimm); @@ -263,12 +269,21 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid) if (!data) return -ENOKEY; - rc = nvdimm->sec.ops->disable(nvdimm, 
data); - dev_dbg(dev, "key: %d disable: %s\n", key_serial(key), + if (pass_type == NVDIMM_MASTER) { + rc = nvdimm->sec.ops->disable_master(nvdimm, data); + dev_dbg(dev, "key: %d disable_master: %s\n", key_serial(key), rc == 0 ? "success" : "fail"); + } else { + rc = nvdimm->sec.ops->disable(nvdimm, data); + dev_dbg(dev, "key: %d disable: %s\n", key_serial(key), + rc == 0 ? "success" : "fail"); + } nvdimm_put_key(key); - nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); + if (pass_type == NVDIMM_MASTER) + nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + else + nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); return rc; } @@ -473,6 +488,7 @@ void nvdimm_security_overwrite_query(struct work_struct *work) #define OPS \ C( OP_FREEZE, "freeze", 1), \ C( OP_DISABLE, "disable", 2), \ + C( OP_DISABLE_MASTER, "disable_master", 2), \ C( OP_UPDATE, "update", 3), \ C( OP_ERASE, "erase", 2), \ C( OP_OVERWRITE, "overwrite", 2), \ @@ -524,7 +540,10 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len) rc = nvdimm_security_freeze(nvdimm); } else if (i == OP_DISABLE) { dev_dbg(dev, "disable %u\n", key); - rc = security_disable(nvdimm, key); + rc = security_disable(nvdimm, key, NVDIMM_USER); + } else if (i == OP_DISABLE_MASTER) { + dev_dbg(dev, "disable_master %u\n", key); + rc = security_disable(nvdimm, key, NVDIMM_MASTER); } else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) { dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey); rc = security_update(nvdimm, key, newkey, i == OP_UPDATE diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index c74acfa1a3fe..3bf658a74ccb 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -183,6 +183,8 @@ struct nvdimm_security_ops { int (*overwrite)(struct nvdimm *nvdimm, const struct nvdimm_key_data *key_data); int (*query_overwrite)(struct nvdimm *nvdimm); + int (*disable_master)(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data); }; enum nvdimm_fwa_state { -- cgit v1.2.3 From bd429e5355cd58aeb7e38b905fbecee357a6379b Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:50 -0700 Subject: cxl/pmem: add id attribute to CXL based nvdimm Add an id group attribute for CXL based nvdimm object. The addition allows ndctl to display the "unique id" for the nvdimm. The serial number for the CXL memory device will be used for this id. [ { "dev":"nmem10", "id":"0x4", "security":"disabled" }, ] The id attribute is needed by the ndctl security key management to setup a keyblob with a unique file name tied to the mem device. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983617029.2734609.8251308562882142281.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- Documentation/ABI/testing/sysfs-bus-nvdimm | 6 ++++++ drivers/cxl/pmem.c | 28 +++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-bus-nvdimm b/Documentation/ABI/testing/sysfs-bus-nvdimm index 1c1f5acbf53d..178ce207413d 100644 --- a/Documentation/ABI/testing/sysfs-bus-nvdimm +++ b/Documentation/ABI/testing/sysfs-bus-nvdimm @@ -41,3 +41,9 @@ KernelVersion: 5.18 Contact: Kajol Jain Description: (RO) This sysfs file exposes the cpumask which is designated to to retrieve nvdimm pmu event counter data. + +What: /sys/bus/nd/devices/nmemX/cxl/id +Date: November 2022 +KernelVersion: 6.2 +Contact: Dave Jiang +Description: (RO) Show the id (serial) of the device. 
This is CXL specific. diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index efffc731c2ec..0493ddcfe32c 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -51,6 +51,31 @@ static void unregister_nvdimm(void *nvdimm) cxl_nvd->bridge = NULL; } +static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + struct cxl_dev_state *cxlds = cxl_nvd->cxlmd->cxlds; + + return sysfs_emit(buf, "%lld\n", cxlds->serial); +} +static DEVICE_ATTR_RO(id); + +static struct attribute *cxl_dimm_attributes[] = { + &dev_attr_id.attr, + NULL +}; + +static const struct attribute_group cxl_dimm_attribute_group = { + .name = "cxl", + .attrs = cxl_dimm_attributes, +}; + +static const struct attribute_group *cxl_dimm_attribute_groups[] = { + &cxl_dimm_attribute_group, + NULL +}; + static int cxl_nvdimm_probe(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); @@ -80,7 +105,8 @@ static int cxl_nvdimm_probe(struct device *dev) set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); - nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, + nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, + cxl_dimm_attribute_groups, flags, cmd_mask, 0, NULL, NULL, cxl_security_ops, NULL); if (!nvdimm) { rc = -ENOMEM; -- cgit v1.2.3 From 18fa556375c0e28f067f4a52e9f11d4fd2cf6842 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:22:56 -0700 Subject: tools/testing/cxl: add mechanism to lock mem device for testing The mock cxl mem devs need a way to go into "locked" status to simulate when the platform is rebooted. Add a sysfs mechanism so the device security state is set to "locked" and the frozen state bits are cleared.
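Concretely, a test sets a user passphrase on the mock device, writes 1 to the new attribute to emulate the post-reboot locked state, and then exercises the unlock path. A sketch, assuming a cxl_test platform device named cxl_mem.0 (the instance name is illustrative):

#include <stdio.h>

/* Sketch: push a cxl_test mock memdev into the "locked" state */
int lock_mock_mem(void)
{
	FILE *f = fopen("/sys/bus/platform/devices/cxl_mem.0/security_lock", "w");

	if (!f)
		return -1;
	/* requires a user passphrase to be set; clears FROZEN/PLIMIT bits */
	fprintf(f, "1");
	return fclose(f);
}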
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983617602.2734609.7042497620931694717.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 48 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 1008ee2e1e31..35d9ad04e0d6 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -245,7 +245,7 @@ static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { - struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); struct cxl_disable_pass *dis_pass; if (cmd->size_in != sizeof(*dis_pass)) @@ -316,7 +316,7 @@ static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_ static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { - struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); if (cmd->size_in != 0) return -EINVAL; @@ -333,7 +333,7 @@ static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { - struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); if (cmd->size_in != NVDIMM_PASSPHRASE_LEN) return -EINVAL; @@ -376,7 +376,7 @@ static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { - struct cxl_mock_mem_pdata *mdata = dev_get_platdata(cxlds->dev); + struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); struct cxl_pass_erase *erase; if (cmd->size_in != sizeof(*erase)) @@ -650,6 +650,45 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) return 0; } +static ssize_t security_lock_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%u\n", + !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)); +} + +static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); + u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT | + CXL_PMEM_SEC_STATE_MASTER_PLIMIT; + int val; + + if (kstrtoint(buf, 0, &val) < 0) + return -EINVAL; + + if (val == 1) { + if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) + return -ENXIO; + mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED; + mdata->security_state &= ~mask; + } else { + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR_RW(security_lock); + +static struct attribute *cxl_mock_mem_attrs[] = { + &dev_attr_security_lock.attr, + NULL +}; +ATTRIBUTE_GROUPS(cxl_mock_mem); + static const struct platform_device_id cxl_mock_mem_ids[] = { { .name = "cxl_mem", }, { }, @@ -661,6 +700,7 @@ static struct platform_driver cxl_mock_mem_driver = { .id_table = cxl_mock_mem_ids, .driver = { .name = KBUILD_MODNAME, + .dev_groups = cxl_mock_mem_groups, }, }; -- cgit v1.2.3 From 452996fa079b1a8751a02ac04c54b1d69f512249 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 
2022 12:23:01 -0700 Subject: cxl/pmem: add provider name to cxl pmem dimm attribute group Add a provider name in order to associate the cxl_test dimm with the cxl pmem device when going through sysfs for security testing. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983618174.2734609.15600031015423828810.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- Documentation/ABI/testing/sysfs-bus-nvdimm | 8 ++++++++ drivers/cxl/pmem.c | 10 ++++++++++ 2 files changed, 18 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-bus-nvdimm b/Documentation/ABI/testing/sysfs-bus-nvdimm index 178ce207413d..de8c5a59c77f 100644 --- a/Documentation/ABI/testing/sysfs-bus-nvdimm +++ b/Documentation/ABI/testing/sysfs-bus-nvdimm @@ -47,3 +47,11 @@ Date: November 2022 KernelVersion: 6.2 Contact: Dave Jiang Description: (RO) Show the id (serial) of the device. This is CXL specific. + +What: /sys/bus/nd/devices/nmemX/cxl/provider +Date: November 2022 +KernelVersion: 6.2 +Contact: Dave Jiang +Description: (RO) Shows the CXL bridge device that ties a CXL memory device + to this NVDIMM device. I.e. the parent of the device returned is + a /sys/bus/cxl/devices/memX instance. diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 0493ddcfe32c..403e41bcbf2b 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -51,6 +51,15 @@ static void unregister_nvdimm(void *nvdimm) cxl_nvd->bridge = NULL; } +static ssize_t provider_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); + + return sysfs_emit(buf, "%s\n", dev_name(&cxl_nvd->dev)); +} +static DEVICE_ATTR_RO(provider); + static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); @@ -63,6 +72,7 @@ static DEVICE_ATTR_RO(id); static struct attribute *cxl_dimm_attributes[] = { &dev_attr_id.attr, + &dev_attr_provider.attr, NULL }; -- cgit v1.2.3 From 15a8348707ffd2a37516db9bede88cc0bb467e0b Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 12:23:07 -0700 Subject: libnvdimm: Introduce CONFIG_NVDIMM_SECURITY_TEST flag nfit_test overrode the security_show() sysfs attribute function in nvdimm dimm_devs in order to allow testing of security unlock. With the introduction of CXL security commands, the trick to override security_show() becomes significantly more complicated. By introducing a security flag CONFIG_NVDIMM_SECURITY_TEST, libnvdimm can just toggle the check via a compile option. In addition, the original override can be removed from tools/testing/nvdimm/. In a different commit, the flag will also be used to bypass cpu_cache_invalidate_memregion() when set. This allows testing on QEMU with nfit_test or cxl_test since cpu_cache_has_invalidate_memregion() checks whether the X86_FEATURE_HYPERVISOR cpu feature flag is set on x86.
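The cpu_cache_invalidate_memregion() bypass mentioned above lands in a later commit; the sketch below only illustrates the intended shape, and the helper name is hypothetical:

	/*
	 * Hypothetical sketch: let security commands proceed in test builds
	 * (e.g. QEMU guests) where cpu_cache_has_invalidate_memregion() is
	 * false because X86_FEATURE_HYPERVISOR is set.
	 */
	static bool security_cache_ops_ok(void)
	{
		if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
			return true;	/* unit test: skip cache maintenance */
		return cpu_cache_has_invalidate_memregion();
	}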
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166983618758.2734609.18031639517065867138.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/Kconfig | 12 ++++++++++++ drivers/nvdimm/dimm_devs.c | 9 ++++++++- drivers/nvdimm/security.c | 4 ++++ tools/testing/nvdimm/Kbuild | 1 - tools/testing/nvdimm/dimm_devs.c | 30 ------------------------------ 5 files changed, 24 insertions(+), 32 deletions(-) delete mode 100644 tools/testing/nvdimm/dimm_devs.c diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 5a29046e3319..79d93126453d 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -114,4 +114,16 @@ config NVDIMM_TEST_BUILD core devm_memremap_pages() implementation and other infrastructure. +config NVDIMM_SECURITY_TEST + bool "Enable NVDIMM security unit tests" + depends on NVDIMM_KEYS + help + The NVDIMM and CXL subsystems support unit testing of their device + security state machines. The NVDIMM_SECURITY_TEST option disables CPU + cache maintenance operations around events like secure erase and + overwrite. Also, when enabled, the NVDIMM subsystem core helps the unit + test implement a mock state machine. + + Select N if unsure. + endif diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index c7c980577491..1fc081dcf631 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -349,11 +349,18 @@ static ssize_t available_slots_show(struct device *dev, } static DEVICE_ATTR_RO(available_slots); -__weak ssize_t security_show(struct device *dev, +ssize_t security_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); + /* + * For the test version we need to poll the "hardware" in order + * to get the updated status for unlock testing. + */ + if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) + nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); + if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags)) return sprintf(buf, "overwrite\n"); if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags)) diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 92af4c3ca0d3..6814339b3dab 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm) || !nvdimm->sec.flags) return -EIO; + /* cxl_test needs this to pre-populate the security state */ + if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) + nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); + /* No need to go further if security is disabled */ if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags)) return 0; diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild index 5eb5c23b062f..8153251ea389 100644 --- a/tools/testing/nvdimm/Kbuild +++ b/tools/testing/nvdimm/Kbuild @@ -79,7 +79,6 @@ libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o libnvdimm-$(CONFIG_NVDIMM_DAX) += $(NVDIMM_SRC)/dax_devs.o libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o -libnvdimm-y += dimm_devs.o libnvdimm-y += libnvdimm_test.o libnvdimm-y += config_check.o diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c deleted file mode 100644 index 57bd27dedf1f..000000000000 --- a/tools/testing/nvdimm/dimm_devs.c +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright Intel Corp. 
2018 */ -#include -#include -#include -#include -#include "pmem.h" -#include "pfn.h" -#include "nd.h" -#include "nd-core.h" - -ssize_t security_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvdimm *nvdimm = to_nvdimm(dev); - - /* - * For the test version we need to poll the "hardware" in order - * to get the updated status for unlock testing. - */ - nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); - - if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags)) - return sprintf(buf, "disabled\n"); - if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) - return sprintf(buf, "unlocked\n"); - if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags)) - return sprintf(buf, "locked\n"); - return -ENOTTY; -} -- cgit v1.2.3 From cb9cfff82f6a862c1f54b8b01d9d9a979bb8ae78 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:33:26 -0800 Subject: cxl/acpi: Simplify cxl_nvdimm_bridge probing The 'struct cxl_nvdimm_bridge' object advertises platform CXL PMEM resources. It coordinates with libnvdimm to attach nvdimm devices and regions for each corresponding CXL object. That coordination is complicated, i.e. difficult to reason about, and it turns out redundant. It is already the case that the CXL core knows how to tear down a cxl_region when a cxl_memdev goes through ->remove(), so that pathway can be extended to directly cleanup cxl_nvdimm and cxl_pmem_region objects. Towards the goal of ripping out the cxl_nvdimm_bridge state machine, arrange for cxl_acpi to optionally pre-load the cxl_pmem driver so that the nvdimm bridge is active synchronously with devm_cxl_add_nvdimm_bridge(), and remove all the bind attributes for the cxl_nvdimm* objects since the cxl root device and cxl_memdev bind attributes are sufficient. 
Tested-by: Robert Richter Link: https://lore.kernel.org/r/166993040668.1882361.7450361097265836752.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 1 + drivers/cxl/pmem.c | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index fb9f72813067..c540da0cbf1e 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -539,3 +539,4 @@ module_platform_driver(cxl_acpi_driver); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CXL); MODULE_IMPORT_NS(ACPI); +MODULE_SOFTDEP("pre: cxl_pmem"); diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 4c627d67281a..946e171e7d4a 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -99,6 +99,9 @@ static struct cxl_driver cxl_nvdimm_driver = { .name = "cxl_nvdimm", .probe = cxl_nvdimm_probe, .id = CXL_DEVICE_NVDIMM, + .drv = { + .suppress_bind_attrs = true, + }, }; static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds, @@ -360,6 +363,9 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = { .probe = cxl_nvdimm_bridge_probe, .remove = cxl_nvdimm_bridge_remove, .id = CXL_DEVICE_NVDIMM_BRIDGE, + .drv = { + .suppress_bind_attrs = true, + }, }; static int match_cxl_nvdimm(struct device *dev, void *data) @@ -583,6 +589,9 @@ static struct cxl_driver cxl_pmem_region_driver = { .name = "cxl_pmem_region", .probe = cxl_pmem_region_probe, .id = CXL_DEVICE_PMEM_REGION, + .drv = { + .suppress_bind_attrs = true, + }, }; /* -- cgit v1.2.3 From 16d53cb0d6900ba7c5920397480016d3ee844610 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:33:32 -0800 Subject: cxl/region: Drop redundant pmem region release handling Now that a cxl_nvdimm object can only experience ->remove() via an unregistration event (because the cxl_nvdimm bind attributes are suppressed), additional cleanups are possible. It is already the case that the removal of a cxl_memdev object triggers ->remove() on any associated region. With that mechanism in place there is no need for the cxl_nvdimm removal to trigger the same. Just rely on cxl_region_detach() to tear down the whole cxl_pmem_region. 
Tested-by: Robert Richter Link: https://lore.kernel.org/r/166993041215.1882361.6321535567798911286.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/pmem.c | 2 -- drivers/cxl/cxl.h | 1 - drivers/cxl/pmem.c | 90 ------------------------------------------------- 3 files changed, 93 deletions(-) diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 36aa5070d902..1d12a8206444 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -188,7 +188,6 @@ static void cxl_nvdimm_release(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); - xa_destroy(&cxl_nvd->pmem_regions); kfree(cxl_nvd); } @@ -231,7 +230,6 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) dev = &cxl_nvd->dev; cxl_nvd->cxlmd = cxlmd; - xa_init(&cxl_nvd->pmem_regions); device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_nvdimm_key); device_set_pm_not_required(dev); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 7d07127eade3..4ac7938eaf6c 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -424,7 +424,6 @@ struct cxl_nvdimm { struct device dev; struct cxl_memdev *cxlmd; struct cxl_nvdimm_bridge *bridge; - struct xarray pmem_regions; }; struct cxl_pmem_region_mapping { diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 946e171e7d4a..652f00fc68ca 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -27,26 +27,7 @@ static void clear_exclusive(void *cxlds) static void unregister_nvdimm(void *nvdimm) { - struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); - struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge; - struct cxl_pmem_region *cxlr_pmem; - unsigned long index; - - device_lock(&cxl_nvb->dev); - dev_set_drvdata(&cxl_nvd->dev, NULL); - xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) { - get_device(&cxlr_pmem->dev); - device_unlock(&cxl_nvb->dev); - - device_release_driver(&cxlr_pmem->dev); - put_device(&cxlr_pmem->dev); - - device_lock(&cxl_nvb->dev); - } - device_unlock(&cxl_nvb->dev); - nvdimm_delete(nvdimm); - cxl_nvd->bridge = NULL; } static int cxl_nvdimm_probe(struct device *dev) @@ -243,21 +224,6 @@ static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb) return 0; } -static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb) -{ - struct cxl_pmem_region *cxlr_pmem; - - if (!is_cxl_pmem_region(dev)) - return 0; - - cxlr_pmem = to_cxl_pmem_region(dev); - if (cxlr_pmem->bridge != cxl_nvb) - return 0; - - device_release_driver(dev); - return 0; -} - static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb, struct nvdimm_bus *nvdimm_bus) { @@ -269,8 +235,6 @@ static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb, * nvdimm_bus_unregister() rips the nvdimm objects out from * underneath them. 
*/ - bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, - cxl_pmem_region_release_driver); bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, cxl_nvdimm_release_driver); nvdimm_bus_unregister(nvdimm_bus); @@ -378,48 +342,6 @@ static void unregister_nvdimm_region(void *nd_region) nvdimm_region_delete(nd_region); } -static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd, - struct cxl_pmem_region *cxlr_pmem) -{ - int rc; - - rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem, - cxlr_pmem, GFP_KERNEL); - if (rc) - return rc; - - get_device(&cxlr_pmem->dev); - return 0; -} - -static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd, - struct cxl_pmem_region *cxlr_pmem) -{ - /* - * It is possible this is called without a corresponding - * cxl_nvdimm_add_region for @cxlr_pmem - */ - cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem); - if (cxlr_pmem) - put_device(&cxlr_pmem->dev); -} - -static void release_mappings(void *data) -{ - int i; - struct cxl_pmem_region *cxlr_pmem = data; - struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge; - - device_lock(&cxl_nvb->dev); - for (i = 0; i < cxlr_pmem->nr_mappings; i++) { - struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; - struct cxl_nvdimm *cxl_nvd = m->cxl_nvd; - - cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem); - } - device_unlock(&cxl_nvb->dev); -} - static void cxlr_pmem_remove_resource(void *res) { remove_resource(res); @@ -508,10 +430,6 @@ static int cxl_pmem_region_probe(struct device *dev) goto out_nvb; } - rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem); - if (rc) - goto out_nvd; - for (i = 0; i < cxlr_pmem->nr_mappings; i++) { struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; struct cxl_memdev *cxlmd = m->cxlmd; @@ -538,14 +456,6 @@ static int cxl_pmem_region_probe(struct device *dev) goto out_nvd; } - /* - * Pin the region per nvdimm device as those may be released - * out-of-order with respect to the region, and a single nvdimm - * maybe associated with multiple regions - */ - rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem); - if (rc) - goto out_nvd; m->cxl_nvd = cxl_nvd; mappings[i] = (struct nd_mapping_desc) { .nvdimm = nvdimm, -- cgit v1.2.3 From f17b558d6663101f876a1d9cbbad3de0c8f4ce4d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:33:37 -0800 Subject: cxl/pmem: Refactor nvdimm device registration, delete the workqueue The three objects 'struct cxl_nvdimm_bridge', 'struct cxl_nvdimm', and 'struct cxl_pmem_region' manage CXL persistent memory resources. The bridge represents base platform resources, the nvdimm represents one or more endpoints, and the region is a collection of nvdimms that contribute to an assembled address range. Their relationship is such that a region is torn down if any component endpoints are removed. All regions and endpoints are torn down if the foundational bridge device goes down. A workqueue was deployed to manage these interdependencies, but it is difficult to reason about, and fragile. A recent attempt to take the CXL root device lock in the cxl_mem driver was reported by lockdep as colliding with the flush_work() in the cxl_pmem flows. Instead of the workqueue, arrange for all pmem/nvdimm devices to be torn down immediately and hierarchically. A similar change is made to both the 'cxl_nvdimm' and 'cxl_pmem_region' objects. For bisect-ability both changes are made in the same patch which unfortunately makes the patch bigger than desired. 
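Condensed, the replacement pattern described next pairs two devm release actions per object, with registration done under the bridge's device_lock() so it races cleanly with bridge ->remove() (sketch distilled from the cxl_nvdimm half of the diffs below):

	device_lock(&cxl_nvb->dev);
	if (cxl_nvb->dev.driver)		/* bridge still bound? */
		rc = devm_add_action_or_reset(&cxl_nvb->dev,
					      cxl_nvd_unregister, cxl_nvd);
	else
		rc = -ENXIO;			/* bridge already on its way down */
	device_unlock(&cxl_nvb->dev);

	/* the endpoint holds a bridge reference until its own release action */
	rc = devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);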
Arrange for cxl_memdev and cxl_region to register a cxl_nvdimm and cxl_pmem_region as a devres release action of the bridge device. Additionally, include a devres release action of the cxl_memdev or cxl_region device that triggers the bridge's release action if an endpoint exits before the bridge. I.e. this allows either unplugging the bridge, or unplugging an endpoint, to result in the same cleanup actions. To keep the patch smaller the cleanup of the now defunct workqueue infrastructure is saved for a follow-on patch. Tested-by: Robert Richter Link: https://lore.kernel.org/r/166993041773.1882361.16444301376147207609.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/pmem.c | 78 ++++++++++++++++++++++++---- drivers/cxl/core/region.c | 64 +++++++++++++++++++++-- drivers/cxl/cxl.h | 7 ++- drivers/cxl/cxlmem.h | 4 ++ drivers/cxl/mem.c | 9 ++++ drivers/cxl/pci.c | 3 -- drivers/cxl/pmem.c | 121 +++++++++++++------------------------------ tools/testing/cxl/test/mem.c | 3 -- 8 files changed, 181 insertions(+), 108 deletions(-) diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 1d12a8206444..4d36805079ad 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -219,7 +219,8 @@ EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL); static struct lock_class_key cxl_nvdimm_key; -static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) +static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb, + struct cxl_memdev *cxlmd) { struct cxl_nvdimm *cxl_nvd; struct device *dev; @@ -230,6 +231,7 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) dev = &cxl_nvd->dev; cxl_nvd->cxlmd = cxlmd; + cxlmd->cxl_nvd = cxl_nvd; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_nvdimm_key); device_set_pm_not_required(dev); @@ -240,27 +242,60 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) return cxl_nvd; } -static void cxl_nvd_unregister(void *dev) +static void cxl_nvd_unregister(void *_cxl_nvd) { - device_unregister(dev); + struct cxl_nvdimm *cxl_nvd = _cxl_nvd; + struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb; + + /* + * Either the bridge is in ->remove() context under the device_lock(), + * or cxlmd_release_nvdimm() is cancelling the bridge's release action + * for @cxl_nvd and doing it itself (while manually holding the bridge + * lock). + */ + device_lock_assert(&cxl_nvb->dev); + cxl_nvd->cxlmd = NULL; + cxlmd->cxl_nvd = NULL; + device_unregister(&cxl_nvd->dev); +} + +static void cxlmd_release_nvdimm(void *_cxlmd) +{ + struct cxl_memdev *cxlmd = _cxlmd; + struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb; + + device_lock(&cxl_nvb->dev); + if (cxlmd->cxl_nvd) + devm_release_action(&cxl_nvb->dev, cxl_nvd_unregister, + cxlmd->cxl_nvd); + device_unlock(&cxl_nvb->dev); + put_device(&cxl_nvb->dev); } /** * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm - * @host: same host as @cxlmd * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations * * Return: 0 on success negative error code on failure.
*/ -int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) +int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd) { + struct cxl_nvdimm_bridge *cxl_nvb; struct cxl_nvdimm *cxl_nvd; struct device *dev; int rc; - cxl_nvd = cxl_nvdimm_alloc(cxlmd); - if (IS_ERR(cxl_nvd)) - return PTR_ERR(cxl_nvd); + cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev); + if (!cxl_nvb) + return -ENODEV; + + cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd); + if (IS_ERR(cxl_nvd)) { + rc = PTR_ERR(cxl_nvd); + goto err_alloc; + } + cxlmd->cxl_nvb = cxl_nvb; dev = &cxl_nvd->dev; rc = dev_set_name(dev, "pmem%d", cxlmd->id); @@ -271,13 +306,34 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) if (rc) goto err; - dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), - dev_name(dev)); + dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev)); + + /* + * The two actions below arrange for @cxl_nvd to be deleted when either + * the top-level PMEM bridge goes down, or the endpoint device goes + * through ->remove(). + */ + device_lock(&cxl_nvb->dev); + if (cxl_nvb->dev.driver) + rc = devm_add_action_or_reset(&cxl_nvb->dev, cxl_nvd_unregister, + cxl_nvd); + else + rc = -ENXIO; + device_unlock(&cxl_nvb->dev); + + if (rc) + goto err_alloc; - return devm_add_action_or_reset(host, cxl_nvd_unregister, dev); + /* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */ + return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd); err: put_device(dev); +err_alloc: + cxlmd->cxl_nvb = NULL; + cxlmd->cxl_nvd = NULL; + put_device(&cxl_nvb->dev); + return rc; } EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL); diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index f9ae5ad284ff..1e61d1bafc0c 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1812,6 +1812,7 @@ static struct lock_class_key cxl_pmem_region_key; static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) { struct cxl_region_params *p = &cxlr->params; + struct cxl_nvdimm_bridge *cxl_nvb; struct cxl_pmem_region *cxlr_pmem; struct device *dev; int i; @@ -1839,6 +1840,18 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; + /* + * Regions never span CXL root devices, so by definition the + * bridge for one device is the same for all. + */ + if (i == 0) { + cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev); + if (!cxl_nvb) { + cxlr_pmem = ERR_PTR(-ENODEV); + goto out; + } + cxlr->cxl_nvb = cxl_nvb; + } m->cxlmd = cxlmd; get_device(&cxlmd->dev); m->start = cxled->dpa_res->start; @@ -1848,6 +1861,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) dev = &cxlr_pmem->dev; cxlr_pmem->cxlr = cxlr; + cxlr->cxlr_pmem = cxlr_pmem; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); device_set_pm_not_required(dev); @@ -1860,9 +1874,36 @@ out: return cxlr_pmem; } -static void cxlr_pmem_unregister(void *dev) +static void cxlr_pmem_unregister(void *_cxlr_pmem) { - device_unregister(dev); + struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem; + struct cxl_region *cxlr = cxlr_pmem->cxlr; + struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; + + /* + * Either the bridge is in ->remove() context under the device_lock(), + * or cxlr_release_nvdimm() is cancelling the bridge's release action + * for @cxlr_pmem and doing it itself (while manually holding the bridge + * lock). 
+ */ + device_lock_assert(&cxl_nvb->dev); + cxlr->cxlr_pmem = NULL; + cxlr_pmem->cxlr = NULL; + device_unregister(&cxlr_pmem->dev); +} + +static void cxlr_release_nvdimm(void *_cxlr) +{ + struct cxl_region *cxlr = _cxlr; + struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; + + device_lock(&cxl_nvb->dev); + if (cxlr->cxlr_pmem) + devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, + cxlr->cxlr_pmem); + device_unlock(&cxl_nvb->dev); + cxlr->cxl_nvb = NULL; + put_device(&cxl_nvb->dev); } /** @@ -1874,12 +1915,14 @@ static void cxlr_pmem_unregister(void *dev) static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) { struct cxl_pmem_region *cxlr_pmem; + struct cxl_nvdimm_bridge *cxl_nvb; struct device *dev; int rc; cxlr_pmem = cxl_pmem_region_alloc(cxlr); if (IS_ERR(cxlr_pmem)) return PTR_ERR(cxlr_pmem); + cxl_nvb = cxlr->cxl_nvb; dev = &cxlr_pmem->dev; rc = dev_set_name(dev, "pmem_region%d", cxlr->id); @@ -1893,10 +1936,25 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), dev_name(dev)); - return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev); + device_lock(&cxl_nvb->dev); + if (cxl_nvb->dev.driver) + rc = devm_add_action_or_reset(&cxl_nvb->dev, + cxlr_pmem_unregister, cxlr_pmem); + else + rc = -ENXIO; + device_unlock(&cxl_nvb->dev); + + if (rc) + goto err_bridge; + + /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */ + return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr); err: put_device(dev); +err_bridge: + put_device(&cxl_nvb->dev); + cxlr->cxl_nvb = NULL; return rc; } diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 4ac7938eaf6c..fc6083b0e467 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -386,6 +386,8 @@ struct cxl_region_params { * @id: This region's id. 
Id is globally unique across all regions * @mode: Endpoint decoder allocation / access mode * @type: Endpoint decoder target type + * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown + * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge * @params: active + config params for the region */ struct cxl_region { @@ -393,6 +395,8 @@ struct cxl_region { int id; enum cxl_decoder_mode mode; enum cxl_decoder_type type; + struct cxl_nvdimm_bridge *cxl_nvb; + struct cxl_pmem_region *cxlr_pmem; struct cxl_region_params params; }; @@ -438,7 +442,6 @@ struct cxl_pmem_region { struct device dev; struct cxl_region *cxlr; struct nd_region *nd_region; - struct cxl_nvdimm_bridge *bridge; struct range hpa_range; int nr_mappings; struct cxl_pmem_region_mapping mapping[]; @@ -637,7 +640,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm(struct device *dev); bool is_cxl_nvdimm_bridge(struct device *dev); -int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd); +int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev); #ifdef CONFIG_CXL_REGION diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 88e3a8e54b6a..c1c9960ab05f 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -35,6 +35,8 @@ * @cdev: char dev core object for ioctl operations * @cxlds: The device state backing this device * @detach_work: active memdev lost a port in its ancestry + * @cxl_nvb: coordinate removal of @cxl_nvd if present + * @cxl_nvd: optional bridge to an nvdimm if the device supports pmem * @id: id number of this memdev instance. */ struct cxl_memdev { @@ -42,6 +44,8 @@ struct cxl_memdev { struct cdev cdev; struct cxl_dev_state *cxlds; struct work_struct detach_work; + struct cxl_nvdimm_bridge *cxl_nvb; + struct cxl_nvdimm *cxl_nvd; int id; }; diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 64ccf053d32c..549b6b499bae 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -48,6 +48,7 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data) static int cxl_mem_probe(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_port *parent_port; struct cxl_dport *dport; struct dentry *dentry; @@ -95,6 +96,14 @@ unlock: if (rc) return rc; + if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { + rc = devm_cxl_add_nvdimm(cxlmd); + if (rc == -ENODEV) + dev_info(dev, "PMEM disabled by platform\n"); + else + return rc; + } + /* * The kernel may be operating out of CXL memory on this device, * there is no spec defined way to determine whether this device diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 621a0522b554..e15da405b948 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -503,9 +503,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); - if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) - rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd); - return rc; } diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 652f00fc68ca..76cf54eeb310 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -34,26 +34,16 @@ static int cxl_nvdimm_probe(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; + struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb; 
unsigned long flags = 0, cmd_mask = 0; struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_nvdimm_bridge *cxl_nvb; struct nvdimm *nvdimm; int rc; - cxl_nvb = cxl_find_nvdimm_bridge(dev); - if (!cxl_nvb) - return -ENXIO; - - device_lock(&cxl_nvb->dev); - if (!cxl_nvb->nvdimm_bus) { - rc = -ENXIO; - goto out; - } - set_exclusive_cxl_commands(cxlds, exclusive_cmds); rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds); if (rc) - goto out; + return rc; set_bit(NDD_LABELING, &flags); set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); @@ -61,19 +51,11 @@ static int cxl_nvdimm_probe(struct device *dev) set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, cmd_mask, 0, NULL); - if (!nvdimm) { - rc = -ENOMEM; - goto out; - } + if (!nvdimm) + return -ENOMEM; dev_set_drvdata(dev, nvdimm); - cxl_nvd->bridge = cxl_nvb; - rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm); -out: - device_unlock(&cxl_nvb->dev); - put_device(&cxl_nvb->dev); - - return rc; + return devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm); } static struct cxl_driver cxl_nvdimm_driver = { @@ -200,6 +182,15 @@ static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc, return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len); } +static void unregister_nvdimm_bus(void *_cxl_nvb) +{ + struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; + struct nvdimm_bus *nvdimm_bus = cxl_nvb->nvdimm_bus; + + cxl_nvb->nvdimm_bus = NULL; + nvdimm_bus_unregister(nvdimm_bus); +} + static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb) { if (cxl_nvb->nvdimm_bus) @@ -303,23 +294,21 @@ static int cxl_nvdimm_bridge_probe(struct device *dev) { struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); - if (cxl_nvb->state == CXL_NVB_DEAD) - return -ENXIO; + cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) { + .provider_name = "CXL", + .module = THIS_MODULE, + .ndctl = cxl_pmem_ctl, + }; - if (cxl_nvb->state == CXL_NVB_NEW) { - cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) { - .provider_name = "CXL", - .module = THIS_MODULE, - .ndctl = cxl_pmem_ctl, - }; + cxl_nvb->nvdimm_bus = + nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc); - INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state); - } + if (!cxl_nvb->nvdimm_bus) + return -ENOMEM; - cxl_nvb->state = CXL_NVB_ONLINE; - cxl_nvdimm_bridge_state_work(cxl_nvb); + INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state); - return 0; + return devm_add_action_or_reset(dev, unregister_nvdimm_bus, cxl_nvb); } static struct cxl_driver cxl_nvdimm_bridge_driver = { @@ -332,11 +321,6 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = { }, }; -static int match_cxl_nvdimm(struct device *dev, void *data) -{ - return is_cxl_nvdimm(dev); -} - static void unregister_nvdimm_region(void *nd_region) { nvdimm_region_delete(nd_region); @@ -357,8 +341,8 @@ static int cxl_pmem_region_probe(struct device *dev) struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE]; struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev); struct cxl_region *cxlr = cxlr_pmem->cxlr; + struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; struct cxl_pmem_region_info *info = NULL; - struct cxl_nvdimm_bridge *cxl_nvb; struct nd_interleave_set *nd_set; struct nd_region_desc ndr_desc; struct cxl_nvdimm *cxl_nvd; @@ -366,28 +350,12 @@ static int cxl_pmem_region_probe(struct device *dev) struct resource *res; int rc, i = 0; - cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev); - if (!cxl_nvb) { - dev_dbg(dev, "bridge not found\n"); - return 
-ENXIO; - } - cxlr_pmem->bridge = cxl_nvb; - - device_lock(&cxl_nvb->dev); - if (!cxl_nvb->nvdimm_bus) { - dev_dbg(dev, "nvdimm bus not found\n"); - rc = -ENXIO; - goto out_nvb; - } - memset(&mappings, 0, sizeof(mappings)); memset(&ndr_desc, 0, sizeof(ndr_desc)); res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); - if (!res) { - rc = -ENOMEM; - goto out_nvb; - } + if (!res) + return -ENOMEM; res->name = "Persistent Memory"; res->start = cxlr_pmem->hpa_range.start; @@ -397,11 +365,11 @@ static int cxl_pmem_region_probe(struct device *dev) rc = insert_resource(&iomem_resource, res); if (rc) - goto out_nvb; + return rc; rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res); if (rc) - goto out_nvb; + return rc; ndr_desc.res = res; ndr_desc.provider_data = cxlr_pmem; @@ -415,39 +383,23 @@ static int cxl_pmem_region_probe(struct device *dev) } nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); - if (!nd_set) { - rc = -ENOMEM; - goto out_nvb; - } + if (!nd_set) + return -ENOMEM; ndr_desc.memregion = cxlr->id; set_bit(ND_REGION_CXL, &ndr_desc.flags); set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags); info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL); - if (!info) { - rc = -ENOMEM; - goto out_nvb; - } + if (!info) + return -ENOMEM; for (i = 0; i < cxlr_pmem->nr_mappings; i++) { struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; struct cxl_memdev *cxlmd = m->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct device *d; - d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm); - if (!d) { - dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i, - dev_name(&cxlmd->dev)); - rc = -ENODEV; - goto out_nvd; - } - - /* safe to drop ref now with bridge lock held */ - put_device(d); - - cxl_nvd = to_cxl_nvdimm(d); + cxl_nvd = cxlmd->cxl_nvd; nvdimm = dev_get_drvdata(&cxl_nvd->dev); if (!nvdimm) { dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i, @@ -488,9 +440,6 @@ static int cxl_pmem_region_probe(struct device *dev) cxlr_pmem->nd_region); out_nvd: kfree(info); -out_nvb: - device_unlock(&cxl_nvb->dev); - put_device(&cxl_nvb->dev); return rc; } diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index aa2df3a15051..a4ee8e61dd60 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -285,9 +285,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); - if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) - rc = devm_cxl_add_nvdimm(dev, cxlmd); - return 0; } -- cgit v1.2.3 From 03ff079aa633369763bc0b7409b0a3a8ffa21d40 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:33:43 -0800 Subject: cxl/pmem: Remove the cxl_pmem_wq and related infrastructure Now that cxl_nvdimm and cxl_pmem_region objects are torn down synchronously with the removal of either the bridge or an endpoint, the cxl_pmem_wq infrastructure can be jettisoned.
Tested-by: Robert Richter Link: https://lore.kernel.org/r/166993042335.1882361.17022872468068436287.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/pmem.c | 22 -------- drivers/cxl/cxl.h | 17 ------ drivers/cxl/pmem.c | 143 +----------------------------------------------- 3 files changed, 1 insertion(+), 181 deletions(-) diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 4d36805079ad..16446473d814 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -99,7 +99,6 @@ static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port) dev = &cxl_nvb->dev; cxl_nvb->port = port; - cxl_nvb->state = CXL_NVB_NEW; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key); device_set_pm_not_required(dev); @@ -117,28 +116,7 @@ err: static void unregister_nvb(void *_cxl_nvb) { struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; - bool flush; - /* - * If the bridge was ever activated then there might be in-flight state - * work to flush. Once the state has been changed to 'dead' then no new - * work can be queued by user-triggered bind. - */ - device_lock(&cxl_nvb->dev); - flush = cxl_nvb->state != CXL_NVB_NEW; - cxl_nvb->state = CXL_NVB_DEAD; - device_unlock(&cxl_nvb->dev); - - /* - * Even though the device core will trigger device_release_driver() - * before the unregister, it does not know about the fact that - * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver - * release not and flush it before tearing down the nvdimm device - * hierarchy. - */ - device_release_driver(&cxl_nvb->dev); - if (flush) - flush_work(&cxl_nvb->state_work); device_unregister(&cxl_nvb->dev); } diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index fc6083b0e467..f0ca2d768385 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -400,34 +400,17 @@ struct cxl_region { struct cxl_region_params params; }; -/** - * enum cxl_nvdimm_brige_state - state machine for managing bus rescans - * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed - * @CXL_NVB_DEAD: Set at brige unregistration to preclude async probing - * @CXL_NVB_ONLINE: Target state after successful ->probe() - * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe() - */ -enum cxl_nvdimm_brige_state { - CXL_NVB_NEW, - CXL_NVB_DEAD, - CXL_NVB_ONLINE, - CXL_NVB_OFFLINE, -}; - struct cxl_nvdimm_bridge { int id; struct device dev; struct cxl_port *port; struct nvdimm_bus *nvdimm_bus; struct nvdimm_bus_descriptor nd_desc; - struct work_struct state_work; - enum cxl_nvdimm_brige_state state; }; struct cxl_nvdimm { struct device dev; struct cxl_memdev *cxlmd; - struct cxl_nvdimm_bridge *bridge; }; struct cxl_pmem_region_mapping { diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 76cf54eeb310..0910367a3ead 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -11,13 +11,6 @@ #include "cxlmem.h" #include "cxl.h" -/* - * Ordered workqueue for cxl nvdimm device arrival and departure - * to coordinate bus rescans when a bridge arrives and trigger remove - * operations when the bridge is removed. 
- */ -static struct workqueue_struct *cxl_pmem_wq; - static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); static void clear_exclusive(void *cxlds) @@ -191,105 +184,6 @@ static void unregister_nvdimm_bus(void *_cxl_nvb) nvdimm_bus_unregister(nvdimm_bus); } -static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb) -{ - if (cxl_nvb->nvdimm_bus) - return true; - cxl_nvb->nvdimm_bus = - nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc); - return cxl_nvb->nvdimm_bus != NULL; -} - -static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb) -{ - struct cxl_nvdimm *cxl_nvd; - - if (!is_cxl_nvdimm(dev)) - return 0; - - cxl_nvd = to_cxl_nvdimm(dev); - if (cxl_nvd->bridge != cxl_nvb) - return 0; - - device_release_driver(dev); - return 0; -} - -static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb, - struct nvdimm_bus *nvdimm_bus) -{ - if (!nvdimm_bus) - return; - - /* - * Set the state of cxl_nvdimm devices to unbound / idle before - * nvdimm_bus_unregister() rips the nvdimm objects out from - * underneath them. - */ - bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb, - cxl_nvdimm_release_driver); - nvdimm_bus_unregister(nvdimm_bus); -} - -static void cxl_nvb_update_state(struct work_struct *work) -{ - struct cxl_nvdimm_bridge *cxl_nvb = - container_of(work, typeof(*cxl_nvb), state_work); - struct nvdimm_bus *victim_bus = NULL; - bool release = false, rescan = false; - - device_lock(&cxl_nvb->dev); - switch (cxl_nvb->state) { - case CXL_NVB_ONLINE: - if (!online_nvdimm_bus(cxl_nvb)) { - dev_err(&cxl_nvb->dev, - "failed to establish nvdimm bus\n"); - release = true; - } else - rescan = true; - break; - case CXL_NVB_OFFLINE: - case CXL_NVB_DEAD: - victim_bus = cxl_nvb->nvdimm_bus; - cxl_nvb->nvdimm_bus = NULL; - break; - default: - break; - } - device_unlock(&cxl_nvb->dev); - - if (release) - device_release_driver(&cxl_nvb->dev); - if (rescan) { - int rc = bus_rescan_devices(&cxl_bus_type); - - dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc); - } - offline_nvdimm_bus(cxl_nvb, victim_bus); - - put_device(&cxl_nvb->dev); -} - -static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb) -{ - /* - * Take a reference that the workqueue will drop if new work - * gets queued. - */ - get_device(&cxl_nvb->dev); - if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work)) - put_device(&cxl_nvb->dev); -} - -static void cxl_nvdimm_bridge_remove(struct device *dev) -{ - struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); - - if (cxl_nvb->state == CXL_NVB_ONLINE) - cxl_nvb->state = CXL_NVB_OFFLINE; - cxl_nvdimm_bridge_state_work(cxl_nvb); -} - static int cxl_nvdimm_bridge_probe(struct device *dev) { struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); @@ -306,15 +200,12 @@ static int cxl_nvdimm_bridge_probe(struct device *dev) if (!cxl_nvb->nvdimm_bus) return -ENOMEM; - INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state); - return devm_add_action_or_reset(dev, unregister_nvdimm_bus, cxl_nvb); } static struct cxl_driver cxl_nvdimm_bridge_driver = { .name = "cxl_nvdimm_bridge", .probe = cxl_nvdimm_bridge_probe, - .remove = cxl_nvdimm_bridge_remove, .id = CXL_DEVICE_NVDIMM_BRIDGE, .drv = { .suppress_bind_attrs = true, @@ -453,31 +344,6 @@ static struct cxl_driver cxl_pmem_region_driver = { }, }; -/* - * Return all bridges to the CXL_NVB_NEW state to invalidate any - * ->state_work referring to the now destroyed cxl_pmem_wq. 
- */ -static int cxl_nvdimm_bridge_reset(struct device *dev, void *data) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - - if (!is_cxl_nvdimm_bridge(dev)) - return 0; - - cxl_nvb = to_cxl_nvdimm_bridge(dev); - device_lock(dev); - cxl_nvb->state = CXL_NVB_NEW; - device_unlock(dev); - - return 0; -} - -static void destroy_cxl_pmem_wq(void) -{ - destroy_workqueue(cxl_pmem_wq); - bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset); -} - static __init int cxl_pmem_init(void) { int rc; @@ -485,13 +351,9 @@ static __init int cxl_pmem_init(void) set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds); set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds); - cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0); - if (!cxl_pmem_wq) - return -ENXIO; - rc = cxl_driver_register(&cxl_nvdimm_bridge_driver); if (rc) - goto err_bridge; + return rc; rc = cxl_driver_register(&cxl_nvdimm_driver); if (rc) @@ -507,8 +369,6 @@ err_region: cxl_driver_unregister(&cxl_nvdimm_driver); err_nvdimm: cxl_driver_unregister(&cxl_nvdimm_bridge_driver); -err_bridge: - destroy_cxl_pmem_wq(); return rc; } @@ -517,7 +377,6 @@ static __exit void cxl_pmem_exit(void) cxl_driver_unregister(&cxl_pmem_region_driver); cxl_driver_unregister(&cxl_nvdimm_driver); cxl_driver_unregister(&cxl_nvdimm_bridge_driver); - destroy_cxl_pmem_wq(); } MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 4029c32fb601d505dfb92bdf0db9fdcc41fe1434 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:33:48 -0800 Subject: cxl/acpi: Move rescan to the workqueue Now that the cxl_mem driver has a need to take the root device lock, the cxl_bus_rescan() needs to run outside of the root lock context. That need arises from RCH topologies and the locking that the cxl_mem driver does to attach a descendant to an upstream port. In the RCH case the lock needed is the CXL root device lock [1]. 
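The shape of the fix is simply to bounce the rescan through a workqueue so that the caller can return with its locks still held; a minimal sketch, assuming an example_wq allocated at init time (the example_* names are illustrative):

  #include <linux/workqueue.h>

  static struct workqueue_struct *example_wq; /* e.g. alloc_ordered_workqueue() at init */

  static void example_rescan_fn(struct work_struct *w)
  {
          /* runs later in work-item context, outside the caller's locks */
          pr_debug("deferred rescan\n");
  }

  static void example_rescan(void)
  {
          static DECLARE_WORK(rescan_work, example_rescan_fn);

          /* safe to call while holding locks the rescan path would take */
          queue_work(example_wq, &rescan_work);
  }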
Link: http://lore.kernel.org/r/166993045621.1882361.1730100141527044744.stgit@dwillia2-xfh.jf.intel.com [1] Tested-by: Robert Richter Link: http://lore.kernel.org/r/166993042884.1882361.5633723613683058881.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 17 +++++++++++++++-- drivers/cxl/core/port.c | 19 +++++++++++++++++-- drivers/cxl/cxl.h | 3 ++- 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index c540da0cbf1e..b8407b77aff6 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -511,7 +511,8 @@ static int cxl_acpi_probe(struct platform_device *pdev) return rc; /* In case PCI is scanned before ACPI re-trigger memdev attach */ - return cxl_bus_rescan(); + cxl_bus_rescan(); + return 0; } static const struct acpi_device_id cxl_acpi_ids[] = { @@ -535,7 +536,19 @@ static struct platform_driver cxl_acpi_driver = { .id_table = cxl_test_ids, }; -module_platform_driver(cxl_acpi_driver); +static int __init cxl_acpi_init(void) +{ + return platform_driver_register(&cxl_acpi_driver); +} + +static void __exit cxl_acpi_exit(void) +{ + platform_driver_unregister(&cxl_acpi_driver); + cxl_bus_drain(); +} + +module_init(cxl_acpi_init); +module_exit(cxl_acpi_exit); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CXL); MODULE_IMPORT_NS(ACPI); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 0d2f5eaaca7d..d225267c69bb 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1844,12 +1844,27 @@ static void cxl_bus_remove(struct device *dev) static struct workqueue_struct *cxl_bus_wq; -int cxl_bus_rescan(void) +static void cxl_bus_rescan_queue(struct work_struct *w) { - return bus_rescan_devices(&cxl_bus_type); + int rc = bus_rescan_devices(&cxl_bus_type); + + pr_debug("CXL bus rescan result: %d\n", rc); +} + +void cxl_bus_rescan(void) +{ + static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue); + + queue_work(cxl_bus_wq, &rescan_work); } EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL); +void cxl_bus_drain(void) +{ + drain_workqueue(cxl_bus_wq); +} +EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL); + bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd) { return queue_work(cxl_bus_wq, &cxlmd->detach_work); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index f0ca2d768385..281b1db5a271 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -552,7 +552,8 @@ int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, struct cxl_dport *parent_dport); struct cxl_port *find_cxl_root(struct device *dev); int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); -int cxl_bus_rescan(void); +void cxl_bus_rescan(void); +void cxl_bus_drain(void); struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, struct cxl_dport **dport); bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); -- cgit v1.2.3 From 8b3b1c0dc500a00c34ab74fb8a0d9e7286220c04 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:33:54 -0800 Subject: tools/testing/cxl: Make mock CEDT parsing more robust Accept any cxl_test topology device as the first argument in cxl_chbs_context. This is in preparation for reworking the detection of the component registers across VH and RCH topologies. Move mock_acpi_table_parse_cedt() beneath the definition of is_mock_port() and use is_mock_port() instead of the explicit mock cxl_acpi device check. 
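The "first member" convention that the added comments document rests on the C guarantee that a pointer to a struct also points at its first member; a small illustrative sketch with hypothetical struct names:

  #include <linux/device.h>

  /* both contexts deliberately place the device pointer first... */
  struct example_ctx_a {
          struct device *dev;
          int a_state;
  };

  struct example_ctx_b {
          struct device *dev;
          unsigned long b_flags;
  };

  /*
   * ...so a dispatcher handed either context as a void * can recover
   * the device without knowing which context type it was given
   */
  static struct device *example_ctx_to_dev(void *arg)
  {
          return *(struct device **)arg;
  }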
Acked-by: Alison Schofield Reviewed-by: Robert Richter Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/166993043433.1882361.17651413716599606118.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 4 ++++ tools/testing/cxl/test/cxl.c | 10 ++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index b8407b77aff6..2992bac4c0e4 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -70,6 +70,10 @@ static int cxl_acpi_cfmws_verify(struct device *dev, return 0; } +/* + * Note, @dev must be the first member, see 'struct cxl_chbs_context' + * and mock_acpi_table_parse_cedt() + */ struct cxl_cfmws_context { struct device *dev; struct cxl_port *root_port; diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index facfcd11cb67..4f9dc2b3f655 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -320,10 +320,12 @@ static int populate_cedt(void) return 0; } +static bool is_mock_port(struct device *dev); + /* - * WARNING, this hack assumes the format of 'struct - * cxl_cfmws_context' and 'struct cxl_chbs_context' share the property that - * the first struct member is the device being probed by the cxl_acpi + * WARNING, this hack assumes the format of 'struct cxl_cfmws_context' + * and 'struct cxl_chbs_context' share the property that the first + * struct member is a cxl_test device being probed by the cxl_acpi * driver. */ struct cxl_cedt_context { @@ -340,7 +342,7 @@ static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id, unsigned long end; int i; - if (dev != &cxl_acpi->dev) + if (!is_mock_port(dev) && !is_mock_dev(dev)) return acpi_table_parse_cedt(id, handler_arg, arg); if (id == ACPI_CEDT_TYPE_CHBS) -- cgit v1.2.3 From 1dedb6f3cf7feeb84b10c24046d8e4436173cc4d Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 1 Dec 2022 13:33:59 -0800 Subject: cxl/ACPI: Register CXL host ports by bridge device A port of a CXL host bridge links to the bridge's ACPI device (&adev->dev) with its corresponding uport/dport device (uport_dev and dport_dev respectively). The device is not a direct parent device in the PCI topology as pdev->dev.parent points to a PCI bridge's (struct pci_host_bridge) device. The following CXL memory device hierarchy would be valid for an endpoint once an RCD EP is enabled (note this will be done in a later patch):

VH mode:

  cxlmd->dev.parent->parent
         ^^^\^^^^^^\ ^^^^^^\
             \      \       pci_dev (Type 1, Downstream Port)
              \      pci_dev (Type 0, PCI Express Endpoint)
               cxl mem device

RCD mode:

  cxlmd->dev.parent->parent
         ^^^\^^^^^^\ ^^^^^^\
             \      \       pci_host_bridge
              \      pci_dev (Type 0, RCiEP)
               cxl mem device

In VH mode a downstream port is created by port enumeration and thus always exists. Now, in RCD mode the host bridge also already exists but it references an ACPI device. A port lookup by the PCI device's parent device will fail as a direct link to the registered port is missing. The ACPI device of the bridge must be determined first. To prevent this, change port registration of a CXL host to use the bridge device instead. Do this also for the VH case, as the port topology will then better reflect the PCI topology.
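A condensed sketch of the lookup the patch standardizes on, from the ACPI host-bridge device to the struct pci_host_bridge device that ports and dports now reference (the helper name and the NULL check are additions for illustration, not part of the diff below):

  #include <linux/acpi.h>
  #include <linux/pci.h>

  static struct device *example_hb_to_bridge(struct acpi_device *adev)
  {
          struct acpi_pci_root *pci_root = acpi_pci_find_root(adev->handle);

          if (!pci_root)
                  return NULL;
          /* the struct pci_host_bridge device backing the root bus */
          return pci_root->bus->bridge;
  }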
Signed-off-by: Robert Richter [djbw: rebase on bridge mocking] Reviewed-by: Robert Richter Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/166993043978.1882361.16238060349889579369.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 2992bac4c0e4..694868a85c93 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -197,35 +197,34 @@ static int add_host_bridge_uport(struct device *match, void *arg) { struct cxl_port *root_port = arg; struct device *host = root_port->dev.parent; - struct acpi_device *bridge = to_cxl_host_bridge(host, match); + struct acpi_device *hb = to_cxl_host_bridge(host, match); struct acpi_pci_root *pci_root; struct cxl_dport *dport; struct cxl_port *port; + struct device *bridge; int rc; - if (!bridge) + if (!hb) return 0; - dport = cxl_find_dport_by_dev(root_port, match); + pci_root = acpi_pci_find_root(hb->handle); + bridge = pci_root->bus->bridge; + dport = cxl_find_dport_by_dev(root_port, bridge); if (!dport) { dev_dbg(host, "host bridge expected and not found\n"); return 0; } - /* - * Note that this lookup already succeeded in - * to_cxl_host_bridge(), so no need to check for failure here - */ - pci_root = acpi_pci_find_root(bridge->handle); - rc = devm_cxl_register_pci_bus(host, match, pci_root->bus); + rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus); if (rc) return rc; - port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport); + port = devm_cxl_add_port(host, bridge, dport->component_reg_phys, + dport); if (IS_ERR(port)) return PTR_ERR(port); - dev_info(pci_root->bus->bridge, "host supports CXL\n"); + dev_info(bridge, "host supports CXL\n"); return 0; } @@ -256,20 +255,21 @@ static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg, static int add_host_bridge_dport(struct device *match, void *arg) { - acpi_status status; + acpi_status rc; + struct device *bridge; unsigned long long uid; struct cxl_dport *dport; struct cxl_chbs_context ctx; + struct acpi_pci_root *pci_root; struct cxl_port *root_port = arg; struct device *host = root_port->dev.parent; - struct acpi_device *bridge = to_cxl_host_bridge(host, match); + struct acpi_device *hb = to_cxl_host_bridge(host, match); - if (!bridge) + if (!hb) return 0; - status = acpi_evaluate_integer(bridge->handle, METHOD_NAME__UID, NULL, - &uid); - if (status != AE_OK) { + rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid); + if (rc != AE_OK) { dev_err(match, "unable to retrieve _UID\n"); return -ENODEV; } @@ -289,7 +289,9 @@ static int add_host_bridge_dport(struct device *match, void *arg) dev_dbg(match, "CHBCR found: 0x%08llx\n", (u64)ctx.chbcr); - dport = devm_cxl_add_dport(root_port, match, uid, ctx.chbcr); + pci_root = acpi_pci_find_root(hb->handle); + bridge = pci_root->bus->bridge; + dport = devm_cxl_add_dport(root_port, bridge, uid, ctx.chbcr); if (IS_ERR(dport)) return PTR_ERR(dport); -- cgit v1.2.3 From b5807c80b5bc49764724ca22e83c04f527e86fd4 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 1 Dec 2022 14:03:19 -0800 Subject: cxl: add dimm_id support for __nvdimm_create() Set the cxlds->serial as the dimm_id to be fed to __nvdimm_create(). The security code uses that as the key description for the security key of the memory device. The nvdimm unlock code cannot find the respective key without the dimm_id.
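The buffer sizing is worth spelling out: a u64 serial rendered with "%llx" needs at most 16 hex digits plus a NUL terminator, so 17 bytes is the floor, and NVDIMM_KEY_DESC_LEN is the ceiling imposed by the key description. A sketch of that invariant, with EXAMPLE_DEV_ID_LEN as a hypothetical stand-in for CXL_DEV_ID_LEN:

  #include <linux/build_bug.h>
  #include <linux/kernel.h>

  #define EXAMPLE_DEV_ID_LEN 19

  static void example_format_dimm_id(char buf[EXAMPLE_DEV_ID_LEN], u64 serial)
  {
          /* 16 hex digits + NUL = 17 bytes worst case for "%llx" */
          BUILD_BUG_ON(EXAMPLE_DEV_ID_LEN < 17);
          snprintf(buf, EXAMPLE_DEV_ID_LEN, "%llx", serial);
  }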
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166863357043.80269.4337575149671383294.stgit@djiang5-desk3.ch.intel.com Link: https://lore.kernel.org/r/166983620459.2734609.10175456773200251184.stgit@djiang5-desk3.ch.intel.com Link: https://lore.kernel.org/r/166993219918.1995348.10786511454826454601.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pmem.c | 7 +++++++ drivers/cxl/cxl.h | 3 +++ drivers/cxl/pmem.c | 3 ++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 36aa5070d902..7b9a9573e6f2 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -238,6 +238,13 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) dev->parent = &cxlmd->dev; dev->bus = &cxl_bus_type; dev->type = &cxl_nvdimm_type; + /* + * A "%llx" string is 17-bytes vs dimm_id that is max + * NVDIMM_KEY_DESC_LEN + */ + BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 || + sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN); + sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial); return cxl_nvd; } diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 7d07127eade3..b433e541a054 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -420,11 +420,14 @@ struct cxl_nvdimm_bridge { enum cxl_nvdimm_brige_state state; }; +#define CXL_DEV_ID_LEN 19 + struct cxl_nvdimm { struct device dev; struct cxl_memdev *cxlmd; struct cxl_nvdimm_bridge *bridge; struct xarray pmem_regions; + u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */ }; struct cxl_pmem_region_mapping { diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 403e41bcbf2b..ab40c93c44e5 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -117,7 +117,8 @@ static int cxl_nvdimm_probe(struct device *dev) set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, cxl_dimm_attribute_groups, flags, - cmd_mask, 0, NULL, NULL, cxl_security_ops, NULL); + cmd_mask, 0, NULL, cxl_nvd->dev_id, + cxl_security_ops, NULL); if (!nvdimm) { rc = -ENOMEM; goto out; -- cgit v1.2.3 From bf3e5da8cb43a671b32fc125fa81b8f6a3677192 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 14:03:24 -0800 Subject: cxl/region: Fix missing probe failure cxl_region_probe() allows for regions not in the 'commit' state to be enabled. Fail probe when the region is not committed otherwise the kernel may indicate that an address range is active when none of the decoders are active. 
Fixes: 8d48817df6ac ("cxl/region: Add region driver boiler plate") Cc: Reviewed-by: Davidlohr Bueso Reviewed-by: Dave Jiang Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/166993220462.1995348.1698008475198427361.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/region.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index f9ae5ad284ff..1bc2ebefa2a5 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1923,6 +1923,9 @@ static int cxl_region_probe(struct device *dev) */ up_read(&cxl_region_rwsem); + if (rc) + return rc; + switch (cxlr->mode) { case CXL_DECODER_PMEM: return devm_cxl_add_pmem_region(cxlr); -- cgit v1.2.3 From 07cb5f705b4fe9e1386a610da4cb3c063267714f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 14:03:30 -0800 Subject: cxl/pmem: Enforce keyctl ABI for PMEM security Preclude the possibility of user tooling sending device secrets in the clear into the kernel by marking the security commands as exclusive. This mandates the usage of the keyctl ABI for managing the device passphrase. Reviewed-by: Davidlohr Bueso Reviewed-by: Dave Jiang Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/166993221008.1995348.11651567302609703175.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 8747db329087..35dd889f1d3a 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -704,6 +704,16 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds) rc = 0; } + /* + * Setup permanently kernel exclusive commands, i.e. the + * mechanism is driven through sysfs, keyctl, etc... + */ + set_bit(CXL_MEM_COMMAND_ID_SET_PASSPHRASE, cxlds->exclusive_cmds); + set_bit(CXL_MEM_COMMAND_ID_DISABLE_PASSPHRASE, cxlds->exclusive_cmds); + set_bit(CXL_MEM_COMMAND_ID_UNLOCK, cxlds->exclusive_cmds); + set_bit(CXL_MEM_COMMAND_ID_PASSPHRASE_SECURE_ERASE, + cxlds->exclusive_cmds); + out: kvfree(gsl); return rc; -- cgit v1.2.3 From dc370b28c8425669e7ed5af4c01540645cfb00ec Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 14:03:35 -0800 Subject: nvdimm/region: Move cache management to the region driver Now that cpu_cache_invalidate_memregion() is generically available, use it to centralize CPU cache management in the nvdimm region driver. This trades off removing redundant per-dimm CPU cache flushing with an opportunistic flush on every region disable event to cover the case of sensitive dirty data in the cache being written back to media after a secure erase / overwrite event. 
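The net effect is a sticky "incoherent" marker set by the security paths plus a single flush at region activation; schematically, and assuming a hypothetical EXAMPLE_INCOHERENT flag bit in place of NDD_INCOHERENT:

  #include <linux/bitops.h>
  #include <linux/errno.h>
  #include <linux/ioport.h>
  #include <linux/memregion.h>

  #define EXAMPLE_INCOHERENT 0 /* hypothetical stand-in for NDD_INCOHERENT */

  static int example_activate_flush(unsigned long *flags)
  {
          /* set by the unlock / erase / overwrite success paths */
          if (!test_bit(EXAMPLE_INCOHERENT, flags))
                  return 0;

          if (!cpu_cache_has_invalidate_memregion())
                  return -ENXIO; /* no safe flush mechanism on this host */

          cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
          clear_bit(EXAMPLE_INCOHERENT, flags);
          return 0;
  }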
Reviewed-by: Davidlohr Bueso Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/166993221550.1995348.16843505129579060258.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/acpi/nfit/intel.c | 25 ---------------------- drivers/nvdimm/region.c | 11 ++++++++++ drivers/nvdimm/region_devs.c | 50 +++++++++++++++++++++++++++++++++++++++++++- drivers/nvdimm/security.c | 6 ++++++ include/linux/libnvdimm.h | 5 +++++ 5 files changed, 71 insertions(+), 26 deletions(-) diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index fa0e57e35162..3902759abcba 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -212,9 +212,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask)) return -ENOTTY; - if (!cpu_cache_has_invalidate_memregion()) - return -EINVAL; - memcpy(nd_cmd.cmd.passphrase, key_data->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -229,9 +226,6 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm, return -EIO; } - /* DIMM unlocked, invalidate all CPU caches before we read it */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); - return 0; } @@ -299,11 +293,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, if (!test_bit(cmd, &nfit_mem->dsm_mask)) return -ENOTTY; - if (!cpu_cache_has_invalidate_memregion()) - return -EINVAL; - - /* flush all cache before we erase DIMM */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); memcpy(nd_cmd.cmd.passphrase, key->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -322,8 +311,6 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm, return -ENXIO; } - /* DIMM erased, invalidate all CPU caches before we read it */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -346,9 +333,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask)) return -ENOTTY; - if (!cpu_cache_has_invalidate_memregion()) - return -EINVAL; - rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); if (rc < 0) return rc; @@ -362,8 +346,6 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm) return -ENXIO; } - /* flush all cache before we make the nvdimms available */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -388,11 +370,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm, if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask)) return -ENOTTY; - if (!cpu_cache_has_invalidate_memregion()) - return -EINVAL; - - /* flush all cache before we erase DIMM */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); memcpy(nd_cmd.cmd.passphrase, nkey->data, sizeof(nd_cmd.cmd.passphrase)); rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); @@ -770,5 +747,3 @@ static const struct nvdimm_fw_ops __intel_fw_ops = { }; const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops; - -MODULE_IMPORT_NS(DEVMEM); diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index 390123d293ea..88dc062af5f8 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -2,6 +2,7 @@ /* * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 
*/ +#include #include #include #include @@ -100,6 +101,16 @@ static void nd_region_remove(struct device *dev) */ sysfs_put(nd_region->bb_state); nd_region->bb_state = NULL; + + /* + * Try to flush caches here since a disabled region may be subject to + * secure erase while disabled, and previous dirty data should not be + * written back to a new instance of the region. This only matters on + * bare metal where security commands are available, so silent failure + * here is ok. + */ + if (cpu_cache_has_invalidate_memregion()) + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); } static int child_notify(struct device *dev, void *data) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e0875d369762..83dbf398ea84 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -59,9 +59,51 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm, return 0; } +static int nd_region_invalidate_memregion(struct nd_region *nd_region) +{ + int i, incoherent = 0; + + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + if (test_bit(NDD_INCOHERENT, &nvdimm->flags)) { + incoherent++; + break; + } + } + + if (!incoherent) + return 0; + + if (!cpu_cache_has_invalidate_memregion()) { + if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)) { + dev_warn( + &nd_region->dev, + "Bypassing cpu_cache_invalidate_memergion() for testing!\n"); + goto out; + } else { + dev_err(&nd_region->dev, + "Failed to synchronize CPU cache state\n"); + return -ENXIO; + } + } + + cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); +out: + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + clear_bit(NDD_INCOHERENT, &nvdimm->flags); + } + + return 0; +} + int nd_region_activate(struct nd_region *nd_region) { - int i, j, num_flush = 0; + int i, j, rc, num_flush = 0; struct nd_region_data *ndrd; struct device *dev = &nd_region->dev; size_t flush_data_size = sizeof(void *); @@ -85,6 +127,10 @@ int nd_region_activate(struct nd_region *nd_region) } nvdimm_bus_unlock(&nd_region->dev); + rc = nd_region_invalidate_memregion(nd_region); + if (rc) + return rc; + ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL); if (!ndrd) return -ENOMEM; @@ -1222,3 +1268,5 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict); } + +MODULE_IMPORT_NS(DEVMEM); diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 6814339b3dab..a03e3c45f297 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -208,6 +208,8 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm) rc = nvdimm->sec.ops->unlock(nvdimm, data); dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key), rc == 0 ? "success" : "fail"); + if (rc == 0) + set_bit(NDD_INCOHERENT, &nvdimm->flags); nvdimm_put_key(key); nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); @@ -374,6 +376,8 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid, return -ENOKEY; rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type); + if (rc == 0) + set_bit(NDD_INCOHERENT, &nvdimm->flags); dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key), pass_type == NVDIMM_MASTER ? "(master)" : "(user)", rc == 0 ? 
"success" : "fail"); @@ -408,6 +412,8 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) return -ENOKEY; rc = nvdimm->sec.ops->overwrite(nvdimm, data); + if (rc == 0) + set_bit(NDD_INCOHERENT, &nvdimm->flags); dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key), rc == 0 ? "success" : "fail"); diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 3bf658a74ccb..af38252ad704 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -35,6 +35,11 @@ enum { NDD_WORK_PENDING = 4, /* dimm supports namespace labels */ NDD_LABELING = 6, + /* + * dimm contents have changed requiring invalidation of CPU caches prior + * to activation of a region that includes this device + */ + NDD_INCOHERENT = 7, /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, -- cgit v1.2.3 From d18bc74aced65e4ad68a30ac8db883398141e918 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 14:03:41 -0800 Subject: cxl/region: Manage CPU caches relative to DPA invalidation events A "DPA invalidation event" is any scenario where the contents of a DPA (Device Physical Address) is modified in a way that is incoherent with CPU caches, or if the HPA (Host Physical Address) to DPA association changes due to a remapping event. PMEM security events like Unlock and Passphrase Secure Erase already manage caches through LIBNVDIMM, so that leaves HPA to DPA remap events that need cache management by the CXL core. Those only happen when the boot time CXL configuration has changed. That event occurs when userspace attaches an endpoint decoder to a region configuration, and that region is subsequently activated. The implications of not invalidating caches between remap events is that reads from the region at different points in time may return different results due to stale cached data from the previous HPA to DPA mapping. Without a guarantee that the region contents after cxl_region_probe() are written before being read (a layering-violation assumption that cxl_region_probe() can not make) the CXL subsystem needs to ensure that reads that precede writes see consistent results. A CONFIG_CXL_REGION_INVALIDATION_TEST option is added to support debug and unit testing of the CXL implementation in QEMU or other environments where cpu_cache_has_invalidate_memregion() returns false. This may prove too restrictive for QEMU where the HDM decoders are emulated, but in that case the CXL subsystem needs some new mechanism / indication that the HDM decoder is emulated and not a passthrough of real hardware. Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/166993222098.1995348.16604163596374520890.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/Kconfig | 18 ++++++++++++++++++ drivers/cxl/core/region.c | 31 +++++++++++++++++++++++++++++++ drivers/cxl/cxl.h | 8 ++++++++ drivers/cxl/security.c | 14 -------------- 4 files changed, 57 insertions(+), 14 deletions(-) diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig index 768ced3d6fe8..0ac53c422c31 100644 --- a/drivers/cxl/Kconfig +++ b/drivers/cxl/Kconfig @@ -111,4 +111,22 @@ config CXL_REGION select MEMREGION select GET_FREE_REGION +config CXL_REGION_INVALIDATION_TEST + bool "CXL: Region Cache Management Bypass (TEST)" + depends on CXL_REGION + help + CXL Region management and security operations potentially invalidate + the content of CPU caches without notifiying those caches to + invalidate the affected cachelines. 
The CXL Region driver attempts + to invalidate caches when those events occur. If that invalidation + fails the region will fail to enable. Reasons for cache + invalidation failure are due to the CPU not providing a cache + invalidation mechanism. For example usage of wbinvd is restricted to + bare metal x86. However, for testing purposes toggling this option + can disable that data integrity safety and proceed with enabling + regions when there might be conflicting contents in the CPU cache. + + If unsure, or if this kernel is meant for production environments, + say N. + endif diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 1bc2ebefa2a5..3a6c3f84015f 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1403,6 +1403,8 @@ static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos) goto out; down_read(&cxl_dpa_rwsem); rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos); + if (rc == 0) + set_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags); up_read(&cxl_dpa_rwsem); up_write(&cxl_region_rwsem); out: @@ -1900,6 +1902,30 @@ err: return rc; } +static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) +{ + if (!test_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags)) + return 0; + + if (!cpu_cache_has_invalidate_memregion()) { + if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) { + dev_warn( + &cxlr->dev, + "Bypassing cpu_cache_invalidate_memergion() for testing!\n"); + clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags); + return 0; + } else { + dev_err(&cxlr->dev, + "Failed to synchronize CPU cache state\n"); + return -ENXIO; + } + } + + cpu_cache_invalidate_memregion(IORES_DESC_CXL); + clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags); + return 0; +} + static int cxl_region_probe(struct device *dev) { struct cxl_region *cxlr = to_cxl_region(dev); @@ -1915,12 +1941,16 @@ static int cxl_region_probe(struct device *dev) if (p->state < CXL_CONFIG_COMMIT) { dev_dbg(&cxlr->dev, "config state: %d\n", p->state); rc = -ENXIO; + goto out; } + rc = cxl_region_invalidate_memregion(cxlr); + /* * From this point on any path that changes the region's state away from * CXL_CONFIG_COMMIT is also responsible for releasing the driver. */ +out: up_read(&cxl_region_rwsem); if (rc) @@ -1953,4 +1983,5 @@ void cxl_region_exit(void) } MODULE_IMPORT_NS(CXL); +MODULE_IMPORT_NS(DEVMEM); MODULE_ALIAS_CXL(CXL_DEVICE_REGION); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index b433e541a054..e5e1abceeca7 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -380,12 +380,19 @@ struct cxl_region_params { int nr_targets; }; +/* + * Flag whether this region needs to have its HPA span synchronized with + * CPU cache state at region activation time. + */ +#define CXL_REGION_F_INCOHERENT 0 + /** * struct cxl_region - CXL region * @dev: This region's device * @id: This region's id. 
Id is globally unique across all regions * @mode: Endpoint decoder allocation / access mode * @type: Endpoint decoder target type + * @flags: Region state flags * @params: active + config params for the region */ struct cxl_region { @@ -393,6 +400,7 @@ struct cxl_region { int id; enum cxl_decoder_mode mode; enum cxl_decoder_type type; + unsigned long flags; struct cxl_region_params params; }; diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index cbd005ceb091..5484d4eecfd1 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -120,17 +120,12 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, u8 pass[NVDIMM_PASSPHRASE_LEN]; int rc; - if (!cpu_cache_has_invalidate_memregion()) - return -EINVAL; - memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN); rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK, pass, NVDIMM_PASSPHRASE_LEN, NULL, 0); if (rc < 0) return rc; - /* DIMM unlocked, invalidate all CPU caches before we read it */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -144,21 +139,14 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm, struct cxl_pass_erase erase; int rc; - if (!cpu_cache_has_invalidate_memregion()) - return -EINVAL; - erase.type = ptype == NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN); - /* Flush all cache before we erase mem device */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE, &erase, sizeof(erase), NULL, 0); if (rc < 0) return rc; - /* mem device erased, invalidate all CPU caches before data is read */ - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); return 0; } @@ -173,5 +161,3 @@ static const struct nvdimm_security_ops __cxl_security_ops = { }; const struct nvdimm_security_ops *cxl_security_ops = &__cxl_security_ops; - -MODULE_IMPORT_NS(DEVMEM); -- cgit v1.2.3 From d5b1a27143cb7f78030bb2b6812730992a930c47 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Sat, 3 Dec 2022 00:40:29 -0800 Subject: cxl/acpi: Extract component registers of restricted hosts from RCRB A downstream port must be connected to a component register block. For restricted hosts the base address is determined from the RCRB. The RCRB is provided by the host's CEDT CHBS entry. Rework CEDT parser to get the RCRB and add code to extract the component register block from it. RCRB's BAR[0..1] point to the component block containing CXL subsystem component registers. MEMBAR extraction follows the PCI base spec here, esp. 64 bit extraction and memory range alignment (6.0, 7.5.1.2.1). The RCRB base address is cached in the cxl_dport per-host bridge so that the upstream port component registers can be retrieved later by an RCD (RCIEP) associated with the host bridge. Note: Right now the component register block is used for HDM decoder capability only which is optional for RCDs. If unsupported by the RCD, the HDM init will fail. It is future work to bypass it in this case. 
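Of the steps above, the 64-bit MEMBAR composition is pure PCI spec mechanics; isolated as a sketch (the helper name is hypothetical), it is the standard two-register decode that the new helper performs:

  #include <linux/pci.h>

  /* compose a 64-bit memory BAR from the two 32-bit BAR registers */
  static u64 example_decode_membar(u32 bar0, u32 bar1)
  {
          u64 base = bar0 & PCI_BASE_ADDRESS_MEM_MASK;

          /* a 64-bit BAR spans two slots; BAR1 supplies bits 63:32 */
          if (bar0 & PCI_BASE_ADDRESS_MEM_TYPE_64)
                  base |= (u64)bar1 << 32;

          return base;
  }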
Co-developed-by: Terry Bowman Signed-off-by: Terry Bowman Signed-off-by: Robert Richter Link: https://lore.kernel.org/r/Y4dsGZ24aJlxSfI1@rric.localdomain [djbw: introduce devm_cxl_add_rch_dport()] Link: https://lore.kernel.org/r/166993044524.1882361.2539922887413208807.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Dave Jiang Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 51 +++++++++++++++++++++++++++++---- drivers/cxl/core/port.c | 53 +++++++++++++++++++++++++++++++---- drivers/cxl/core/regs.c | 65 +++++++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxl.h | 16 +++++++++++ tools/testing/cxl/Kbuild | 1 + tools/testing/cxl/test/cxl.c | 10 +++++++ tools/testing/cxl/test/mock.c | 19 +++++++++++++ tools/testing/cxl/test/mock.h | 3 ++ 8 files changed, 207 insertions(+), 11 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 694868a85c93..b9472fbfdefc 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -9,6 +9,8 @@ #include "cxlpci.h" #include "cxl.h" +#define CXL_RCRB_SIZE SZ_8K + static unsigned long cfmws_to_decoder_flags(int restrictions) { unsigned long flags = CXL_DECODER_F_ENABLE; @@ -215,6 +217,11 @@ static int add_host_bridge_uport(struct device *match, void *arg) return 0; } + if (dport->rch) { + dev_info(bridge, "host supports CXL (restricted)\n"); + return 0; + } + rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus); if (rc) return rc; @@ -232,7 +239,9 @@ static int add_host_bridge_uport(struct device *match, void *arg) struct cxl_chbs_context { struct device *dev; unsigned long long uid; + resource_size_t rcrb; resource_size_t chbcr; + u32 cxl_version; }; static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg, @@ -248,7 +257,25 @@ static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg, if (ctx->uid != chbs->uid) return 0; - ctx->chbcr = chbs->base; + + ctx->cxl_version = chbs->cxl_version; + ctx->rcrb = CXL_RESOURCE_NONE; + ctx->chbcr = CXL_RESOURCE_NONE; + + if (!chbs->base) + return 0; + + if (chbs->cxl_version != ACPI_CEDT_CHBS_VERSION_CXL11) { + ctx->chbcr = chbs->base; + return 0; + } + + if (chbs->length != CXL_RCRB_SIZE) + return 0; + + ctx->rcrb = chbs->base; + ctx->chbcr = cxl_rcrb_to_component(ctx->dev, chbs->base, + CXL_RCRB_DOWNSTREAM); return 0; } @@ -277,21 +304,35 @@ static int add_host_bridge_dport(struct device *match, void *arg) dev_dbg(match, "UID found: %lld\n", uid); ctx = (struct cxl_chbs_context) { - .dev = host, + .dev = match, .uid = uid, }; acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx); - if (ctx.chbcr == 0) { + if (!ctx.chbcr) { + dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n", + uid); + return 0; + } + + if (ctx.rcrb != CXL_RESOURCE_NONE) + dev_dbg(match, "RCRB found for UID %lld: %pa\n", uid, &ctx.rcrb); + + if (ctx.chbcr == CXL_RESOURCE_NONE) { dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n", uid); return 0; } - dev_dbg(match, "CHBCR found: 0x%08llx\n", (u64)ctx.chbcr); + dev_dbg(match, "CHBCR found: %pa\n", &ctx.chbcr); pci_root = acpi_pci_find_root(hb->handle); bridge = pci_root->bus->bridge; - dport = devm_cxl_add_dport(root_port, bridge, uid, ctx.chbcr); + if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) + dport = devm_cxl_add_rch_dport(root_port, bridge, uid, + ctx.chbcr, ctx.rcrb); + else + dport = devm_cxl_add_dport(root_port, bridge, uid, + ctx.chbcr); if (IS_ERR(dport)) return PTR_ERR(dport); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index d225267c69bb..dae2ca31885e 
100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -628,6 +628,8 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, iter = to_cxl_port(iter->dev.parent); if (iter->host_bridge) port->host_bridge = iter->host_bridge; + else if (parent_dport->rch) + port->host_bridge = parent_dport->dport; else port->host_bridge = iter->uport; dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge)); @@ -899,10 +901,10 @@ static void cxl_dport_unlink(void *data) sysfs_remove_link(&port->dev.kobj, link_name); } -static struct cxl_dport *__devm_cxl_add_dport(struct cxl_port *port, - struct device *dport_dev, - int port_id, - resource_size_t component_reg_phys) +static struct cxl_dport * +__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, + int port_id, resource_size_t component_reg_phys, + resource_size_t rcrb) { char link_name[CXL_TARGET_STRLEN]; struct cxl_dport *dport; @@ -932,6 +934,9 @@ static struct cxl_dport *__devm_cxl_add_dport(struct cxl_port *port, dport->port_id = port_id; dport->component_reg_phys = component_reg_phys; dport->port = port; + if (rcrb != CXL_RESOURCE_NONE) + dport->rch = true; + dport->rcrb = rcrb; cond_cxl_root_lock(port); rc = add_dport(port, dport); @@ -956,7 +961,7 @@ static struct cxl_dport *__devm_cxl_add_dport(struct cxl_port *port, } /** - * devm_cxl_add_dport - append downstream port data to a cxl_port + * devm_cxl_add_dport - append VH downstream port data to a cxl_port * @port: the cxl_port that references this dport * @dport_dev: firmware or PCI device representing the dport * @port_id: identifier for this dport in a decoder's target list @@ -973,7 +978,7 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, struct cxl_dport *dport; dport = __devm_cxl_add_dport(port, dport_dev, port_id, - component_reg_phys); + component_reg_phys, CXL_RESOURCE_NONE); if (IS_ERR(dport)) { dev_dbg(dport_dev, "failed to add dport to %s: %ld\n", dev_name(&port->dev), PTR_ERR(dport)); @@ -986,6 +991,42 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, } EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL); +/** + * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port + * @port: the cxl_port that references this dport + * @dport_dev: firmware or PCI device representing the dport + * @port_id: identifier for this dport in a decoder's target list + * @component_reg_phys: optional location of CXL component registers + * @rcrb: mandatory location of a Root Complex Register Block + * + * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH + */ +struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, + struct device *dport_dev, int port_id, + resource_size_t component_reg_phys, + resource_size_t rcrb) +{ + struct cxl_dport *dport; + + if (rcrb == CXL_RESOURCE_NONE) { + dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n"); + return ERR_PTR(-EINVAL); + } + + dport = __devm_cxl_add_dport(port, dport_dev, port_id, + component_reg_phys, rcrb); + if (IS_ERR(dport)) { + dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n", + dev_name(&port->dev), PTR_ERR(dport)); + } else { + dev_dbg(dport_dev, "RCH dport added to %s\n", + dev_name(&port->dev)); + } + + return dport; +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL); + static int add_ep(struct cxl_ep *new) { struct cxl_port *port = new->dport->port; diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index ec178e69b18f..1173912b9cf7 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -307,3 +307,68 @@ int 
cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, return -ENODEV; } EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL); + +resource_size_t cxl_rcrb_to_component(struct device *dev, + resource_size_t rcrb, + enum cxl_rcrb which) +{ + resource_size_t component_reg_phys; + u32 bar0, bar1; + void __iomem *addr; + u16 cmd; + u32 id; + + if (which == CXL_RCRB_UPSTREAM) + rcrb += SZ_4K; + + /* + * RCRB's BAR[0..1] point to component block containing CXL + * subsystem component registers. MEMBAR extraction follows + * the PCI Base spec here, esp. 64 bit extraction and memory + * ranges alignment (6.0, 7.5.1.2.1). + */ + if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB")) + return CXL_RESOURCE_NONE; + addr = ioremap(rcrb, SZ_4K); + if (!addr) { + dev_err(dev, "Failed to map RCRB %pa\n", &rcrb); + release_mem_region(rcrb, SZ_4K); + return CXL_RESOURCE_NONE; + } + + id = readl(addr + PCI_VENDOR_ID); + cmd = readw(addr + PCI_COMMAND); + bar0 = readl(addr + PCI_BASE_ADDRESS_0); + bar1 = readl(addr + PCI_BASE_ADDRESS_1); + iounmap(addr); + release_mem_region(rcrb, SZ_4K); + + /* + * Sanity check, see CXL 3.0 Figure 9-8 CXL Device that Does Not + * Remap Upstream Port and Component Registers + */ + if (id == U32_MAX) { + if (which == CXL_RCRB_DOWNSTREAM) + dev_err(dev, "Failed to access Downstream Port RCRB\n"); + return CXL_RESOURCE_NONE; + } + if (!(cmd & PCI_COMMAND_MEMORY)) + return CXL_RESOURCE_NONE; + /* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */ + if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO)) + return CXL_RESOURCE_NONE; + + component_reg_phys = bar0 & PCI_BASE_ADDRESS_MEM_MASK; + if (bar0 & PCI_BASE_ADDRESS_MEM_TYPE_64) + component_reg_phys |= ((u64)bar1) << 32; + + if (!component_reg_phys) + return CXL_RESOURCE_NONE; + + /* MEMBAR is block size (64k) aligned.
*/ + if (!IS_ALIGNED(component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE)) + return CXL_RESOURCE_NONE; + + return component_reg_phys; +} +EXPORT_SYMBOL_NS_GPL(cxl_rcrb_to_component, CXL); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 281b1db5a271..1342e4e61537 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -223,6 +223,14 @@ enum cxl_regloc_type; int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, struct cxl_register_map *map); +enum cxl_rcrb { + CXL_RCRB_DOWNSTREAM, + CXL_RCRB_UPSTREAM, +}; +resource_size_t cxl_rcrb_to_component(struct device *dev, + resource_size_t rcrb, + enum cxl_rcrb which); + #define CXL_RESOURCE_NONE ((resource_size_t) -1) #define CXL_TARGET_STRLEN 20 @@ -486,12 +494,16 @@ cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev) * @dport: PCI bridge or firmware device representing the downstream link * @port_id: unique hardware identifier for dport in decoder target list * @component_reg_phys: downstream port component registers + * @rcrb: base address for the Root Complex Register Block + * @rch: Indicate whether this dport was enumerated in RCH or VH mode * @port: reference to cxl_port that contains this downstream port */ struct cxl_dport { struct device *dport; int port_id; resource_size_t component_reg_phys; + resource_size_t rcrb; + bool rch; struct cxl_port *port; }; @@ -561,6 +573,10 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id, resource_size_t component_reg_phys); +struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, + struct device *dport_dev, int port_id, + resource_size_t component_reg_phys, + resource_size_t rcrb); struct cxl_decoder *to_cxl_decoder(struct device *dev); struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild index 500be85729cc..9e4d94e81723 100644 --- a/tools/testing/cxl/Kbuild +++ b/tools/testing/cxl/Kbuild @@ -10,6 +10,7 @@ ldflags-y += --wrap=devm_cxl_add_passthrough_decoder ldflags-y += --wrap=devm_cxl_enumerate_decoders ldflags-y += --wrap=cxl_await_media_ready ldflags-y += --wrap=cxl_hdm_decode_init +ldflags-y += --wrap=cxl_rcrb_to_component DRIVERS := ../../../drivers CXL_SRC := $(DRIVERS)/cxl diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 4f9dc2b3f655..7d87be53ac8e 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -696,6 +696,15 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port) return 0; } +resource_size_t mock_cxl_rcrb_to_component(struct device *dev, + resource_size_t rcrb, + enum cxl_rcrb which) +{ + dev_dbg(dev, "rcrb: %pa which: %d\n", &rcrb, which); + + return (resource_size_t) which + 1; +} + static struct cxl_mock_ops cxl_mock_ops = { .is_mock_adev = is_mock_adev, .is_mock_bridge = is_mock_bridge, @@ -704,6 +713,7 @@ static struct cxl_mock_ops cxl_mock_ops = { .is_mock_dev = is_mock_dev, .acpi_table_parse_cedt = mock_acpi_table_parse_cedt, .acpi_evaluate_integer = mock_acpi_evaluate_integer, + .cxl_rcrb_to_component = mock_cxl_rcrb_to_component, .acpi_pci_find_root = mock_acpi_pci_find_root, .devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports, .devm_cxl_setup_hdm = mock_cxl_setup_hdm, diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c index bce6a21df0d5..5dface08e0de 100644 --- a/tools/testing/cxl/test/mock.c +++ b/tools/testing/cxl/test/mock.c @@ -224,6 
+224,25 @@ int __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds, } EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL); +resource_size_t __wrap_cxl_rcrb_to_component(struct device *dev, + resource_size_t rcrb, + enum cxl_rcrb which) +{ + int index; + resource_size_t component_reg_phys; + struct cxl_mock_ops *ops = get_cxl_mock_ops(&index); + + if (ops && ops->is_mock_port(dev)) + component_reg_phys = + ops->cxl_rcrb_to_component(dev, rcrb, which); + else + component_reg_phys = cxl_rcrb_to_component(dev, rcrb, which); + put_cxl_mock_ops(index); + + return component_reg_phys; +} +EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcrb_to_component, CXL); + MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(ACPI); MODULE_IMPORT_NS(CXL); diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h index 738f24e3988a..ef33f159375e 100644 --- a/tools/testing/cxl/test/mock.h +++ b/tools/testing/cxl/test/mock.h @@ -15,6 +15,9 @@ struct cxl_mock_ops { acpi_string pathname, struct acpi_object_list *arguments, unsigned long long *data); + resource_size_t (*cxl_rcrb_to_component)(struct device *dev, + resource_size_t rcrb, + enum cxl_rcrb which); struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle); bool (*is_mock_bus)(struct pci_bus *bus); bool (*is_mock_port)(struct device *dev); -- cgit v1.2.3 From af2dfef854aa6afdf380e15e39d936d3b66097f1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:12 -0700 Subject: cxl/pci: Cleanup repeated code in cxl_probe_regs() helpers Rather than duplicating the setting of valid, length, and offset for each type, just convey a pointer to the register map to common code. Yes, the change in cxl_probe_component_regs() does not save any lines of code, but it is preparation for adding another component register type to map (RAS Capability Structure).
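Note: as an illustrative sketch (identifiers as used in the diff below), the common-code pattern is that the switch only selects which cxl_reg_map to fill, and a shared tail populates it once per loop iteration.

	struct cxl_reg_map *rmap = NULL;

	switch (cap_id) {
	case CXL_CM_CAP_CAP_ID_HDM:
		rmap = &map->hdm_decoder;
		break;
	default:
		break;
	}

	if (!rmap)
		continue;
	rmap->valid = true;
	rmap->offset = CXL_CM_OFFSET + offset;
	rmap->size = length;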
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974409293.1608150.17661353937678581423.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/regs.c | 46 ++++++++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 39a129c57d40..bd6ae14b679e 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -59,36 +59,41 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, for (cap = 1; cap <= cap_count; cap++) { void __iomem *register_block; - u32 hdr; - int decoder_cnt; + struct cxl_reg_map *rmap; u16 cap_id, offset; - u32 length; + u32 length, hdr; hdr = readl(base + cap * 0x4); cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); register_block = base + offset; + hdr = readl(register_block); + rmap = NULL; switch (cap_id) { - case CXL_CM_CAP_CAP_ID_HDM: + case CXL_CM_CAP_CAP_ID_HDM: { + int decoder_cnt; + dev_dbg(dev, "found HDM decoder capability (0x%x)\n", offset); - hdr = readl(register_block); - decoder_cnt = cxl_hdm_decoder_count(hdr); length = 0x20 * decoder_cnt + 0x10; - - map->hdm_decoder.valid = true; - map->hdm_decoder.offset = CXL_CM_OFFSET + offset; - map->hdm_decoder.size = length; + rmap = &map->hdm_decoder; break; + } default: dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, offset); break; } + + if (!rmap) + continue; + rmap->valid = true; + rmap->offset = CXL_CM_OFFSET + offset; + rmap->size = length; } } EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL); @@ -117,6 +122,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base, cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); for (cap = 1; cap <= cap_count; cap++) { + struct cxl_reg_map *rmap; u32 offset, length; u16 cap_id; @@ -125,28 +131,22 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base, offset = readl(base + cap * 0x10 + 0x4); length = readl(base + cap * 0x10 + 0x8); + rmap = NULL; switch (cap_id) { case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: dev_dbg(dev, "found Status capability (0x%x)\n", offset); - - map->status.valid = true; - map->status.offset = offset; - map->status.size = length; + rmap = &map->status; break; case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); - map->mbox.valid = true; - map->mbox.offset = offset; - map->mbox.size = length; + rmap = &map->mbox; break; case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); break; case CXLDEV_CAP_CAP_ID_MEMDEV: dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); - map->memdev.valid = true; - map->memdev.offset = offset; - map->memdev.size = length; + rmap = &map->memdev; break; default: if (cap_id >= 0x8000) @@ -155,6 +155,12 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base, dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); break; } + + if (!rmap) + continue; + rmap->valid = true; + rmap->offset = offset; + rmap->size = length; } } EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL); -- cgit v1.2.3 From 1191ca102d32456d29cf64bd4547e216986ff6b6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:18 -0700 Subject: cxl/pci: Cleanup cxl_map_device_regs() Use a loop to reduce the duplicated code in cxl_map_device_regs(). This is in preparation for deleting cxl_map_regs(). 
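Note: a minimal sketch of the table-driven idiom, using the names from the diff below; each entry pairs a probed register map with the __iomem pointer it fills, so one loop replaces three copy-pasted blocks.

	struct mapinfo {
		struct cxl_reg_map *rmap;
		void __iomem **addr;
	} mapinfo[] = {
		{ &map->device_map.status, &regs->status },
		{ &map->device_map.mbox, &regs->mbox },
		{ &map->device_map.memdev, &regs->memdev },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
		struct mapinfo *mi = &mapinfo[i];

		if (!mi->rmap->valid)
			continue;
		*mi->addr = devm_cxl_iomap_block(dev,
						 phys_addr + mi->rmap->offset,
						 mi->rmap->size);
		if (!*mi->addr)
			return -ENOMEM;
	}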
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974409867.1608150.14886452053935226038.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/regs.c | 51 +++++++++++++++++++------------------------ 1 file changed, 20 insertions(+), 31 deletions(-) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index bd6ae14b679e..f03ede86412d 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -211,42 +211,31 @@ int cxl_map_device_regs(struct pci_dev *pdev, struct cxl_device_regs *regs, struct cxl_register_map *map) { + resource_size_t phys_addr = + pci_resource_start(pdev, map->barno) + map->block_offset; struct device *dev = &pdev->dev; - resource_size_t phys_addr; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - if (map->device_map.status.valid) { - resource_size_t addr; + struct mapinfo { + struct cxl_reg_map *rmap; + void __iomem **addr; + } mapinfo[] = { + { &map->device_map.status, &regs->status, }, + { &map->device_map.mbox, &regs->mbox, }, + { &map->device_map.memdev, &regs->memdev, }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(mapinfo); i++) { + struct mapinfo *mi = &mapinfo[i]; resource_size_t length; - - addr = phys_addr + map->device_map.status.offset; - length = map->device_map.status.size; - regs->status = devm_cxl_iomap_block(dev, addr, length); - if (!regs->status) - return -ENOMEM; - } - - if (map->device_map.mbox.valid) { resource_size_t addr; - resource_size_t length; - addr = phys_addr + map->device_map.mbox.offset; - length = map->device_map.mbox.size; - regs->mbox = devm_cxl_iomap_block(dev, addr, length); - if (!regs->mbox) - return -ENOMEM; - } - - if (map->device_map.memdev.valid) { - resource_size_t addr; - resource_size_t length; + if (!mi->rmap->valid) + continue; - addr = phys_addr + map->device_map.memdev.offset; - length = map->device_map.memdev.size; - regs->memdev = devm_cxl_iomap_block(dev, addr, length); - if (!regs->memdev) + addr = phys_addr + mi->rmap->offset; + length = mi->rmap->size; + *(mi->addr) = devm_cxl_iomap_block(dev, addr, length); + if (!*(mi->addr)) return -ENOMEM; } -- cgit v1.2.3 From 43a2fb3aef165ffe9d4315059a2e951253f4050b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:24 -0700 Subject: cxl/pci: Kill cxl_map_regs() The component registers are currently unused by the cxl_pci driver. Only the physical address base of the component registers is conveyed to the cxl_mem driver. Just call cxl_map_device_regs() directly.
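Note: a sketch of the resulting probe flow (mirroring the diff below): cxl_pci_probe() maps the device register block directly, and only the physical base of the component register block is recorded for the cxl_mem driver.

	rc = cxl_map_device_regs(pdev, &cxlds->regs.device_regs, &map);
	if (rc)
		return rc;

	/* component registers: only the physical base is conveyed for now */
	cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map);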
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974410443.1608150.15855499736133349600.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index faeb5d9d7a7a..82023cf0cdcf 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -347,27 +347,6 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map) return 0; } -static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map) -{ - struct device *dev = cxlds->dev; - struct pci_dev *pdev = to_pci_dev(dev); - - switch (map->reg_type) { - case CXL_REGLOC_RBI_COMPONENT: - cxl_map_component_regs(pdev, &cxlds->regs.component, map); - dev_dbg(dev, "Mapping component registers...\n"); - break; - case CXL_REGLOC_RBI_MEMDEV: - cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map); - dev_dbg(dev, "Probing device registers...\n"); - break; - default: - break; - } - - return 0; -} - static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, struct cxl_register_map *map) { @@ -461,7 +440,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - rc = cxl_map_regs(cxlds, &map); + rc = cxl_map_device_regs(pdev, &cxlds->regs.device_regs, &map); if (rc) return rc; -- cgit v1.2.3 From 6c7f4f1e51c2a2474e6d4024d2ed32f8965be4a4 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:30 -0700 Subject: cxl/core/regs: Make cxl_map_{component, device}_regs() device generic There is no need to carry the barno and the block offset through the stack, just convert them to a resource base immediately. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974411035.1608150.8605988708101648442.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 3 +-- drivers/cxl/core/port.c | 2 +- drivers/cxl/core/regs.c | 40 +++++++++++++++++++++++----------------- drivers/cxl/cxl.h | 14 ++++++-------- drivers/cxl/cxlpci.h | 9 --------- drivers/cxl/pci.c | 25 ++++++------------------- 6 files changed, 37 insertions(+), 56 deletions(-) diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 9240df53ed87..2a8a88d38533 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -54,8 +54,7 @@ static int match_add_dports(struct pci_dev *pdev, void *data) dev_dbg(&port->dev, "failed to find component registers\n"); port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); - dport = devm_cxl_add_dport(port, &pdev->dev, port_num, - cxl_regmap_to_base(pdev, &map)); + dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource); if (IS_ERR(dport)) { ctx->error = PTR_ERR(dport); return PTR_ERR(dport); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index e7556864ea80..3f37b061ff26 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1243,7 +1243,7 @@ static resource_size_t find_component_registers(struct device *dev) pdev = to_pci_dev(dev); cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map); - return cxl_regmap_to_base(pdev, &map); + return map.resource; } static int add_port_attach_ep(struct cxl_memdev *cxlmd, diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index f03ede86412d..e4b0d52ac3a1 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -186,17 +186,13 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, 
resource_size_t addr, return ret_val; } -int cxl_map_component_regs(struct pci_dev *pdev, - struct cxl_component_regs *regs, +int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, struct cxl_register_map *map) { - struct device *dev = &pdev->dev; resource_size_t phys_addr; resource_size_t length; - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - + phys_addr = map->resource; phys_addr += map->component_map.hdm_decoder.offset; length = map->component_map.hdm_decoder.size; regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); @@ -207,13 +203,11 @@ int cxl_map_component_regs(struct pci_dev *pdev, } EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL); -int cxl_map_device_regs(struct pci_dev *pdev, +int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs, struct cxl_register_map *map) { - resource_size_t phys_addr = - pci_resource_start(pdev, map->barno) + map->block_offset; - struct device *dev = &pdev->dev; + resource_size_t phys_addr = map->resource; struct mapinfo { struct cxl_reg_map *rmap; void __iomem **addr; @@ -243,13 +237,24 @@ int cxl_map_device_regs(struct pci_dev *pdev, } EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL); -static void cxl_decode_regblock(u32 reg_lo, u32 reg_hi, +static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi, struct cxl_register_map *map) { - map->block_offset = ((u64)reg_hi << 32) | - (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK); - map->barno = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo); + int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo); + u64 offset = ((u64)reg_hi << 32) | + (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK); + + if (offset > pci_resource_len(pdev, bar)) { + dev_warn(&pdev->dev, + "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar, + &pdev->resource[bar], &offset, map->reg_type); + return false; + } + map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo); + map->resource = pci_resource_start(pdev, bar) + offset; + map->max_size = pci_resource_len(pdev, bar) - offset; + return true; } /** @@ -269,7 +274,7 @@ int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, u32 regloc_size, regblocks; int regloc, i; - map->block_offset = U64_MAX; + map->resource = CXL_RESOURCE_NONE; regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_REG_LOCATOR); if (!regloc) @@ -287,13 +292,14 @@ int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, pci_read_config_dword(pdev, regloc, ®_lo); pci_read_config_dword(pdev, regloc + 4, ®_hi); - cxl_decode_regblock(reg_lo, reg_hi, map); + if (!cxl_decode_regblock(pdev, reg_lo, reg_hi, map)) + continue; if (map->reg_type == type) return 0; } - map->block_offset = U64_MAX; + map->resource = CXL_RESOURCE_NONE; return -ENODEV; } EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index ac75554b5d76..46299e72f2c9 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -187,17 +187,17 @@ struct cxl_device_reg_map { /** * struct cxl_register_map - DVSEC harvested register block mapping parameters * @base: virtual base of the register-block-BAR + @block_offset - * @block_offset: offset to start of register block in @barno + * @resource: physical resource base of the register block + * @max_size: maximum mapping size to perform register search * @reg_type: see enum cxl_regloc_type - * @barno: PCI BAR number containing the register block * @component_map: cxl_reg_map for 
component registers * @device_map: cxl_reg_maps for device registers */ struct cxl_register_map { void __iomem *base; - u64 block_offset; + resource_size_t resource; + resource_size_t max_size; u8 reg_type; - u8 barno; union { struct cxl_component_reg_map component_map; struct cxl_device_reg_map device_map; @@ -208,11 +208,9 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, struct cxl_component_reg_map *map); void cxl_probe_device_regs(struct device *dev, void __iomem *base, struct cxl_device_reg_map *map); -int cxl_map_component_regs(struct pci_dev *pdev, - struct cxl_component_regs *regs, +int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, struct cxl_register_map *map); -int cxl_map_device_regs(struct pci_dev *pdev, - struct cxl_device_regs *regs, +int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs, struct cxl_register_map *map); enum cxl_regloc_type; diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index eec597dbe763..920909791bb9 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -62,15 +62,6 @@ enum cxl_regloc_type { CXL_REGLOC_RBI_TYPES }; -static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev, - struct cxl_register_map *map) -{ - if (map->block_offset == U64_MAX) - return CXL_RESOURCE_NONE; - - return pci_resource_start(pdev, map->barno) + map->block_offset; -} - int devm_cxl_port_enumerate_dports(struct cxl_port *port); struct cxl_dev_state; int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm); diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 82023cf0cdcf..aba31c2291c4 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -276,35 +276,22 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds) static int cxl_map_regblock(struct pci_dev *pdev, struct cxl_register_map *map) { - void __iomem *addr; - int bar = map->barno; struct device *dev = &pdev->dev; - resource_size_t offset = map->block_offset; - /* Basic sanity check that BAR is big enough */ - if (pci_resource_len(pdev, bar) < offset) { - dev_err(dev, "BAR%d: %pr: too small (offset: %pa)\n", bar, - &pdev->resource[bar], &offset); - return -ENXIO; - } - - addr = pci_iomap(pdev, bar, 0); - if (!addr) { + map->base = ioremap(map->resource, map->max_size); + if (!map->base) { dev_err(dev, "failed to map registers\n"); return -ENOMEM; } - dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %pa\n", - bar, &offset); - - map->base = addr + map->block_offset; + dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource); return 0; } static void cxl_unmap_regblock(struct pci_dev *pdev, struct cxl_register_map *map) { - pci_iounmap(pdev, map->base - map->block_offset); + iounmap(map->base); map->base = NULL; } @@ -440,7 +427,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - rc = cxl_map_device_regs(pdev, &cxlds->regs.device_regs, &map); + rc = cxl_map_device_regs(&pdev->dev, &cxlds->regs.device_regs, &map); if (rc) return rc; @@ -453,7 +440,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) dev_warn(&pdev->dev, "No component registers (%d)\n", rc); - cxlds->component_reg_phys = cxl_regmap_to_base(pdev, &map); + cxlds->component_reg_phys = map.resource; devm_cxl_pci_create_doe(cxlds); -- cgit v1.2.3 From 920d8d2c60787bf63e023b120e81ca788d4191ff Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:36 -0700 Subject: cxl/port: Limit the port driver to just the 
HDM Decoder Capability Update the port driver to use cxl_map_component_regs() so that the component register block can be shared between the cxl_pci driver and the cxl_port driver. I.e. stop the port driver from reserving the entire component register block for itself via request_region() when it only needs the HDM Decoder Capability subset. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974411625.1608150.7149373371599960307.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/hdm.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index d1d2caea5c62..061551148cfe 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -82,18 +82,22 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) cxlhdm->interleave_mask |= GENMASK(14, 12); } -static void __iomem *map_hdm_decoder_regs(struct cxl_port *port, - void __iomem *crb) +static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb, + struct cxl_component_regs *regs) { - struct cxl_component_reg_map map; + struct cxl_register_map map = { + .resource = port->component_reg_phys, + .base = crb, + .max_size = CXL_COMPONENT_REG_BLOCK_SIZE, + }; - cxl_probe_component_regs(&port->dev, crb, &map); - if (!map.hdm_decoder.valid) { + cxl_probe_component_regs(&port->dev, crb, &map.component_map); + if (!map.component_map.hdm_decoder.valid) { dev_err(&port->dev, "HDM decoder registers invalid\n"); - return IOMEM_ERR_PTR(-ENXIO); + return -ENXIO; } - return crb + map.hdm_decoder.offset; + return cxl_map_component_regs(&port->dev, regs, &map); } /** @@ -103,25 +107,25 @@ static void __iomem *map_hdm_decoder_regs(struct cxl_port *port, struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port) { struct device *dev = &port->dev; - void __iomem *crb, *hdm; struct cxl_hdm *cxlhdm; + void __iomem *crb; + int rc; cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); if (!cxlhdm) return ERR_PTR(-ENOMEM); cxlhdm->port = port; - crb = devm_cxl_iomap_block(dev, port->component_reg_phys, - CXL_COMPONENT_REG_BLOCK_SIZE); + crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); if (!crb) { dev_err(dev, "No component registers mapped\n"); return ERR_PTR(-ENXIO); } - hdm = map_hdm_decoder_regs(port, crb); - if (IS_ERR(hdm)) - return ERR_CAST(hdm); - cxlhdm->regs.hdm_decoder = hdm; + rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs); + iounmap(crb); + if (rc) + return ERR_PTR(rc); parse_hdm_decoder_caps(cxlhdm); if (cxlhdm->decoder_count == 0) { -- cgit v1.2.3 From a1554e9cac5ea04aaf2fb2de0df9936a94cb96fc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:42 -0700 Subject: cxl/pci: Prepare for mapping RAS Capability Structure The RAS Capability Structure is a CXL Component register capability block. Unlike the HDM Decoder Capability, it will be referenced by the cxl_pci driver in response to PCIe AER events. Due to this it is no longer the case that cxl_map_component_regs() can assume that it should map all component registers. Plumb a bitmask of capability ids to map through cxl_map_component_regs(). For symmetry cxl_probe_device_regs() is updated to populate @id in 'struct cxl_reg_map' even though cxl_map_device_regs() does not have a need to map a subset of the device registers per caller.
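Note: callers then state which component capabilities they want mapped as a bitmask of capability ids; two usage sketches matching the diffs in this and a later patch in the series:

	/* cxl_port: map only the HDM Decoder Capability */
	rc = cxl_map_component_regs(&port->dev, regs, &map,
				    BIT(CXL_CM_CAP_CAP_ID_HDM));

	/* cxl_pci: map only the RAS Capability Structure */
	rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
				    &map, BIT(CXL_CM_CAP_CAP_ID_RAS));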
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974412214.1608150.11487843455070795378.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/hdm.c | 3 ++- drivers/cxl/core/regs.c | 36 ++++++++++++++++++++++++++---------- drivers/cxl/cxl.h | 4 +++- 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 061551148cfe..100d0881bde4 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -97,7 +97,8 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb, return -ENXIO; } - return cxl_map_component_regs(&port->dev, regs, &map); + return cxl_map_component_regs(&port->dev, regs, &map, + BIT(CXL_CM_CAP_CAP_ID_HDM)); } /** diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index e4b0d52ac3a1..97e8f4201493 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -92,6 +92,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, if (!rmap) continue; rmap->valid = true; + rmap->id = cap_id; rmap->offset = CXL_CM_OFFSET + offset; rmap->size = length; } @@ -159,6 +160,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base, if (!rmap) continue; rmap->valid = true; + rmap->id = cap_id; rmap->offset = offset; rmap->size = length; } @@ -187,17 +189,31 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, } int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, - struct cxl_register_map *map) + struct cxl_register_map *map, unsigned long map_mask) { - resource_size_t phys_addr; - resource_size_t length; - - phys_addr = map->resource; - phys_addr += map->component_map.hdm_decoder.offset; - length = map->component_map.hdm_decoder.size; - regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); - if (!regs->hdm_decoder) - return -ENOMEM; + struct mapinfo { + struct cxl_reg_map *rmap; + void __iomem **addr; + } mapinfo[] = { + { &map->component_map.hdm_decoder, ®s->hdm_decoder }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(mapinfo); i++) { + struct mapinfo *mi = &mapinfo[i]; + resource_size_t phys_addr; + resource_size_t length; + + if (!mi->rmap->valid) + continue; + if (!test_bit(mi->rmap->id, &map_mask)) + continue; + phys_addr = map->resource + mi->rmap->offset; + length = mi->rmap->size; + *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length); + if (!*(mi->addr)) + return -ENOMEM; + } return 0; } diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 46299e72f2c9..fc16c9a9e1ba 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -170,6 +170,7 @@ struct cxl_regs { struct cxl_reg_map { bool valid; + int id; unsigned long offset; unsigned long size; }; @@ -209,7 +210,8 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, void cxl_probe_device_regs(struct device *dev, void __iomem *base, struct cxl_device_reg_map *map); int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, - struct cxl_register_map *map); + struct cxl_register_map *map, + unsigned long map_mask); int cxl_map_device_regs(struct device *dev, struct cxl_device_regs *regs, struct cxl_register_map *map); -- cgit v1.2.3 From bd09626b39dff97779e1543e25e60ab2876e7e88 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:48 -0700 Subject: cxl/pci: Find and map the RAS Capability Structure The RAS Capability Structure has some ancillary information that may be relevant with respect to AER events, 
link and protocol error status registers. Map the RAS Capability Registers in support of defining a 'struct pci_error_handlers' instance for the cxl_pci driver. Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974412803.1608150.7096566580400947001.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/regs.c | 7 +++++++ drivers/cxl/cxl.h | 19 +++++++++++++++++++ drivers/cxl/pci.c | 8 ++++++++ 3 files changed, 34 insertions(+) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 97e8f4201493..b10f8b79ec40 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -83,6 +83,12 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, rmap = &map->hdm_decoder; break; } + case CXL_CM_CAP_CAP_ID_RAS: + dev_dbg(dev, "found RAS capability (0x%x)\n", + offset); + length = CXL_RAS_CAPABILITY_LENGTH; + rmap = &map->ras; + break; default: dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, offset); @@ -196,6 +202,7 @@ int cxl_map_component_regs(struct device *dev, struct cxl_component_regs *regs, void __iomem **addr; } mapinfo[] = { { &map->component_map.hdm_decoder, &regs->hdm_decoder }, + { &map->component_map.ras, &regs->ras }, }; int i; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index fc16c9a9e1ba..e533c5b5e4b0 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -33,6 +33,7 @@ #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24) #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20) +#define CXL_CM_CAP_CAP_ID_RAS 0x2 #define CXL_CM_CAP_CAP_ID_HDM 0x5 #define CXL_CM_CAP_CAP_HDM_VERSION 1 @@ -119,6 +120,21 @@ static inline int ways_to_cxl(unsigned int ways, u8 *iw) return 0; } +/* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */ +#define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0 +#define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0)) +#define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4 +#define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0)) +#define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8 +#define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0)) +#define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC +#define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0) +#define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10 +#define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0) +#define CXL_RAS_CAP_CONTROL_OFFSET 0x14 +#define CXL_RAS_HEADER_LOG_OFFSET 0x18 +#define CXL_RAS_CAPABILITY_LENGTH 0x58 /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */ #define CXLDEV_CAP_ARRAY_OFFSET 0x0 #define CXLDEV_CAP_ARRAY_CAP_ID 0 @@ -153,9 +169,11 @@ struct cxl_regs { /* * Common set of CXL Component register block base pointers * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure + * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure */ struct_group_tagged(cxl_component_regs, component, void __iomem *hdm_decoder; + void __iomem *ras; ); /* * Common set of CXL Device register block base pointers @@ -177,6 +195,7 @@ struct cxl_reg_map { struct cxl_component_reg_map { struct cxl_reg_map hdm_decoder; + struct cxl_reg_map ras; }; struct cxl_device_reg_map { diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index aba31c2291c4..610b3a77f205 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -311,6 +311,9 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map) return -ENXIO; } + if (!comp_map->ras.valid) + dev_dbg(dev, "RAS registers not found\n"); + dev_dbg(dev, "Set up component registers\n"); break; case
CXL_REGLOC_RBI_MEMDEV: @@ -444,6 +447,11 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) devm_cxl_pci_create_doe(cxlds); + rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component, + &map, BIT(CXL_CM_CAP_CAP_ID_RAS)); + if (rc) + dev_dbg(&pdev->dev, "Failed to map RAS capability.\n"); + rc = cxl_pci_setup_mailbox(cxlds); if (rc) return rc; -- cgit v1.2.3 From 2f6e9c305127f8dea4e2d697b4bdd33e126ccbf7 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 29 Nov 2022 10:48:53 -0700 Subject: cxl/pci: add tracepoint events for CXL RAS Add tracepoint events for recording the CXL uncorrectable and correctable errors. For uncorrectable errors, there is additional data of 512B from the header log register (CXL spec rev3 8.2.4.16.7). The trace event takes a dynamic array that dumps the entire Header Log data. If multiple errors are set in the status register, then the 'first error' field (CXL spec rev3 8.2.4.16.6) is read from the Error Capabilities and Control Register in order to determine the error. This implementation does not include CXL IDE Error details. Cc: Steven Rostedt Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/166974413388.1608150.5875712482260436188.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 2 + include/trace/events/cxl.h | 112 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+) create mode 100644 include/trace/events/cxl.h diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 610b3a77f205..8b817138140a 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -13,6 +13,8 @@ #include "cxlmem.h" #include "cxlpci.h" #include "cxl.h" +#define CREATE_TRACE_POINTS +#include <trace/events/cxl.h> /** * DOC: cxl pci diff --git a/include/trace/events/cxl.h b/include/trace/events/cxl.h new file mode 100644 index 000000000000..72c3e2870a9e --- /dev/null +++ b/include/trace/events/cxl.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cxl + +#if !defined(_CXL_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _CXL_EVENTS_H + +#include <linux/tracepoint.h> + +#define CXL_HEADERLOG_SIZE SZ_512 +#define CXL_HEADERLOG_SIZE_U32 SZ_512 / sizeof(u32) + +#define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0) +#define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1) +#define CXL_RAS_UC_CACHE_BE_PARITY BIT(2) +#define CXL_RAS_UC_CACHE_DATA_ECC BIT(3) +#define CXL_RAS_UC_MEM_DATA_PARITY BIT(4) +#define CXL_RAS_UC_MEM_ADDR_PARITY BIT(5) +#define CXL_RAS_UC_MEM_BE_PARITY BIT(6) +#define CXL_RAS_UC_MEM_DATA_ECC BIT(7) +#define CXL_RAS_UC_REINIT_THRESH BIT(8) +#define CXL_RAS_UC_RSVD_ENCODE BIT(9) +#define CXL_RAS_UC_POISON BIT(10) +#define CXL_RAS_UC_RECV_OVERFLOW BIT(11) +#define CXL_RAS_UC_INTERNAL_ERR BIT(14) +#define CXL_RAS_UC_IDE_TX_ERR BIT(15) +#define CXL_RAS_UC_IDE_RX_ERR BIT(16) + +#define show_uc_errs(status) __print_flags(status, " | ", \ + { CXL_RAS_UC_CACHE_DATA_PARITY, "Cache Data Parity Error" }, \ + { CXL_RAS_UC_CACHE_ADDR_PARITY, "Cache Address Parity Error" }, \ + { CXL_RAS_UC_CACHE_BE_PARITY, "Cache Byte Enable Parity Error" }, \ + { CXL_RAS_UC_CACHE_DATA_ECC, "Cache Data ECC Error" }, \ + { CXL_RAS_UC_MEM_DATA_PARITY, "Memory Data Parity Error" }, \ + { CXL_RAS_UC_MEM_ADDR_PARITY, "Memory Address Parity Error" }, \ + { CXL_RAS_UC_MEM_BE_PARITY, "Memory Byte Enable Parity Error" }, \ + { CXL_RAS_UC_MEM_DATA_ECC, "Memory Data ECC Error" }, \ + { CXL_RAS_UC_REINIT_THRESH, "REINIT Threshold Hit" }, \
{ CXL_RAS_UC_RSVD_ENCODE, "Received Unrecognized Encoding" }, \ + { CXL_RAS_UC_POISON, "Received Poison From Peer" }, \ + { CXL_RAS_UC_RECV_OVERFLOW, "Receiver Overflow" }, \ + { CXL_RAS_UC_INTERNAL_ERR, "Component Specific Error" }, \ + { CXL_RAS_UC_IDE_TX_ERR, "IDE Tx Error" }, \ + { CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \ +) + +TRACE_EVENT(cxl_aer_uncorrectable_error, + TP_PROTO(const char *dev_name, u32 status, u32 fe, u32 *hl), + TP_ARGS(dev_name, status, fe, hl), + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(u32, status) + __field(u32, first_error) + __array(u32, header_log, CXL_HEADERLOG_SIZE_U32) + ), + TP_fast_assign( + __assign_str(dev_name, dev_name); + __entry->status = status; + __entry->first_error = fe; + /* + * Embed the 512B headerlog data for user app retrieval and + * parsing, but no need to print this in the trace buffer. + */ + memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE); + ), + TP_printk("%s: status: '%s' first_error: '%s'", + __get_str(dev_name), + show_uc_errs(__entry->status), + show_uc_errs(__entry->first_error) + ) +); + +#define CXL_RAS_CE_CACHE_DATA_ECC BIT(0) +#define CXL_RAS_CE_MEM_DATA_ECC BIT(1) +#define CXL_RAS_CE_CRC_THRESH BIT(2) +#define CXL_RAS_CE_RETRY_THRESH BIT(3) +#define CXL_RAS_CE_CACHE_POISON BIT(4) +#define CXL_RAS_CE_MEM_POISON BIT(5) +#define CXL_RAS_CE_PHYS_LAYER_ERR BIT(6) + +#define show_ce_errs(status) __print_flags(status, " | ", \ + { CXL_RAS_CE_CACHE_DATA_ECC, "Cache Data ECC Error" }, \ + { CXL_RAS_CE_MEM_DATA_ECC, "Memory Data ECC Error" }, \ + { CXL_RAS_CE_CRC_THRESH, "CRC Threshold Hit" }, \ + { CXL_RAS_CE_RETRY_THRESH, "Retry Threshold" }, \ + { CXL_RAS_CE_CACHE_POISON, "Received Cache Poison From Peer" }, \ + { CXL_RAS_CE_MEM_POISON, "Received Memory Poison From Peer" }, \ + { CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \ +) + +TRACE_EVENT(cxl_aer_correctable_error, + TP_PROTO(const char *dev_name, u32 status), + TP_ARGS(dev_name, status), + TP_STRUCT__entry( + __string(dev_name, dev_name) + __field(u32, status) + ), + TP_fast_assign( + __assign_str(dev_name, dev_name); + __entry->status = status; + ), + TP_printk("%s: status: '%s'", + __get_str(dev_name), show_ce_errs(__entry->status) + ) +); + +#endif /* _CXL_EVENTS_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cxl +#include <trace/define_trace.h> -- cgit v1.2.3 From 2905cb5236cba63a5dc8a83752dcc31f3cc819f9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 29 Nov 2022 10:48:59 -0700 Subject: cxl/pci: Add (hopeful) error handling support Add nominal error handling that tears down CXL.mem in response to error notifications that imply a device reset. Given some CXL.mem may be operating as System RAM, there is a high likelihood that these error events are fatal. However, if the system survives the notification the expectation is that the driver behavior is equivalent to a hot-unplug and re-plug of an endpoint. Note that this does not change the mask values from the default. That awaits CXL _OSC support to determine whether platform firmware is in control of the mask registers.
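Note: the recovery policy in cxl_error_detected() below reduces to this sketch, where 'ue' records whether cxl_report_and_clear() logged an uncorrectable error:

	switch (state) {
	case pci_channel_io_normal:
		/* recoverable unless RAS logged an uncorrectable error */
		if (ue) {
			device_release_driver(dev);
			return PCI_ERS_RESULT_NEED_RESET;
		}
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* an impending reset is fatal to CXL.mem, detach the memdev */
		device_release_driver(dev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		return PCI_ERS_RESULT_DISCONNECT;
	}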
Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166974413966.1608150.15522782911404473932.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/memdev.c | 1 + drivers/cxl/cxl.h | 1 + drivers/cxl/cxlmem.h | 2 + drivers/cxl/pci.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 141 insertions(+) diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 20ce488a7754..a74a93310d26 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -344,6 +344,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds) * needed as this is ordered with cdev_add() publishing the device. */ cxlmd->cxlds = cxlds; + cxlds->cxlmd = cxlmd; cdev = &cxlmd->cdev; rc = cdev_device_add(cdev, dev); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index e533c5b5e4b0..2f3951b76e2d 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -132,6 +132,7 @@ static inline int ways_to_cxl(unsigned int ways, u8 *iw) #define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10 #define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0) #define CXL_RAS_CAP_CONTROL_OFFSET 0x14 +#define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0) #define CXL_RAS_HEADER_LOG_OFFSET 0x18 #define CXL_RAS_CAPABILITY_LENGTH 0x58 diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 88e3a8e54b6a..b3117fd67f42 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -186,6 +186,7 @@ struct cxl_endpoint_dvsec_info { * Currently only memory devices are represented. * * @dev: The device associated with this CXL state + * @cxlmd: The device representing the CXL.mem capabilities of @dev * @regs: Parsed register blocks * @cxl_dvsec: Offset to the PCIe device DVSEC * @payload_size: Size of space for payload @@ -218,6 +219,7 @@ struct cxl_endpoint_dvsec_info { */ struct cxl_dev_state { struct device *dev; + struct cxl_memdev *cxlmd; struct cxl_regs regs; int cxl_dvsec; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 8b817138140a..a7c2d733e5ae 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -9,6 +9,7 @@ #include #include #include +#include <linux/aer.h> #include #include "cxlmem.h" #include "cxlpci.h" @@ -399,6 +400,11 @@ static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds) } } +static void disable_aer(void *pdev) +{ + pci_disable_pcie_error_reporting(pdev); +} + static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cxl_register_map map; @@ -420,6 +426,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) cxlds = cxl_dev_state_create(&pdev->dev); if (IS_ERR(cxlds)) return PTR_ERR(cxlds); + pci_set_drvdata(pdev, cxlds); cxlds->serial = pci_get_dsn(pdev); cxlds->cxl_dvsec = pci_find_dvsec_capability( @@ -474,6 +481,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); + if (cxlds->regs.ras) { + pci_enable_pcie_error_reporting(pdev); + rc = devm_add_action_or_reset(&pdev->dev, disable_aer, pdev); + if (rc) + return rc; + } + pci_save_state(pdev); + if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd); @@ -487,10 +502,132 @@ static const struct pci_device_id cxl_mem_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl); +/* CXL spec rev3.0 8.2.4.16.1 */ +static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log) +{ + void __iomem *addr; + u32 *log_addr; + int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32); + + addr =
cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET; + log_addr = log; + + for (i = 0; i < log_u32_size; i++) { + *log_addr = readl(addr); + log_addr++; + addr += sizeof(u32); + } +} + +/* + * Log the state of the RAS status registers and prepare them to log the + * next error status. Return 1 if reset needed. + */ +static bool cxl_report_and_clear(struct cxl_dev_state *cxlds) +{ + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *dev = &cxlmd->dev; + u32 hl[CXL_HEADERLOG_SIZE_U32]; + void __iomem *addr; + u32 status; + u32 fe; + + if (!cxlds->regs.ras) + return false; + + addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET; + status = le32_to_cpu((__force __le32)readl(addr)); + if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK)) + return false; + + /* If multiple errors, log header points to first error from ctrl reg */ + if (hweight32(status) > 1) { + addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET; + fe = BIT(le32_to_cpu((__force __le32)readl(addr)) & + CXL_RAS_CAP_CONTROL_FE_MASK); + } else { + fe = status; + } + + header_log_copy(cxlds, hl); + trace_cxl_aer_uncorrectable_error(dev_name(dev), status, fe, hl); + writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr); + + return true; +} + +static pci_ers_result_t cxl_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *dev = &cxlmd->dev; + bool ue; + + /* + * A frozen channel indicates an impending reset which is fatal to + * CXL.mem operation, and will likely crash the system. On the off + * chance the situation is recoverable dump the status of the RAS + * capability registers and bounce the active state of the memdev. + */ + ue = cxl_report_and_clear(cxlds); + + switch (state) { + case pci_channel_io_normal: + if (ue) { + device_release_driver(dev); + return PCI_ERS_RESULT_NEED_RESET; + } + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + dev_warn(&pdev->dev, + "%s: frozen state error detected, disable CXL.mem\n", + dev_name(dev)); + device_release_driver(dev); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + dev_warn(&pdev->dev, + "failure state error detected, request disconnect\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev) +{ + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *dev = &cxlmd->dev; + + dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n", + dev_name(dev)); + pci_restore_state(pdev); + if (device_attach(dev) <= 0) + return PCI_ERS_RESULT_DISCONNECT; + return PCI_ERS_RESULT_RECOVERED; +} + +static void cxl_error_resume(struct pci_dev *pdev) +{ + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *dev = &cxlmd->dev; + + dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev), + dev->driver ? 
"successful" : "failed"); +} + +static const struct pci_error_handlers cxl_error_handlers = { + .error_detected = cxl_error_detected, + .slot_reset = cxl_slot_reset, + .resume = cxl_error_resume, +}; + static struct pci_driver cxl_pci_driver = { .name = KBUILD_MODNAME, .id_table = cxl_mem_pci_tbl, .probe = cxl_pci_probe, + .err_handler = &cxl_error_handlers, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, -- cgit v1.2.3 From 361187e04733eee19778ea9b01cb95a977c14c10 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 15:11:21 -0700 Subject: PCI/AER: Add optional logging callback for correctable error Some new devices such as CXL devices may want to record additional error information on a corrected error. Add a callback to allow the PCI device driver to do additional logging such as providing additional stats for user space RAS monitoring. For CXL device, this is actually a need due to CXL needing to write to the CXL RAS capability structure correctable error status register in order to clear the unmasked correctable errors. See CXL spec rev3.0 8.2.4.16. Suggested-by: Jonathan Cameron Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Jonathan Cameron Acked-by: Bjorn Helgaas Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166984619233.2804404.3966368388544312674.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- Documentation/PCI/pci-error-recovery.rst | 7 +++++++ drivers/pci/pcie/aer.c | 8 +++++++- include/linux/pci.h | 3 +++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst index 187f43a03200..bdafeb4b66dc 100644 --- a/Documentation/PCI/pci-error-recovery.rst +++ b/Documentation/PCI/pci-error-recovery.rst @@ -83,6 +83,7 @@ This structure has the form:: int (*mmio_enabled)(struct pci_dev *dev); int (*slot_reset)(struct pci_dev *dev); void (*resume)(struct pci_dev *dev); + void (*cor_error_detected)(struct pci_dev *dev); }; The possible channel states are:: @@ -422,5 +423,11 @@ That is, the recovery API only requires that: - drivers/net/cxgb3 - drivers/net/s2io.c + The cor_error_detected() callback is invoked in handle_error_source() when + the error severity is "correctable". The callback is optional and allows + additional logging to be done if desired. 
See example: + + - drivers/cxl/pci.c + The End ------- diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index e2d8a74f83c3..625f7b2cafe4 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -961,8 +961,14 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) if (aer) pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, info->status); - if (pcie_aer_is_native(dev)) + if (pcie_aer_is_native(dev)) { + struct pci_driver *pdrv = dev->driver; + + if (pdrv && pdrv->err_handler && + pdrv->err_handler->cor_error_detected) + pdrv->err_handler->cor_error_detected(dev); pcie_clear_device_status(dev); + } } else if (info->severity == AER_NONFATAL) pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset); else if (info->severity == AER_FATAL) diff --git a/include/linux/pci.h b/include/linux/pci.h index 2bda4a4e47e8..2119a16ecb10 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -843,6 +843,9 @@ struct pci_error_handlers { /* Device driver may resume normal operations */ void (*resume)(struct pci_dev *dev); + + /* Allow device driver to record more details of a correctable error */ + void (*cor_error_detected)(struct pci_dev *dev); }; -- cgit v1.2.3 From 6155ccc9ddf6642056f1c00c2851d1938d27a7f2 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 30 Nov 2022 17:02:25 -0700 Subject: cxl/pci: Add callback to log AER correctable error Add AER error handler callback to read the RAS capability structure correctable error (CE) status register for the CXL device. Log the error as a trace event and clear the error. For CXL devices, the driver also needs to write back to the status register to clear the unmasked correctable errors. See CXL spec rev3.0 8.2.4.16 for RAS capability structure CE Status Register. Suggested-by: Jonathan Cameron Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/166985287203.2871899.13605149073500556137.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index a7c2d733e5ae..36db681f3705 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -617,10 +617,30 @@ static void cxl_error_resume(struct pci_dev *pdev) dev->driver ? "successful" : "failed"); } +static void cxl_cor_error_detected(struct pci_dev *pdev) +{ + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *dev = &cxlmd->dev; + void __iomem *addr; + u32 status; + + if (!cxlds->regs.ras) + return; + + addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET; + status = le32_to_cpu(readl(addr)); + if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) { + writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr); + trace_cxl_aer_correctable_error(dev_name(dev), status); + } +} + static const struct pci_error_handlers cxl_error_handlers = { .error_detected = cxl_error_detected, .slot_reset = cxl_slot_reset, .resume = cxl_error_resume, + .cor_error_detected = cxl_cor_error_detected, }; static struct pci_driver cxl_pci_driver = { -- cgit v1.2.3 From f9db85bfec0dcc01556a41d23aec47b866ab3569 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Wed, 30 Nov 2022 14:47:25 -0800 Subject: cxl/acpi: Support CXL XOR Interleave Math (CXIMS) When the CFMWS is using XOR math, parse the corresponding CXIMS structure and store the xormaps in the root decoder structure. 
Use the xormaps in a new lookup, cxl_hb_xor(), to find a target's entry in the host bridge interleave target list. Defined in CXL Specification 3.0, Section 9.17.1. Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/5794813acdf7b67cfba3609c6aaff46932fa38d0.1669847017.git.alison.schofield@intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++-- drivers/cxl/core/port.c | 9 ++-- drivers/cxl/cxl.h | 11 +++- 3 files changed, 149 insertions(+), 8 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index fb649683dd3a..ace72e7277ba 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -6,9 +6,118 @@ #include #include #include +#include #include "cxlpci.h" #include "cxl.h" +struct cxl_cxims_data { + int nr_maps; + u64 xormaps[]; +}; + +/* + * Find a target's entry (n) in the host bridge interleave list. + * CXL Specification 3.0 Table 9-22 + */ +static int cxl_xor_calc_n(u64 hpa, struct cxl_cxims_data *cximsd, int iw, + int ig) +{ + int i = 0, n = 0; + u8 eiw; + + /* IW: 2,4,6,8,12,16 begin building 'n' using xormaps */ + if (iw != 3) { + for (i = 0; i < cximsd->nr_maps; i++) + n |= (hweight64(hpa & cximsd->xormaps[i]) & 1) << i; + } + /* IW: 3,6,12 add a modulo calculation to 'n' */ + if (!is_power_of_2(iw)) { + if (ways_to_cxl(iw, &eiw)) + return -1; + hpa &= GENMASK_ULL(51, eiw + ig); + n |= do_div(hpa, 3) << i; + } + return n; +} + +static struct cxl_dport *cxl_hb_xor(struct cxl_root_decoder *cxlrd, int pos) +{ + struct cxl_cxims_data *cximsd = cxlrd->platform_data; + struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; + struct cxl_decoder *cxld = &cxlsd->cxld; + int ig = cxld->interleave_granularity; + int iw = cxld->interleave_ways; + int n = 0; + u64 hpa; + + if (dev_WARN_ONCE(&cxld->dev, + cxld->interleave_ways != cxlsd->nr_targets, + "misconfigured root decoder\n")) + return NULL; + + hpa = cxlrd->res->start + pos * ig; + + /* Entry (n) is 0 for no interleave (iw == 1) */ + if (iw != 1) + n = cxl_xor_calc_n(hpa, cximsd, iw, ig); + + if (n < 0) + return NULL; + + return cxlrd->cxlsd.target[n]; +} + +struct cxl_cxims_context { + struct device *dev; + struct cxl_root_decoder *cxlrd; +}; + +static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg, + const unsigned long end) +{ + struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header; + struct cxl_cxims_context *ctx = arg; + struct cxl_root_decoder *cxlrd = ctx->cxlrd; + struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; + struct device *dev = ctx->dev; + struct cxl_cxims_data *cximsd; + unsigned int hbig, nr_maps; + int rc; + + rc = cxl_to_granularity(cxims->hbig, &hbig); + if (rc) + return rc; + + /* Does this CXIMS entry apply to the given CXL Window?
*/ + if (hbig != cxld->interleave_granularity) + return 0; + + /* IW 1,3 do not use xormaps and skip this parsing entirely */ + if (is_power_of_2(cxld->interleave_ways)) + /* 2, 4, 8, 16 way */ + nr_maps = ilog2(cxld->interleave_ways); + else + /* 6, 12 way */ + nr_maps = ilog2(cxld->interleave_ways / 3); + + if (cxims->nr_xormaps < nr_maps) { + dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n", + cxims->nr_xormaps, nr_maps); + return -ENXIO; + } + + cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps), + GFP_KERNEL); + if (!cximsd) + return -ENOMEM; + memcpy(cximsd->xormaps, cxims->xormap_list, + nr_maps * sizeof(*cximsd->xormaps)); + cximsd->nr_maps = nr_maps; + cxlrd->platform_data = cximsd; + + return 0; +} + static unsigned long cfmws_to_decoder_flags(int restrictions) { unsigned long flags = CXL_DECODER_F_ENABLE; @@ -33,8 +142,10 @@ static int cxl_acpi_cfmws_verify(struct device *dev, int rc, expected_len; unsigned int ways; - if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) { - dev_err(dev, "CFMWS Unsupported Interleave Arithmetic\n"); + if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO && + cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) { + dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n", + cfmws->interleave_arithmetic); return -EINVAL; } @@ -84,9 +195,11 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, struct cxl_cfmws_context *ctx = arg; struct cxl_port *root_port = ctx->root_port; struct resource *cxl_res = ctx->cxl_res; + struct cxl_cxims_context cxims_ctx; struct cxl_root_decoder *cxlrd; struct device *dev = ctx->dev; struct acpi_cedt_cfmws *cfmws; + cxl_calc_hb_fn cxl_calc_hb; struct cxl_decoder *cxld; unsigned int ways, i, ig; struct resource *res; @@ -128,7 +241,12 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, if (rc) goto err_insert; - cxlrd = cxl_root_decoder_alloc(root_port, ways); + if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) + cxl_calc_hb = cxl_hb_modulo; + else + cxl_calc_hb = cxl_hb_xor; + + cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb); if (IS_ERR(cxlrd)) return 0; @@ -148,7 +266,20 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, ig = CXL_DECODER_MIN_GRANULARITY; cxld->interleave_granularity = ig; + if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) { + if (ways != 1 && ways != 3) { + cxims_ctx = (struct cxl_cxims_context) { + .dev = dev, + .cxlrd = cxlrd, + }; + rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS, + cxl_parse_cxims, &cxims_ctx); + if (rc < 0) + goto err_xormap; + } + } rc = cxl_decoder_add(cxld, target_map); +err_xormap: if (rc) put_device(&cxld->dev); else diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index e7556864ea80..42cdf224a85d 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1428,7 +1428,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, return rc; } -static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) +struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) { struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; struct cxl_decoder *cxld = &cxlsd->cxld; @@ -1441,6 +1441,7 @@ static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) return cxlrd->cxlsd.target[pos % iw]; } +EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL); static struct lock_class_key cxl_decoder_key; @@ -1502,6 +1503,7 @@ static int 
cxl_switch_decoder_init(struct cxl_port *port, * cxl_root_decoder_alloc - Allocate a root level decoder * @port: owning CXL root of this decoder * @nr_targets: static number of downstream targets + * @calc_hb: which host bridge covers the n'th position by granularity * * Return: A new cxl decoder to be registered by cxl_decoder_add(). A * 'CXL root' decoder is one that decodes from a top-level / static platform @@ -1509,7 +1511,8 @@ static int cxl_switch_decoder_init(struct cxl_port *port, * topology. */ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, - unsigned int nr_targets) + unsigned int nr_targets, + cxl_calc_hb_fn calc_hb) { struct cxl_root_decoder *cxlrd; struct cxl_switch_decoder *cxlsd; @@ -1531,7 +1534,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, return ERR_PTR(rc); } - cxlrd->calc_hb = cxl_hb_modulo; + cxlrd->calc_hb = calc_hb; cxld = &cxlsd->cxld; cxld->dev.type = &cxl_decoder_root_type; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index ac75554b5d76..5f611b8a37cf 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -324,18 +324,23 @@ struct cxl_switch_decoder { struct cxl_dport *target[]; }; +struct cxl_root_decoder; +typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd, + int pos); /** * struct cxl_root_decoder - Static platform CXL address decoder * @res: host / parent resource for region allocations * @region_id: region id for next region provisioning event * @calc_hb: which host bridge covers the n'th position by granularity + * @platform_data: platform specific configuration data * @cxlsd: base cxl switch decoder */ struct cxl_root_decoder { struct resource *res; atomic_t region_id; - struct cxl_dport *(*calc_hb)(struct cxl_root_decoder *cxlrd, int pos); + cxl_calc_hb_fn calc_hb; + void *platform_data; struct cxl_switch_decoder cxlsd; }; @@ -581,7 +586,9 @@ struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); bool is_root_decoder(struct device *dev); bool is_endpoint_decoder(struct device *dev); struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, - unsigned int nr_targets); + unsigned int nr_targets, + cxl_calc_hb_fn calc_hb); +struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos); struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, unsigned int nr_targets); int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); -- cgit v1.2.3 From 7a7e6edfca857f490e3a963198c67620de26b7f0 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Wed, 30 Nov 2022 14:47:26 -0800 Subject: tools/testing/cxl: Add XOR Math support to cxl_test Expand the cxl_test topology to include CFMWS's that use XOR math for interleave arithmetic, as defined in the CXL Specification 3.0. With this expanded topology, cxl_test is useful for testing: x1,x2,x4 ways with XOR interleave arithmetic. Define the additional XOR CFMWS entries to appear only with the module parameter interleave_arithmetic=1. The cxl_test default continues to be modulo math. 
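For example, to load the test topology with XOR interleave arithmetic enabled: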
modprobe cxl_test interleave_arithmetic=1 Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/54670400cd48ba7fcc6d8ee0d6ae2276d3f51aad.1669847017.git.alison.schofield@intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/cxl.c | 118 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 115 insertions(+), 3 deletions(-) diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 7edce12fd2ce..c7dba7550106 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -11,6 +11,8 @@ #include #include "mock.h" +static int interleave_arithmetic; + #define NR_CXL_HOST_BRIDGES 2 #define NR_CXL_SINGLE_HOST 1 #define NR_CXL_ROOT_PORTS 2 @@ -122,6 +124,22 @@ static struct { struct acpi_cedt_cfmws cfmws; u32 target[1]; } cfmws4; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[1]; + } cfmws5; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[2]; + } cfmws6; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[4]; + } cfmws7; + struct { + struct acpi_cedt_cxims cxims; + u64 xormap_list[2]; + } cxims0; } __packed mock_cedt = { .cedt = { .header = { @@ -229,14 +247,89 @@ static struct { }, .target = { 2 }, }, + /* .cfmws5,6,7 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */ + .cfmws5 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws5), + }, + .interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR, + .interleave_ways = 0, + .granularity = 4, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_PMEM, + .qtg_id = 0, + .window_size = SZ_256M * 8UL, + }, + .target = { 0, }, + }, + .cfmws6 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws6), + }, + .interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR, + .interleave_ways = 1, + .granularity = 0, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_PMEM, + .qtg_id = 1, + .window_size = SZ_256M * 8UL, + }, + .target = { 0, 1, }, + }, + .cfmws7 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws7), + }, + .interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR, + .interleave_ways = 2, + .granularity = 0, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_PMEM, + .qtg_id = 0, + .window_size = SZ_256M * 16UL, + }, + .target = { 0, 1, 0, 1, }, + }, + .cxims0 = { + .cxims = { + .header = { + .type = ACPI_CEDT_TYPE_CXIMS, + .length = sizeof(mock_cedt.cxims0), + }, + .hbig = 0, + .nr_xormaps = 2, + }, + .xormap_list = { 0x404100, 0x808200, }, + }, }; -struct acpi_cedt_cfmws *mock_cfmws[] = { +struct acpi_cedt_cfmws *mock_cfmws[8] = { [0] = &mock_cedt.cfmws0.cfmws, [1] = &mock_cedt.cfmws1.cfmws, [2] = &mock_cedt.cfmws2.cfmws, [3] = &mock_cedt.cfmws3.cfmws, [4] = &mock_cedt.cfmws4.cfmws, + /* Modulo Math above, XOR Math below */ + [5] = &mock_cedt.cfmws5.cfmws, + [6] = &mock_cedt.cfmws6.cfmws, + [7] = &mock_cedt.cfmws7.cfmws, +}; + +static int cfmws_start; +static int cfmws_end; +#define CFMWS_MOD_ARRAY_START 0 +#define CFMWS_MOD_ARRAY_END 4 +#define CFMWS_XOR_ARRAY_START 5 +#define CFMWS_XOR_ARRAY_END 7 + +struct acpi_cedt_cxims *mock_cxims[1] = { + [0] = &mock_cedt.cxims0.cxims, }; struct cxl_mock_res { @@ -308,7 +401,7 @@ static int populate_cedt(void) chbs->length = size; } - for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) { + for (i = cfmws_start; i <= cfmws_end; i++) { struct acpi_cedt_cfmws *window = mock_cfmws[i]; res = 
alloc_mock_res(window->window_size); @@ -351,12 +444,19 @@ static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id, } if (id == ACPI_CEDT_TYPE_CFMWS) - for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) { + for (i = cfmws_start; i <= cfmws_end; i++) { h = (union acpi_subtable_headers *) mock_cfmws[i]; end = (unsigned long) h + mock_cfmws[i]->header.length; handler_arg(h, arg, end); } + if (id == ACPI_CEDT_TYPE_CXIMS) + for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) { + h = (union acpi_subtable_headers *)mock_cxims[i]; + end = (unsigned long)h + mock_cxims[i]->header.length; + handler_arg(h, arg, end); + } + return 0; } @@ -897,6 +997,16 @@ static __init int cxl_test_init(void) if (rc) goto err_gen_pool_add; + if (interleave_arithmetic == 1) { + cfmws_start = CFMWS_XOR_ARRAY_START; + cfmws_end = CFMWS_XOR_ARRAY_END; + dev_dbg(NULL, "cxl_test loading xor math option\n"); + } else { + cfmws_start = CFMWS_MOD_ARRAY_START; + cfmws_end = CFMWS_MOD_ARRAY_END; + dev_dbg(NULL, "cxl_test loading modulo math option\n"); + } + rc = populate_cedt(); if (rc) goto err_populate; @@ -1073,6 +1183,8 @@ static __exit void cxl_test_exit(void) unregister_cxl_mock_ops(&cxl_mock_ops); } +module_param(interleave_arithmetic, int, 0000); +MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1"); module_init(cxl_test_init); module_exit(cxl_test_exit); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 7592d935b7ae71e2b4ff93830743c39a9d13d113 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:34:10 -0800 Subject: cxl/mem: Move devm_cxl_add_endpoint() from cxl_core to cxl_mem tl;dr: Clean up an unnecessary export and enable cxl_test. An RCD (Restricted CXL Device), in contrast to a typical CXL device in a VH topology, obtains its component registers from the bottom half of the associated CXL host bridge RCRB (Root Complex Register Block). In turn this means that cxl_rcrb_to_component() needs to be called from devm_cxl_add_endpoint(). Presently devm_cxl_add_endpoint() is part of the CXL core, but the only user is the CXL mem module. Move it from cxl_core to cxl_mem to not only get rid of an unnecessary export, but to also enable its call out to cxl_rcrb_to_component(), in a subsequent patch, to be mocked by cxl_test. Recall that cxl_test can only mock exported symbols, and since cxl_rcrb_to_component() is itself inside the core, all callers must be outside of cxl_core to allow cxl_test to mock it. 
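To make the benefit concrete, here is a minimal sketch of the mock pattern this move enables (the actual wrapper for cxl_rcrb_to_component() arrives in a later patch; details here are illustrative, modeled on the existing __wrap_ helpers in tools/testing/cxl/test/mock.c, which substitute exported symbols via the linker's --wrap option):

	resource_size_t __wrap_cxl_rcrb_to_component(struct device *dev,
						     resource_size_t rcrb,
						     enum cxl_rcrb which)
	{
		int index;
		resource_size_t component_reg_phys;
		struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

		if (ops && ops->is_mock_port(dev))
			component_reg_phys = CXL_RESOURCE_NONE;	/* mock answer */
		else
			component_reg_phys = cxl_rcrb_to_component(dev, rcrb, which);
		put_cxl_mock_ops(index);

		return component_reg_phys;
	}

Because --wrap only rewrites references in objects linked against the wrapper, a call made from inside cxl_core itself would bypass the mock, hence the move of the caller out of the core.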
Reviewed-by: Robert Richter Link: https://lore.kernel.org/r/166993045072.1882361.13944923741276843683.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/core.h | 8 -------- drivers/cxl/core/port.c | 39 --------------------------------------- drivers/cxl/cxl.h | 2 -- drivers/cxl/cxlmem.h | 9 +++++++++ drivers/cxl/mem.c | 38 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 47 insertions(+), 49 deletions(-) diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 1d8f87be283f..8c04672dca56 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -58,14 +58,6 @@ extern struct rw_semaphore cxl_dpa_rwsem; bool is_switch_decoder(struct device *dev); struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); -static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port, - struct cxl_memdev *cxlmd) -{ - if (!port) - return NULL; - - return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev); -} int cxl_memdev_init(void); void cxl_memdev_exit(void); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index dae2ca31885e..4982b6902ef5 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1212,45 +1212,6 @@ static void reap_dports(struct cxl_port *port) } } -int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, - struct cxl_dport *parent_dport) -{ - struct cxl_port *parent_port = parent_dport->port; - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_port *endpoint, *iter, *down; - int rc; - - /* - * Now that the path to the root is established record all the - * intervening ports in the chain. - */ - for (iter = parent_port, down = NULL; !is_cxl_root(iter); - down = iter, iter = to_cxl_port(iter->dev.parent)) { - struct cxl_ep *ep; - - ep = cxl_ep_load(iter, cxlmd); - ep->next = down; - } - - endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev, - cxlds->component_reg_phys, parent_dport); - if (IS_ERR(endpoint)) - return PTR_ERR(endpoint); - - rc = cxl_endpoint_autoremove(cxlmd, endpoint); - if (rc) - return rc; - - if (!endpoint->dev.driver) { - dev_err(&cxlmd->dev, "%s failed probe\n", - dev_name(&endpoint->dev)); - return -ENXIO; - } - - return 0; -} -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL); - static void cxl_detach_ep(void *data) { struct cxl_memdev *cxlmd = data; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 1342e4e61537..9a212ab3cae4 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -560,8 +560,6 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port); struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, resource_size_t component_reg_phys, struct cxl_dport *parent_dport); -int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, - struct cxl_dport *parent_dport); struct cxl_port *find_cxl_root(struct device *dev); int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); void cxl_bus_rescan(void); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index c1c9960ab05f..e082991bc58c 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -80,6 +80,15 @@ static inline bool is_cxl_endpoint(struct cxl_port *port) struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds); +static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port, + struct cxl_memdev *cxlmd) +{ + if (!port) + return NULL; + + return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev); +} + /** * struct cxl_mbox_cmd - A command to be submitted to hardware. 
* @opcode: (input) The command set and command submitted to hardware. diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 549b6b499bae..aa63ce8c7ca6 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -45,6 +45,44 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data) return 0; } +static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, + struct cxl_dport *parent_dport) +{ + struct cxl_port *parent_port = parent_dport->port; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_port *endpoint, *iter, *down; + int rc; + + /* + * Now that the path to the root is established record all the + * intervening ports in the chain. + */ + for (iter = parent_port, down = NULL; !is_cxl_root(iter); + down = iter, iter = to_cxl_port(iter->dev.parent)) { + struct cxl_ep *ep; + + ep = cxl_ep_load(iter, cxlmd); + ep->next = down; + } + + endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev, + cxlds->component_reg_phys, parent_dport); + if (IS_ERR(endpoint)) + return PTR_ERR(endpoint); + + rc = cxl_endpoint_autoremove(cxlmd, endpoint); + if (rc) + return rc; + + if (!endpoint->dev.driver) { + dev_err(&cxlmd->dev, "%s failed probe\n", + dev_name(&endpoint->dev)); + return -ENXIO; + } + + return 0; +} + static int cxl_mem_probe(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); -- cgit v1.2.3 From 0a19bfc8de93d5b5d12cf0a7bb74efc88b9ad077 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:34:16 -0800 Subject: cxl/port: Add RCD endpoint port enumeration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unlike a CXL memory expander in a VH topology that has at least one intervening 'struct cxl_port' instance between itself and the CXL root device, an RCD attaches one-level higher. For example:

VH
           ┌──────────┐
           │ ACPI0017 │
           │  root0   │
           └─────┬────┘
                 │
           ┌─────┴────┐
           │  dport0  │
     ┌─────┤ ACPI0016 ├─────┐
     │     │  port1   │     │
     │     └────┬─────┘     │
     │          │           │
  ┌──┴───┐   ┌──┴───┐   ┌───┴──┐
  │dport0│   │dport1│   │dport2│
  │ RP0  │   │ RP1  │   │ RP2  │
  └──────┘   └──┬───┘   └──────┘
                │
            ┌───┴─────┐
            │endpoint0│
            │  port2  │
            └─────────┘

...vs:

RCH
┌──────────┐
│ ACPI0017 │
│  root0   │
└────┬─────┘
     │
 ┌───┴────┐
 │ dport0 │
 │ACPI0016│
 └───┬────┘
     │
┌────┴─────┐
│endpoint0 │
│  port1   │
└──────────┘

So arrange for the endpoint port in the RCH/RCD case to appear directly connected to the host-bridge in its singular role as a dport. Compare that to the VH case where the host-bridge serves a dual role as a 'cxl_dport' for the CXL root device *and* a 'cxl_port' upstream port for the Root Ports in the Root Complex that are modeled as 'cxl_dport' instances in the CXL topology. Another deviation from the VH case is that RCDs may need to look up their component registers from the Root Complex Register Block (RCRB). That platform-firmware-specified RCRB area is cached by the cxl_acpi driver and conveyed via the host-bridge dport to the cxl_mem driver to perform the cxl_rcrb_to_component() lookup for the endpoint port (See 9.11.8 CXL Devices Attached to an RCH for the lookup of the upstream port component registers).
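Condensing the probe-time decision this change introduces (an excerpt of the cxl_mem_probe() logic in the diff below):

	if (dport->rch)					/* RCH: no intervening cxl_port */
		endpoint_parent = parent_port->uport;	/* the host bridge device itself */
	else						/* VH: typical port hierarchy */
		endpoint_parent = &parent_port->dev;

The endpoint port is then added with that parent, so an RCD endpoint registers directly under the host bridge.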
Tested-by: Robert Richter Link: https://lore.kernel.org/r/166993045621.1882361.1730100141527044744.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Robert Richter Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/port.c | 7 +++++++ drivers/cxl/cxlmem.h | 2 ++ drivers/cxl/mem.c | 33 +++++++++++++++++++++++++-------- drivers/cxl/pci.c | 10 ++++++++++ 4 files changed, 44 insertions(+), 8 deletions(-) diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 4982b6902ef5..50bdbd9f8da3 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1369,6 +1369,13 @@ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd) struct device *iter; int rc; + /* + * Skip intermediate port enumeration in the RCH case, there + * are no ports in between a host bridge and an endpoint. + */ + if (cxlmd->cxlds->rcd) + return 0; + rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd); if (rc) return rc; diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index e082991bc58c..35d485d041f0 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -201,6 +201,7 @@ struct cxl_endpoint_dvsec_info { * @dev: The device associated with this CXL state * @regs: Parsed register blocks * @cxl_dvsec: Offset to the PCIe device DVSEC + * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH) * @payload_size: Size of space for payload * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) * @lsa_size: Size of Label Storage Area @@ -235,6 +236,7 @@ struct cxl_dev_state { struct cxl_regs regs; int cxl_dvsec; + bool rcd; size_t payload_size; size_t lsa_size; struct mutex mbox_mutex; /* Protects device mailbox and firmware */ diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index aa63ce8c7ca6..39c4b54f0715 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -45,12 +45,13 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data) return 0; } -static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, +static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd, struct cxl_dport *parent_dport) { struct cxl_port *parent_port = parent_dport->port; struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_port *endpoint, *iter, *down; + resource_size_t component_reg_phys; int rc; /* @@ -65,8 +66,18 @@ static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, ep->next = down; } - endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev, - cxlds->component_reg_phys, parent_dport); + /* + * The component registers for an RCD might come from the + * host-bridge RCRB if they are not already mapped via the + * typical register locator mechanism.
+ */ + if (parent_dport->rch && cxlds->component_reg_phys == CXL_RESOURCE_NONE) + component_reg_phys = cxl_rcrb_to_component( + &cxlmd->dev, parent_dport->rcrb, CXL_RCRB_UPSTREAM); + else + component_reg_phys = cxlds->component_reg_phys; + endpoint = devm_cxl_add_port(host, &cxlmd->dev, component_reg_phys, + parent_dport); if (IS_ERR(endpoint)) return PTR_ERR(endpoint); @@ -87,6 +98,7 @@ static int cxl_mem_probe(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct device *endpoint_parent; struct cxl_port *parent_port; struct cxl_dport *dport; struct dentry *dentry; @@ -119,17 +131,22 @@ static int cxl_mem_probe(struct device *dev) return -ENXIO; } - device_lock(&parent_port->dev); - if (!parent_port->dev.driver) { + if (dport->rch) + endpoint_parent = parent_port->uport; + else + endpoint_parent = &parent_port->dev; + + device_lock(endpoint_parent); + if (!endpoint_parent->driver) { dev_err(dev, "CXL port topology %s not enabled\n", - dev_name(&parent_port->dev)); + dev_name(endpoint_parent)); rc = -ENXIO; goto unlock; } - rc = devm_cxl_add_endpoint(cxlmd, dport); + rc = devm_cxl_add_endpoint(endpoint_parent, cxlmd, dport); unlock: - device_unlock(&parent_port->dev); + device_unlock(endpoint_parent); put_device(&parent_port->dev); if (rc) return rc; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index e15da405b948..73ff6c33a0c0 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -433,6 +433,15 @@ static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds) } } +/* + * Assume that any RCIEP that emits the CXL memory expander class code + * is an RCD + */ +static bool is_cxl_restricted(struct pci_dev *pdev) +{ + return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END; +} + static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cxl_register_map map; @@ -455,6 +464,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(cxlds)) return PTR_ERR(cxlds); + cxlds->rcd = is_cxl_restricted(pdev); cxlds->serial = pci_get_dsn(pdev); cxlds->cxl_dvsec = pci_find_dvsec_capability( pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE); -- cgit v1.2.3 From c9435dbee119f42132af2c3fc0382d16bda32601 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 1 Dec 2022 13:34:21 -0800 Subject: tools/testing/cxl: Add an RCH topology In an RCH topology, a Root Complex Integrated Endpoint attached directly beneath a CXL host bridge represents the memory expander. Unlike a VH topology, there is no CXL/PCIe Root Port that hosts the endpoint. The CXL subsystem maps this as the CXL root object (ACPI0017 on ACPI based systems) targeting the host-bridge as a dport, as usual, but then that dport directly hosts the endpoint port. Mock up that configuration with a 4th host-bridge that has a 'cxl_rcd' device instance as its immediate child.
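The mock topology mirrors that relationship; condensed from the cxl_rch_init() addition below, the RCD platform device is registered as an immediate child of the mock host bridge:

	pdev = platform_device_alloc("cxl_rcd", idx);
	if (!pdev)
		goto err_mem;
	pdev->dev.parent = &rch->dev;	/* endpoint hangs directly off the host bridge */
	rc = platform_device_add(pdev);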
Reviewed-by: Alison Schofield Link: https://lore.kernel.org/r/166993046170.1882361.12460762475782283638.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Robert Richter Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- tools/testing/cxl/test/cxl.c | 150 ++++++++++++++++++++++++++++++++++++++++--- tools/testing/cxl/test/mem.c | 37 ++++++++++- 2 files changed, 176 insertions(+), 11 deletions(-) diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c index 7d87be53ac8e..ffe775178507 100644 --- a/tools/testing/cxl/test/cxl.c +++ b/tools/testing/cxl/test/cxl.c @@ -13,9 +13,11 @@ #define NR_CXL_HOST_BRIDGES 2 #define NR_CXL_SINGLE_HOST 1 +#define NR_CXL_RCH 1 #define NR_CXL_ROOT_PORTS 2 #define NR_CXL_SWITCH_PORTS 2 #define NR_CXL_PORT_DECODERS 8 +#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH) static struct platform_device *cxl_acpi; static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES]; @@ -35,6 +37,8 @@ static struct platform_device *cxl_swd_single[NR_MEM_SINGLE]; struct platform_device *cxl_mem[NR_MEM_MULTI]; struct platform_device *cxl_mem_single[NR_MEM_SINGLE]; +static struct platform_device *cxl_rch[NR_CXL_RCH]; +static struct platform_device *cxl_rcd[NR_CXL_RCH]; static inline bool is_multi_bridge(struct device *dev) { @@ -57,7 +61,7 @@ static inline bool is_single_bridge(struct device *dev) } static struct acpi_device acpi0017_mock; -static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = { +static struct acpi_device host_bridge[NR_BRIDGES] = { [0] = { .handle = &host_bridge[0], }, @@ -67,7 +71,9 @@ static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] [2] = { .handle = &host_bridge[2], }, - + [3] = { + .handle = &host_bridge[3], + }, }; static bool is_mock_dev(struct device *dev) @@ -80,6 +86,9 @@ static bool is_mock_dev(struct device *dev) for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) if (dev == &cxl_mem_single[i]->dev) return true; + for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) + if (dev == &cxl_rcd[i]->dev) + return true; if (dev == &cxl_acpi->dev) return true; return false; @@ -101,7 +110,7 @@ static bool is_mock_adev(struct acpi_device *adev) static struct { struct acpi_table_cedt cedt; - struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST]; + struct acpi_cedt_chbs chbs[NR_BRIDGES]; struct { struct acpi_cedt_cfmws cfmws; u32 target[1]; @@ -122,6 +131,10 @@ static struct { struct acpi_cedt_cfmws cfmws; u32 target[1]; } cfmws4; + struct { + struct acpi_cedt_cfmws cfmws; + u32 target[1]; + } cfmws5; } __packed mock_cedt = { .cedt = { .header = { @@ -154,6 +167,14 @@ static struct { .uid = 2, .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, }, + .chbs[3] = { + .header = { + .type = ACPI_CEDT_TYPE_CHBS, + .length = sizeof(mock_cedt.chbs[0]), + }, + .uid = 3, + .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11, + }, .cfmws0 = { .cfmws = { .header = { @@ -229,6 +250,21 @@ static struct { }, .target = { 2 }, }, + .cfmws5 = { + .cfmws = { + .header = { + .type = ACPI_CEDT_TYPE_CFMWS, + .length = sizeof(mock_cedt.cfmws5), + }, + .interleave_ways = 0, + .granularity = 4, + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | + ACPI_CEDT_CFMWS_RESTRICT_VOLATILE, + .qtg_id = 5, + .window_size = SZ_256M, + }, + .target = { 3 }, + }, }; struct acpi_cedt_cfmws *mock_cfmws[] = { @@ -237,6 +273,7 @@ struct acpi_cedt_cfmws *mock_cfmws[] = { [2] = &mock_cedt.cfmws2.cfmws, [3] = &mock_cedt.cfmws3.cfmws, [4] = &mock_cedt.cfmws4.cfmws, + [5] = &mock_cedt.cfmws5.cfmws, }; struct 
cxl_mock_res { @@ -262,11 +299,11 @@ static void depopulate_all_mock_resources(void) mutex_unlock(&mock_res_lock); } -static struct cxl_mock_res *alloc_mock_res(resource_size_t size) +static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align) { struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL); struct genpool_data_align data = { - .align = SZ_256M, + .align = align, }; unsigned long phys; @@ -301,7 +338,7 @@ static int populate_cedt(void) else size = ACPI_CEDT_CHBS_LENGTH_CXL11; - res = alloc_mock_res(size); + res = alloc_mock_res(size, size); if (!res) return -ENOMEM; chbs->base = res->range.start; @@ -311,7 +348,7 @@ static int populate_cedt(void) for (i = 0; i < ARRAY_SIZE(mock_cfmws); i++) { struct acpi_cedt_cfmws *window = mock_cfmws[i]; - res = alloc_mock_res(window->window_size); + res = alloc_mock_res(window->window_size, SZ_256M); if (!res) return -ENOMEM; window->base_hpa = res->range.start; @@ -372,6 +409,10 @@ static bool is_mock_bridge(struct device *dev) for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) if (dev == &cxl_hb_single[i]->dev) return true; + for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) + if (dev == &cxl_rch[i]->dev) + return true; + return false; } @@ -441,7 +482,7 @@ mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname, return AE_OK; } -static struct pci_bus mock_pci_bus[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST]; +static struct pci_bus mock_pci_bus[NR_BRIDGES]; static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = { [0] = { .bus = &mock_pci_bus[0], @@ -452,6 +493,9 @@ static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = { [2] = { .bus = &mock_pci_bus[2], }, + [3] = { + .bus = &mock_pci_bus[3], + }, }; @@ -738,6 +782,87 @@ static void mock_companion(struct acpi_device *adev, struct device *dev) #define SZ_512G (SZ_64G * 8) #endif +static __init int cxl_rch_init(void) +{ + int rc, i; + + for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) { + int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i; + struct acpi_device *adev = &host_bridge[idx]; + struct platform_device *pdev; + + pdev = platform_device_alloc("cxl_host_bridge", idx); + if (!pdev) + goto err_bridge; + + mock_companion(adev, &pdev->dev); + rc = platform_device_add(pdev); + if (rc) { + platform_device_put(pdev); + goto err_bridge; + } + + cxl_rch[i] = pdev; + mock_pci_bus[idx].bridge = &pdev->dev; + rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj, + "firmware_node"); + if (rc) + goto err_bridge; + } + + for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) { + int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i; + struct platform_device *rch = cxl_rch[i]; + struct platform_device *pdev; + + pdev = platform_device_alloc("cxl_rcd", idx); + if (!pdev) + goto err_mem; + pdev->dev.parent = &rch->dev; + set_dev_node(&pdev->dev, i % 2); + + rc = platform_device_add(pdev); + if (rc) { + platform_device_put(pdev); + goto err_mem; + } + cxl_rcd[i] = pdev; + } + + return 0; + +err_mem: + for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) + platform_device_unregister(cxl_rcd[i]); +err_bridge: + for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) { + struct platform_device *pdev = cxl_rch[i]; + + if (!pdev) + continue; + sysfs_remove_link(&pdev->dev.kobj, "firmware_node"); + platform_device_unregister(cxl_rch[i]); + } + + return rc; +} + +static void cxl_rch_exit(void) +{ + int i; + + for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--) + platform_device_unregister(cxl_rcd[i]); + for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) { + struct platform_device *pdev = cxl_rch[i]; + + if 
(!pdev) + continue; + sysfs_remove_link(&pdev->dev.kobj, "firmware_node"); + platform_device_unregister(cxl_rch[i]); + } +} + static __init int cxl_single_init(void) { int i, rc; @@ -1010,9 +1135,13 @@ static __init int cxl_test_init(void) if (rc) goto err_mem; + rc = cxl_rch_init(); + if (rc) + goto err_single; + cxl_acpi = platform_device_alloc("cxl_acpi", 0); if (!cxl_acpi) - goto err_single; + goto err_rch; mock_companion(&acpi0017_mock, &cxl_acpi->dev); acpi0017_mock.dev.bus = &platform_bus_type; @@ -1025,6 +1154,8 @@ static __init int cxl_test_init(void) err_add: platform_device_put(cxl_acpi); +err_rch: + cxl_rch_exit(); err_single: cxl_single_exit(); err_mem: @@ -1062,6 +1193,7 @@ static __exit void cxl_test_exit(void) int i; platform_device_unregister(cxl_acpi); + cxl_rch_exit(); cxl_single_exit(); for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) platform_device_unregister(cxl_mem[i]); diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index a4ee8e61dd60..b59c5976b2d9 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -100,6 +100,24 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) return 0; } +static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) +{ + struct cxl_mbox_identify id = { + .fw_revision = { "mock fw v1 " }, + .total_capacity = + cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), + .volatile_capacity = + cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), + }; + + if (cmd->size_out < sizeof(id)) + return -EINVAL; + + memcpy(cmd->payload_out, &id, sizeof(id)); + + return 0; +} + static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_identify id = { @@ -216,7 +234,10 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * rc = mock_get_log(cxlds, cmd); break; case CXL_MBOX_OP_IDENTIFY: - rc = mock_id(cxlds, cmd); + if (cxlds->rcd) + rc = mock_rcd_id(cxlds, cmd); + else + rc = mock_id(cxlds, cmd); break; case CXL_MBOX_OP_GET_LSA: rc = mock_get_lsa(cxlds, cmd); @@ -245,6 +266,13 @@ static void label_area_release(void *lsa) vfree(lsa); } +static bool is_rcd(struct platform_device *pdev) +{ + const struct platform_device_id *id = platform_get_device_id(pdev); + + return !!id->driver_data; +} + static int cxl_mock_mem_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -268,6 +296,10 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) cxlds->serial = pdev->id; cxlds->mbox_send = cxl_mock_mbox_send; cxlds->payload_size = SZ_4K; + if (is_rcd(pdev)) { + cxlds->rcd = true; + cxlds->component_reg_phys = CXL_RESOURCE_NONE; + } rc = cxl_enumerate_cmds(cxlds); if (rc) @@ -289,7 +321,8 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) } static const struct platform_device_id cxl_mock_mem_ids[] = { - { .name = "cxl_mem", }, + { .name = "cxl_mem", 0 }, + { .name = "cxl_rcd", 1 }, { }, }; MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids); -- cgit v1.2.3 From da8380bbbe90f69bec4be69af4d0deb7cf2bbcfe Mon Sep 17 00:00:00 2001 From: Terry Bowman Date: Thu, 1 Dec 2022 13:34:27 -0800 Subject: cxl/acpi: Set ACPI's CXL _OSC to indicate RCD mode support ACPI's CXL _OSC support method communicates the OS's available CXL functionality to FW. The CXL _OSC support method includes a field to indicate the OS is capable of RCD mode. FW can potentially change its operation depending on the _OSC support method reported by the OS.
The ACPI driver currently only sets the ACPI _OSC support method to indicate CXL VH mode. Change the capability reported to also include CXL RCD mode. [1] CXL3.0 Table 9-26 'Interpretation of CXL _OSC Support Field' Signed-off-by: Terry Bowman [rrichter@amd.com: Reworded patch description.] Signed-off-by: Robert Richter Link: http://lore.kernel.org/r/Y4cRV/Sj0epVW7bE@rric.localdomain Link: https://lore.kernel.org/r/166993046717.1882361.10587956243041624761.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/acpi/pci_root.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 4e3db20e9cbb..b3c202d2a433 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -493,6 +493,7 @@ static u32 calculate_cxl_support(void) u32 support; support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT; + support |= OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT; if (pci_aer_available()) support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT; if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) -- cgit v1.2.3 From 397cd265815eae562685d9832dc8b61c927c1cfb Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 3 Dec 2022 09:14:41 -0800 Subject: cxl/regs: Fix sparse warning The 0day robot belatedly points out that @addr is not properly tagged as an iomap pointer: "drivers/cxl/core/regs.c:332:14: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected void *addr @@ got void [noderef] __iomem * @@" Fixes: 1168271ca054 ("cxl/acpi: Extract component registers of restricted hosts from RCRB") Reported-by: kernel test robot Reviewed-by: Robert Richter Reviewed-by: Alison Schofield Link: https://lore.kernel.org/r/167008768190.2516013.11918622906007677341.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index dfce37d11df1..1476a0299c9b 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -337,8 +337,8 @@ resource_size_t cxl_rcrb_to_component(struct device *dev, enum cxl_rcrb which) { resource_size_t component_reg_phys; + void __iomem *addr; u32 bar0, bar1; - void *addr; u16 cmd; u32 id; -- cgit v1.2.3 From cb4cdf74bd77af187085ab9d73ab9fd88283e3dc Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 5 Dec 2022 09:18:19 +0000 Subject: cxl/region: Fix spelling mistake "memergion" -> "memregion" There is a spelling mistake in a dev_warn message. Fix it. 
Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20221205091819.1943564-1-colin.i.king@gmail.com Signed-off-by: Dan Williams --- drivers/cxl/core/region.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index f75df35b9d3d..c56ddfed3393 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1969,7 +1969,7 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) { dev_warn( &cxlr->dev, - "Bypassing cpu_cache_invalidate_memergion() for testing!\n", + "Bypassing cpu_cache_invalidate_memregion() for testing!\n"); clear_bit(CXL_REGION_F_INCOHERENT, &cxlr->flags); return 0; } else { -- cgit v1.2.3 From 14628aec8415e4847ae7e470b175412896716cd8 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Sun, 4 Dec 2022 16:29:51 -0800 Subject: cxl/acpi: Fail decoder add if CXIMS for HBIG is missing The BIOS-provided CXIMS (CXL XOR Interleave Math Structure) is required for calculating a target's position in an interleave list during region creation. The CXL driver expects to discover a CXIMS that matches the HBIG (Host Bridge Interleave Granularity) and stores the xormaps found in that CXIMS for retrieval during region creation. If there is no CXIMS for an HBIG, no maps are stored. That leads to a NULL pointer dereference at xormap retrieval during region creation. Add a check during ACPI probe for the case of no matching CXIMS. Emit an error message and fail to add the decoder. Fixes: f9db85bfec0d ("cxl/acpi: Support CXL XOR Interleave Math (CXIMS)") Suggested-by: Dan Williams Signed-off-by: Alison Schofield Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/20221205002951.1788783-1-alison.schofield@intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 657ef250d848..420e322c85a1 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -282,6 +282,11 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, cxl_parse_cxims, &cxims_ctx); if (rc < 0) goto err_xormap; + if (!cxlrd->platform_data) { + dev_err(dev, "No CXIMS for HBIG %u\n", ig); + rc = -EINVAL; + goto err_xormap; + } } } rc = cxl_decoder_add(cxld, target_map); -- cgit v1.2.3 From 878b2de67af7095ddd35da859a9ef8db15a36163 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 11:44:41 -0800 Subject: tools/testing/cxl: Require cache invalidation bypass The typical environment where cxl_test is run, QEMU, does not support cpu_cache_invalidate_memregion(). Add the 'test' bypass symbols to the configuration check.
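For reference, a cxl_test-capable kernel configuration now needs both bypass options enabled; a minimal .config fragment (the surrounding options are whatever the build already requires):

	CONFIG_CXL_REGION_INVALIDATION_TEST=y
	CONFIG_NVDIMM_SECURITY_TEST=y

Without them, the BUILD_BUG_ON() assertions below fail the tools/testing/cxl build.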
Reported-by: Alison Schofield Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/167026948179.3527561.4535373655515827457.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/config_check.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/cxl/config_check.c b/tools/testing/cxl/config_check.c index de5e5b3652fd..c4c457e59841 100644 --- a/tools/testing/cxl/config_check.c +++ b/tools/testing/cxl/config_check.c @@ -10,4 +10,6 @@ void check(void) BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_BUS)); BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_ACPI)); BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM)); + BUILD_BUG_ON(!IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)); + BUILD_BUG_ON(!IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST)); } -- cgit v1.2.3 From d3cdf4585f2f9122d1165acca40e801c75afa320 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Mon, 5 Dec 2022 12:21:40 -0800 Subject: cxl/acpi: Warn about an invalid CHBCR in an existing CHBS entry After parsing for a CHBCR in cxl_get_chbcr() the case of (ctx.chbcr == CXL_RESOURCE_NONE) is a slightly different error reason than the !ctx.chbcr case. In the first case the CHBS was found but the CHBCR was invalid or something else failed to determine it, while in the latter case no CHBS entry exists at all. Update the warning message to reflect this. The log messages for the two cases can now be differentiated, making the reason for a failure easier to determine. Signed-off-by: Robert Richter Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/167027170051.3542509.10494781536638424397.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 420e322c85a1..f495f3ce45d1 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -455,7 +455,8 @@ static int add_host_bridge_dport(struct device *match, void *arg) dev_dbg(match, "RCRB found for UID %lld: %pa\n", uid, &ctx.rcrb); if (ctx.chbcr == CXL_RESOURCE_NONE) { - dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n", uid); + dev_warn(match, "CHBCR invalid for Host Bridge (UID %lld)\n", + uid); return 0; } -- cgit v1.2.3 From 83351ddb786b30952485a5c302cd0cffbd62097e Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 5 Dec 2022 14:16:07 -0700 Subject: cxl: update names for interleave granularity conversion macros Change names for granularity macros to clearly indicate which variable is encoded and which is the actual granularity.
granularity == interleave granularity eig == encoded interleave granularity Reviewed-by: Jonathan Cameron Reviewed-by: Alison Schofield Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/167027493237.3124429.8948852388671827664.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 4 ++-- drivers/cxl/core/hdm.c | 6 +++--- drivers/cxl/core/region.c | 6 +++--- drivers/cxl/cxl.h | 13 +++++++------ 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index f495f3ce45d1..99522e1b3993 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -86,7 +86,7 @@ static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg, unsigned int hbig, nr_maps; int rc; - rc = cxl_to_granularity(cxims->hbig, &hbig); + rc = eig_to_granularity(cxims->hbig, &hbig); if (rc) return rc; @@ -224,7 +224,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, rc = cxl_to_ways(cfmws->interleave_ways, &ways); if (rc) return rc; - rc = cxl_to_granularity(cfmws->granularity, &ig); + rc = eig_to_granularity(cfmws->granularity, &ig); if (rc) return rc; for (i = 0; i < ways; i++) diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 100d0881bde4..40b2ea99d92f 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -497,7 +497,7 @@ static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl) if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw), "invalid interleave_ways: %d\n", cxld->interleave_ways)) return; - if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig), + if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig), "invalid interleave_granularity: %d\n", cxld->interleave_granularity)) return; @@ -749,8 +749,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, port->id, cxld->id, ctrl); return rc; } - rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), - &cxld->interleave_granularity); + rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), + &cxld->interleave_granularity); if (rc) return rc; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index c56ddfed3393..6f3ca5c6f9f3 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -391,7 +391,7 @@ static ssize_t interleave_granularity_store(struct device *dev, if (rc) return rc; - rc = granularity_to_cxl(val, &ig); + rc = granularity_to_eig(val, &ig); if (rc) return rc; @@ -1028,7 +1028,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, parent_iw = parent_cxld->interleave_ways; } - rc = granularity_to_cxl(parent_ig, &peig); + rc = granularity_to_eig(parent_ig, &peig); if (rc) { dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", dev_name(parent_port->uport), @@ -1065,7 +1065,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, eig = peig; } - rc = cxl_to_granularity(eig, &ig); + rc = eig_to_granularity(eig, &ig); if (rc) { dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", dev_name(port->uport), dev_name(&port->dev), diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index ed2b0a2e80e2..7d0c81172a58 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -74,11 +74,11 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr) } /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */ -static inline int cxl_to_granularity(u16 ig, unsigned int *val) +static inline int eig_to_granularity(u16 eig, unsigned int *granularity) { - if (ig > 
CXL_DECODER_MAX_ENCODED_IG) + if (eig > CXL_DECODER_MAX_ENCODED_IG) return -EINVAL; - *val = CXL_DECODER_MIN_GRANULARITY << ig; + *granularity = CXL_DECODER_MIN_GRANULARITY << eig; return 0; } @@ -99,11 +99,12 @@ static inline int cxl_to_ways(u8 eniw, unsigned int *val) return 0; } -static inline int granularity_to_cxl(int g, u16 *ig) +static inline int granularity_to_eig(int granularity, u16 *eig) { - if (g > SZ_16K || g < CXL_DECODER_MIN_GRANULARITY || !is_power_of_2(g)) + if (granularity > SZ_16K || granularity < CXL_DECODER_MIN_GRANULARITY || + !is_power_of_2(granularity)) return -EINVAL; - *ig = ilog2(g) - 8; + *eig = ilog2(granularity) - 8; return 0; } -- cgit v1.2.3 From c99b2e8cf79616157eeab34828fbe6e41ef2271c Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 5 Dec 2022 14:20:01 -0700 Subject: cxl: update names for interleave ways conversion macros Change names for interleave ways macros to clearly indicate which variable is encoded and which is the actual ways value. ways == interleave ways eiw == encoded interleave ways Reviewed-by: Jonathan Cameron Signed-off-by: Dave Jiang Link: https://lore.kernel.org/r/167027516228.3124679.11265039496968588580.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 6 +++--- drivers/cxl/core/hdm.c | 6 +++--- drivers/cxl/core/region.c | 6 +++--- drivers/cxl/cxl.h | 14 +++++++------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 99522e1b3993..ad0849af42d7 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -34,7 +34,7 @@ static int cxl_xor_calc_n(u64 hpa, struct cxl_cxims_data *cximsd, int iw, } /* IW: 3,6,12 add a modulo calculation to 'n' */ if (!is_power_of_2(iw)) { - if (ways_to_cxl(iw, &eiw)) + if (ways_to_eiw(iw, &eiw)) return -1; hpa &= GENMASK_ULL(51, eiw + ig); n |= do_div(hpa, 3) << i; @@ -161,7 +161,7 @@ static int cxl_acpi_cfmws_verify(struct device *dev, return -EINVAL; } - rc = cxl_to_ways(cfmws->interleave_ways, &ways); + rc = eiw_to_ways(cfmws->interleave_ways, &ways); if (rc) { dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n", cfmws->interleave_ways); @@ -221,7 +221,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg, return 0; } - rc = cxl_to_ways(cfmws->interleave_ways, &ways); + rc = eiw_to_ways(cfmws->interleave_ways, &ways); if (rc) return rc; rc = eig_to_granularity(cfmws->granularity, &ig); diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 40b2ea99d92f..dcc16d7cb8f3 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -494,7 +494,7 @@ static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl) * Input validation ensures these warns never fire, but otherwise * suppress unititalized variable usage warnings. 
*/ - if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw), + if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw), "invalid interleave_ways: %d\n", cxld->interleave_ways)) return; if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig), @@ -741,8 +741,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, } cxld->target_type = CXL_DECODER_EXPANDER; } - rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl), - &cxld->interleave_ways); + rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl), + &cxld->interleave_ways); if (rc) { dev_warn(&port->dev, "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n", diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 6f3ca5c6f9f3..f8c36f4a9422 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -324,7 +324,7 @@ static ssize_t interleave_ways_store(struct device *dev, if (rc) return rc; - rc = ways_to_cxl(val, &iw); + rc = ways_to_eiw(val, &iw); if (rc) return rc; @@ -1036,7 +1036,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, return rc; } - rc = ways_to_cxl(parent_iw, &peiw); + rc = ways_to_eiw(parent_iw, &peiw); if (rc) { dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", dev_name(parent_port->uport), @@ -1045,7 +1045,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, } iw = cxl_rr->nr_targets; - rc = ways_to_cxl(iw, &eiw); + rc = ways_to_eiw(iw, &eiw); if (rc) { dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", dev_name(port->uport), dev_name(&port->dev), iw); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 7d0c81172a58..1b1cf459ac77 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -83,14 +83,14 @@ static inline int eig_to_granularity(u16 eig, unsigned int *granularity) } /* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */ -static inline int cxl_to_ways(u8 eniw, unsigned int *val) +static inline int eiw_to_ways(u8 eiw, unsigned int *ways) { - switch (eniw) { + switch (eiw) { case 0 ... 4: - *val = 1 << eniw; + *ways = 1 << eiw; break; case 8 ... 10: - *val = 3 << (eniw - 8); + *ways = 3 << (eiw - 8); break; default: return -EINVAL; @@ -108,12 +108,12 @@ static inline int granularity_to_eig(int granularity, u16 *eig) return 0; } -static inline int ways_to_cxl(unsigned int ways, u8 *iw) +static inline int ways_to_eiw(unsigned int ways, u8 *eiw) { if (ways > 16) return -EINVAL; if (is_power_of_2(ways)) { - *iw = ilog2(ways); + *eiw = ilog2(ways); return 0; } if (ways % 3) @@ -121,7 +121,7 @@ static inline int ways_to_cxl(unsigned int ways, u8 *iw) ways /= 3; if (!is_power_of_2(ways)) return -EINVAL; - *iw = ilog2(ways) + 8; + *eiw = ilog2(ways) + 8; return 0; } -- cgit v1.2.3 From f5ee4cc19c3eff4459931b6fe8f29d1e0cc204a5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 20:22:28 -0800 Subject: cxl/security: Fix Get Security State output payload endian handling Multi-byte integer values in CXL mailbox payloads are little endian. Add a definition of the Get Security State output payload and convert the value before testing flags. 
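To illustrate the failure mode the fix addresses (a minimal sketch, not from the patch): a little-endian wire value reinterpreted as a host-order integer only happens to work on little-endian machines.

	__le32 wire = cpu_to_le32(0x1);		/* device emits little endian */
	u32 raw = (__force u32)wire;		/* old behavior: 0x01000000 on big endian */
	u32 fixed = le32_to_cpu(wire);		/* new behavior: 0x1 on any host */

On big-endian hosts the unconverted value makes flag tests such as the CXL_PMEM_SEC_STATE_MASTER_PASS_SET check below misfire.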
Fixes: 328281155539 ("cxl/pmem: Introduce nvdimm_security_ops with ->get_flags() operation") Cc: Jonathan Cameron Cc: Dave Jiang Reviewed-by: Dave Jiang Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/167030054822.4044561.4917796262037689553.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/security.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index 5484d4eecfd1..ebb78b8944f5 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -16,14 +16,18 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm, struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; unsigned long security_flags = 0; + struct cxl_get_security_output { + __le32 flags; + } out; u32 sec_out; int rc; rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0, - &sec_out, sizeof(sec_out)); + &out, sizeof(out)); if (rc < 0) return 0; + sec_out = le32_to_cpu(out.flags); if (ptype == NVDIMM_MASTER) { if (sec_out & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags); -- cgit v1.2.3 From 5331cdf44dc389ac56f7ba5c24ca52d13eaad8d7 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 20:22:33 -0800 Subject: cxl/mbox: Enable cxl_mbox_send_cmd() users to validate output size Internally cxl_mbox_send_cmd() converts all passed-in parameters to a 'struct cxl_mbox_cmd' instance and sends that to cxlds->mbox_send(). It then teases the possibility that the caller can validate the output size. However, they cannot, since the resulting output size is not conveyed to the caller. Fix that by making the caller pass in a constructed 'struct cxl_mbox_cmd'. This prepares for a future patch to add output size validation on a per-command basis. Given the change in signature, also change the name to differentiate it from the user command submission path that performs more validation before generating the 'struct cxl_mbox_cmd' instance to execute. Reviewed-by: Dave Jiang Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/167030055370.4044561.17788093375112783036.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 86 ++++++++++++++++++++++++++++++------------------- drivers/cxl/cxlmem.h | 4 +-- drivers/cxl/pmem.c | 21 +++++++++--- drivers/cxl/security.c | 77 +++++++++++++++++++++++++++++++------------ 4 files changed, 126 insertions(+), 62 deletions(-) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 35dd889f1d3a..ed451ca60ce5 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -146,13 +146,9 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) } /** - * cxl_mbox_send_cmd() - Send a mailbox command to a device. + * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command * @cxlds: The device data for the operation - * @opcode: Opcode for the mailbox command. - * @in: The input payload for the mailbox command. - * @in_size: The length of the input payload - * @out: Caller allocated buffer for the output. - * @out_size: Expected size of output. + * @mbox_cmd: initialized command to execute * * Context: Any context. * Return: @@ -167,40 +163,37 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) * error. While this distinction can be useful for commands from userspace, the * kernel will only be able to use results when both are successful.
*/ -int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, - size_t in_size, void *out, size_t out_size) +int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *mbox_cmd) { - const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - struct cxl_mbox_cmd mbox_cmd = { - .opcode = opcode, - .payload_in = in, - .size_in = in_size, - .size_out = out_size, - .payload_out = out, - }; + const struct cxl_mem_command *cmd = + cxl_mem_find_command(mbox_cmd->opcode); + size_t out_size; int rc; - if (in_size > cxlds->payload_size || out_size > cxlds->payload_size) + if (mbox_cmd->size_in > cxlds->payload_size || + mbox_cmd->size_out > cxlds->payload_size) return -E2BIG; - rc = cxlds->mbox_send(cxlds, &mbox_cmd); + out_size = mbox_cmd->size_out; + rc = cxlds->mbox_send(cxlds, mbox_cmd); if (rc) return rc; - if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS) - return cxl_mbox_cmd_rc2errno(&mbox_cmd); + if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) + return cxl_mbox_cmd_rc2errno(mbox_cmd); /* * Variable sized commands can't be validated and so it's up to the * caller to do that if they wish. */ if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) { - if (mbox_cmd.size_out != out_size) + if (mbox_cmd->size_out != out_size) return -EIO; } return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL); static bool cxl_mem_raw_command_allowed(u16 opcode) { @@ -567,15 +560,25 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 while (remaining) { u32 xfer_size = min_t(u32, remaining, cxlds->payload_size); - struct cxl_mbox_get_log log = { + struct cxl_mbox_cmd mbox_cmd; + struct cxl_mbox_get_log log; + int rc; + + log = (struct cxl_mbox_get_log) { .uuid = *uuid, .offset = cpu_to_le32(offset), - .length = cpu_to_le32(xfer_size) + .length = cpu_to_le32(xfer_size), + }; + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_LOG, + .size_in = sizeof(log), + .payload_in = &log, + .size_out = xfer_size, + .payload_out = out, }; - int rc; - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log), - out, xfer_size); + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) return rc; @@ -621,19 +624,25 @@ static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel) static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds) { struct cxl_mbox_get_supported_logs *ret; + struct cxl_mbox_cmd mbox_cmd; int rc; ret = kvmalloc(cxlds->payload_size, GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret, - cxlds->payload_size); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS, + .size_out = cxlds->payload_size, + .payload_out = ret, + }; + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) { kvfree(ret); return ERR_PTR(rc); } + return ret; } @@ -735,11 +744,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL); static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds) { struct cxl_mbox_get_partition_info pi; + struct cxl_mbox_cmd mbox_cmd; int rc; - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0, - &pi, sizeof(pi)); - + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_PARTITION_INFO, + .size_out = sizeof(pi), + .payload_out = &pi, + }; + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc) return rc; @@ -768,10 +781,15 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) { /* 
See CXL 2.0 Table 175 Identify Memory Device Output Payload */ struct cxl_mbox_identify id; + struct cxl_mbox_cmd mbox_cmd; int rc; - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id, - sizeof(id)); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_IDENTIFY, + .size_out = sizeof(id), + .payload_out = &id, + }; + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) return rc; diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 785c6c12515d..c447577f5ad5 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -430,8 +430,8 @@ enum { CXL_PMEM_SEC_PASS_USER, }; -int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, - size_t in_size, void *out, size_t out_size); +int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *cmd); int cxl_dev_state_identify(struct cxl_dev_state *cxlds); int cxl_await_media_ready(struct cxl_dev_state *cxlds); int cxl_enumerate_cmds(struct cxl_dev_state *cxlds); diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 2fc8070b6a17..eedefebc4283 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -119,6 +119,7 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds, unsigned int buf_len) { struct cxl_mbox_get_lsa get_lsa; + struct cxl_mbox_cmd mbox_cmd; int rc; if (sizeof(*cmd) > buf_len) @@ -130,9 +131,15 @@ static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds, .offset = cpu_to_le32(cmd->in_offset), .length = cpu_to_le32(cmd->in_length), }; + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_LSA, + .payload_in = &get_lsa, + .size_in = sizeof(get_lsa), + .size_out = cmd->in_length, + .payload_out = cmd->out_buf, + }; - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa, - sizeof(get_lsa), cmd->out_buf, cmd->in_length); + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); cmd->status = 0; return rc; @@ -143,6 +150,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds, unsigned int buf_len) { struct cxl_mbox_set_lsa *set_lsa; + struct cxl_mbox_cmd mbox_cmd; int rc; if (sizeof(*cmd) > buf_len) @@ -161,10 +169,13 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds, .offset = cpu_to_le32(cmd->in_offset), }; memcpy(set_lsa->data, cmd->in_buf, cmd->in_length); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_SET_LSA, + .payload_in = set_lsa, + .size_in = struct_size(set_lsa, data, cmd->in_length), + }; - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa, - struct_size(set_lsa, data, cmd->in_length), - NULL, 0); + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); /* * Set "firmware" status (4-packed bytes at the end of the input diff --git a/drivers/cxl/security.c b/drivers/cxl/security.c index ebb78b8944f5..4ad4bda2d18e 100644 --- a/drivers/cxl/security.c +++ b/drivers/cxl/security.c @@ -19,11 +19,17 @@ static unsigned long cxl_pmem_get_security_flags(struct nvdimm *nvdimm, struct cxl_get_security_output { __le32 flags; } out; + struct cxl_mbox_cmd mbox_cmd; u32 sec_out; int rc; - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SECURITY_STATE, NULL, 0, - &out, sizeof(out)); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_SECURITY_STATE, + .size_out = sizeof(out), + .payload_out = &out, + }; + + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) return 0; @@ -62,17 +68,23 @@ static int cxl_pmem_security_change_key(struct nvdimm *nvdimm, struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; struct cxl_dev_state *cxlds = 
cxlmd->cxlds; + struct cxl_mbox_cmd mbox_cmd; struct cxl_set_pass set_pass; - int rc; - set_pass.type = ptype == NVDIMM_MASTER ? - CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; + set_pass = (struct cxl_set_pass) { + .type = ptype == NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER : + CXL_PMEM_SEC_PASS_USER, + }; memcpy(set_pass.old_pass, old_data->data, NVDIMM_PASSPHRASE_LEN); memcpy(set_pass.new_pass, new_data->data, NVDIMM_PASSPHRASE_LEN); - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_PASSPHRASE, - &set_pass, sizeof(set_pass), NULL, 0); - return rc; + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_SET_PASSPHRASE, + .size_in = sizeof(set_pass), + .payload_in = &set_pass, + }; + + return cxl_internal_send_cmd(cxlds, &mbox_cmd); } static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, @@ -83,15 +95,21 @@ static int __cxl_pmem_security_disable(struct nvdimm *nvdimm, struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_disable_pass dis_pass; - int rc; + struct cxl_mbox_cmd mbox_cmd; - dis_pass.type = ptype == NVDIMM_MASTER ? - CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; + dis_pass = (struct cxl_disable_pass) { + .type = ptype == NVDIMM_MASTER ? CXL_PMEM_SEC_PASS_MASTER : + CXL_PMEM_SEC_PASS_USER, + }; memcpy(dis_pass.pass, key_data->data, NVDIMM_PASSPHRASE_LEN); - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_DISABLE_PASSPHRASE, - &dis_pass, sizeof(dis_pass), NULL, 0); - return rc; + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_DISABLE_PASSPHRASE, + .size_in = sizeof(dis_pass), + .payload_in = &dis_pass, + }; + + return cxl_internal_send_cmd(cxlds, &mbox_cmd); } static int cxl_pmem_security_disable(struct nvdimm *nvdimm, @@ -111,8 +129,11 @@ static int cxl_pmem_security_freeze(struct nvdimm *nvdimm) struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_cmd mbox_cmd = { + .opcode = CXL_MBOX_OP_FREEZE_SECURITY, + }; - return cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_FREEZE_SECURITY, NULL, 0, NULL, 0); + return cxl_internal_send_cmd(cxlds, &mbox_cmd); } static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, @@ -122,11 +143,17 @@ static int cxl_pmem_security_unlock(struct nvdimm *nvdimm, struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; u8 pass[NVDIMM_PASSPHRASE_LEN]; + struct cxl_mbox_cmd mbox_cmd; int rc; memcpy(pass, key_data->data, NVDIMM_PASSPHRASE_LEN); - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_UNLOCK, - pass, NVDIMM_PASSPHRASE_LEN, NULL, 0); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_UNLOCK, + .size_in = NVDIMM_PASSPHRASE_LEN, + .payload_in = pass, + }; + + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) return rc; @@ -140,14 +167,22 @@ static int cxl_pmem_security_passphrase_erase(struct nvdimm *nvdimm, struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_memdev *cxlmd = cxl_nvd->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_cmd mbox_cmd; struct cxl_pass_erase erase; int rc; - erase.type = ptype == NVDIMM_MASTER ? - CXL_PMEM_SEC_PASS_MASTER : CXL_PMEM_SEC_PASS_USER; + erase = (struct cxl_pass_erase) { + .type = ptype == NVDIMM_MASTER ? 
CXL_PMEM_SEC_PASS_MASTER : + CXL_PMEM_SEC_PASS_USER, + }; memcpy(erase.pass, key->data, NVDIMM_PASSPHRASE_LEN); - rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE, - &erase, sizeof(erase), NULL, 0); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE, + .size_in = sizeof(erase), + .payload_in = &erase, + }; + + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) return rc; -- cgit v1.2.3 From 2aeaf663b85e436dc6287692b7561ffbf0aa4381 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 20:22:39 -0800 Subject: cxl/mbox: Add variable output size validation for internal commands cxl_internal_send_cmd() skips output size validation for variable output commands, which is not ideal. Most of the time internal usages want to fail if the output size does not match what was requested. For other commands where the caller cannot predict the size, there is usually a header that conveys how much valid data is in the payload. For those cases add @min_out as a parameter to specify what the minimum response payload needs to be for the caller to parse the rest of the payload. In this patch only Get Supported Logs has that behavior, but going forward record retrieval commands like Get Poison List and Get Event Records can use @min_out to retrieve a variable amount of records. Critically, this validation scheme skips the need to interrogate the cxl_mem_commands array, which in turn frees up the implementation to support internal command enabling without also enabling external / user commands. Reviewed-by: Dave Jiang Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/167030055918.4044561.10339573829837910505.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 23 ++++++++++++++--------- drivers/cxl/cxlmem.h | 2 ++ 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index ed451ca60ce5..c36a3589377a 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -166,9 +166,7 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *mbox_cmd) { - const struct cxl_mem_command *cmd = - cxl_mem_find_command(mbox_cmd->opcode); - size_t out_size; + size_t out_size, min_out; int rc; if (mbox_cmd->size_in > cxlds->payload_size || @@ -176,6 +174,7 @@ int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, return -E2BIG; out_size = mbox_cmd->size_out; + min_out = mbox_cmd->min_out; rc = cxlds->mbox_send(cxlds, mbox_cmd); if (rc) return rc; @@ -183,14 +182,18 @@ int cxl_internal_send_cmd(struct cxl_dev_state *cxlds, if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) return cxl_mbox_cmd_rc2errno(mbox_cmd); + if (!out_size) + return 0; + /* - * Variable sized commands can't be validated and so it's up to the - * caller to do that if they wish. + * Variable sized output needs to at least satisfy the caller's + * minimum if not the fully requested size.
*/ - if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) { - if (mbox_cmd->size_out != out_size) - return -EIO; - } + if (min_out == 0) + min_out = out_size; + + if (mbox_cmd->size_out < min_out) + return -EIO; return 0; } EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL); @@ -635,6 +638,8 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxl .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS, .size_out = cxlds->payload_size, .payload_out = ret, + /* At least the record number field must be valid */ + .min_out = 2, }; rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); if (rc < 0) { diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index c447577f5ad5..ab138004f644 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -101,6 +101,7 @@ static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port, * outputs commands this is always expected to be deterministic. For * variable sized output commands, it tells the exact number of bytes * written. + * @min_out: (input) internal command output payload size validation * @return_code: (output) Error code returned from hardware. * * This is the primary mechanism used to send commands to the hardware. @@ -115,6 +116,7 @@ struct cxl_mbox_cmd { void *payload_out; size_t size_in; size_t size_out; + size_t min_out; u16 return_code; }; -- cgit v1.2.3 From 7fe898041fb0c8e630504ecc2cb8805651ac85c1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 20:22:44 -0800 Subject: cxl/security: Drop security command ioctl uapi CXL PMEM security operations are routed through the NVDIMM sysfs interface. For this reason the corresponding commands are marked "exclusive" to preclude collisions between the ioctl ABI and the sysfs ABI. However, a better way to preclude that collision is to simply remove the ioctl ABI (command-id definitions) for those operations. Now that cxl_internal_send_cmd() (formerly cxl_mbox_send_cmd()) no longer needs to consult the cxl_mem_commands array, all of the uapi definitions for the security commands can be dropped. These never appeared in a released kernel, so there is no regression risk. Reviewed-by: Dave Jiang Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/167030056464.4044561.11486507095384253833.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 17 ----------------- include/uapi/linux/cxl_mem.h | 6 ------ 2 files changed, 23 deletions(-) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index c36a3589377a..b03fba212799 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -65,12 +65,6 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), - CXL_CMD(GET_SECURITY_STATE, 0, 0x4, 0), - CXL_CMD(SET_PASSPHRASE, 0x60, 0, 0), - CXL_CMD(DISABLE_PASSPHRASE, 0x40, 0, 0), - CXL_CMD(FREEZE_SECURITY, 0, 0, 0), - CXL_CMD(UNLOCK, 0x20, 0, 0), - CXL_CMD(PASSPHRASE_SECURE_ERASE, 0x40, 0, 0), }; /* @@ -717,17 +711,6 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds) /* Found the required CEL */ rc = 0; } - - /* - * Setup permanently kernel exclusive commands, i.e. the - * mechanism is driven through sysfs, keyctl, etc...
- */ - set_bit(CXL_MEM_COMMAND_ID_SET_PASSPHRASE, cxlds->exclusive_cmds); - set_bit(CXL_MEM_COMMAND_ID_DISABLE_PASSPHRASE, cxlds->exclusive_cmds); - set_bit(CXL_MEM_COMMAND_ID_UNLOCK, cxlds->exclusive_cmds); - set_bit(CXL_MEM_COMMAND_ID_PASSPHRASE_SECURE_ERASE, - cxlds->exclusive_cmds); - out: kvfree(gsl); return rc; diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 82bdad4ce5de..c71021a2a9ed 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -41,12 +41,6 @@ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ ___C(SCAN_MEDIA, "Scan Media"), \ ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ - ___C(GET_SECURITY_STATE, "Get Security State"), \ - ___C(SET_PASSPHRASE, "Set Passphrase"), \ - ___C(DISABLE_PASSPHRASE, "Disable Passphrase"), \ - ___C(FREEZE_SECURITY, "Freeze Security"), \ - ___C(UNLOCK, "Unlock"), \ - ___C(PASSPHRASE_SECURE_ERASE, "Passphrase Secure Erase"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a -- cgit v1.2.3 From 372ab3bc3711db46ae1205401c2aac2ed16fc348 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 20:28:34 -0800 Subject: cxl/pci: Add some type-safety to the AER trace points The first argument to the CXL AER trace points is the source device. Pass a 'const struct device *' rather than a 'const char *' for more type precision / safety. Cc: Jonathan Cameron Cc: Dave Jiang Cc: Steven Rostedt Reviewed-by: Dave Jiang Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/167030091477.4045167.15174636482098463885.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 4 ++-- include/trace/events/cxl.h | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 6cec9fa9326c..cced4a0df3d1 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -562,7 +562,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds) } header_log_copy(cxlds, hl); - trace_cxl_aer_uncorrectable_error(dev_name(dev), status, fe, hl); + trace_cxl_aer_uncorrectable_error(dev, status, fe, hl); writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr); return true; @@ -644,7 +644,7 @@ static void cxl_cor_error_detected(struct pci_dev *pdev) status = le32_to_cpu(readl(addr)); if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) { writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr); - trace_cxl_aer_correctable_error(dev_name(dev), status); + trace_cxl_aer_correctable_error(dev, status); } } diff --git a/include/trace/events/cxl.h b/include/trace/events/cxl.h index 72c3e2870a9e..ad085a2534ef 100644 --- a/include/trace/events/cxl.h +++ b/include/trace/events/cxl.h @@ -45,16 +45,16 @@ ) TRACE_EVENT(cxl_aer_uncorrectable_error, - TP_PROTO(const char *dev_name, u32 status, u32 fe, u32 *hl), - TP_ARGS(dev_name, status, fe, hl), + TP_PROTO(const struct device *dev, u32 status, u32 fe, u32 *hl), + TP_ARGS(dev, status, fe, hl), TP_STRUCT__entry( - __string(dev_name, dev_name) + __string(dev_name, dev_name(dev)) __field(u32, status) __field(u32, first_error) __array(u32, header_log, CXL_HEADERLOG_SIZE_U32) ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __assign_str(dev_name, dev_name(dev)); __entry->status = status; __entry->first_error = fe; /* @@ -89,14 +89,14 @@ TRACE_EVENT(cxl_aer_uncorrectable_error, ) TRACE_EVENT(cxl_aer_correctable_error, - TP_PROTO(const char *dev_name, u32 status), - TP_ARGS(dev_name, status), + TP_PROTO(const struct device *dev, u32 
status), + TP_ARGS(dev, status), TP_STRUCT__entry( - __string(dev_name, dev_name) + __string(dev_name, dev_name(dev)) __field(u32, status) ), TP_fast_assign( - __assign_str(dev_name, dev_name); + __assign_str(dev_name, dev_name(dev)); __entry->status = status; ), TP_printk("%s: status: '%s'", -- cgit v1.2.3 From 9b5f77efb0dc71d95403b528756e39b6cae0b948 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Dec 2022 20:28:40 -0800 Subject: cxl/pci: Remove endian confusion readl() already handles endian conversion. That's the main difference between readl() and __raw_readl(). This is benign on little-endian systems, but big-endian systems will end up byte-swabbing twice. Fixes: 2905cb5236cb ("cxl/pci: Add (hopeful) error handling support") Cc: Jonathan Cameron Cc: Dave Jiang Reviewed-by: Dave Jiang Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/167030092025.4045167.10651070153523351093.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index cced4a0df3d1..33083a522fd1 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -548,15 +548,14 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds) return false; addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET; - status = le32_to_cpu((__force __le32)readl(addr)); + status = readl(addr); if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK)) return false; /* If multiple errors, log header points to first error from ctrl reg */ if (hweight32(status) > 1) { addr = cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET; - fe = BIT(le32_to_cpu((__force __le32)readl(addr)) & - CXL_RAS_CAP_CONTROL_FE_MASK); + fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, readl(addr))); } else { fe = status; } @@ -641,7 +640,7 @@ static void cxl_cor_error_detected(struct pci_dev *pdev) return; addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET; - status = le32_to_cpu(readl(addr)); + status = readl(addr); if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) { writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr); trace_cxl_aer_correctable_error(dev, status); -- cgit v1.2.3 From f04facfb993de47e2133b2b842d72b97b1c50162 Mon Sep 17 00:00:00 2001 From: Fan Ni Date: Mon, 7 Nov 2022 21:22:31 +0000 Subject: cxl/region: Fix memdev reuse check Due to a typo, the check of whether or not a memdev has already been used as a target for the region will always be skipped. Given a memdev with more than one HDM decoder, an interleaved region can be created that maps multiple HPAs to the same DPA. According to CXL spec 3.0 8.1.3.8.4, "Aliasing (mapping more than one Host Physical Address (HPA) to a single Device Physical Address) is forbidden." Fix this by using the existing iterator for the memdev reuse check, as sketched below.
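In condensed form, the corrected loop in cxl_region_attach() reads as follows (a simplified sketch, not the verbatim function; the driver also emits a dev_dbg() before returning):

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];	/* was p->targets[pos]: always the slot being attached */
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd)	/* memdev already in this region: HPA->DPA aliasing */
			return -EBUSY;
	}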
Cc: Fixes: 384e624bb211 ("cxl/region: Attach endpoint decoders") Signed-off-by: Fan Ni Link: https://lore.kernel.org/r/20221107212153.745993-1-fan.ni@samsung.com Signed-off-by: Dan Williams --- drivers/cxl/core/region.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index f8c36f4a9422..02f28da519e3 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -1226,7 +1226,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled_target; struct cxl_memdev *cxlmd_target; - cxled_target = p->targets[pos]; + cxled_target = p->targets[i]; if (!cxled_target) continue; -- cgit v1.2.3
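Taken together, the mailbox changes above leave cxl_internal_send_cmd() with the following output-size policy, shown here as a condensed sketch (the payload_size bounds check and hardware return-code translation are elided):

	size_t out_size = mbox_cmd->size_out;	/* size the caller asked for */
	size_t min_out = mbox_cmd->min_out;	/* 0 selects exact-size matching */
	int rc;

	rc = cxlds->mbox_send(cxlds, mbox_cmd);	/* hardware may shrink ->size_out */
	if (rc)
		return rc;

	if (!out_size)				/* command produces no output */
		return 0;

	if (min_out == 0)			/* fixed-size command: demand the full size */
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)	/* not enough data for the caller to parse */
		return -EIO;

	return 0;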