From 5161a55c069f53d88da49274cbef6e3c74eadea9 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 2 Aug 2021 10:29:38 -0700 Subject: cxl: Move cxl_core to new directory CXL core is growing, and it's already arguably unmanageable. To support future growth, move core functionality to a new directory and rename the file to represent just bus support. Future work will remove non-bus functionality. Note that mem.h is renamed to cxlmem.h to avoid a namespace collision with the global ARCH=um mem.h header. Reported-by: kernel test robot Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162792537866.368511.8915631504621088321.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/Makefile | 4 +- drivers/cxl/core.c | 1067 --------------------------------------------- drivers/cxl/core/Makefile | 5 + drivers/cxl/core/bus.c | 1067 +++++++++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxlmem.h | 81 ++++ drivers/cxl/mem.h | 81 ---- drivers/cxl/pci.c | 2 +- drivers/cxl/pmem.c | 2 +- 8 files changed, 1156 insertions(+), 1153 deletions(-) delete mode 100644 drivers/cxl/core.c create mode 100644 drivers/cxl/core/Makefile create mode 100644 drivers/cxl/core/bus.c create mode 100644 drivers/cxl/cxlmem.h delete mode 100644 drivers/cxl/mem.h diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index 32954059b37b..d1aaabc940f3 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -1,11 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_CXL_BUS) += cxl_core.o +obj-$(CONFIG_CXL_BUS) += core/ obj-$(CONFIG_CXL_MEM) += cxl_pci.o obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o obj-$(CONFIG_CXL_PMEM) += cxl_pmem.o -ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -cxl_core-y := core.o cxl_pci-y := pci.o cxl_acpi-y := acpi.o cxl_pmem-y := pmem.o diff --git a/drivers/cxl/core.c b/drivers/cxl/core.c deleted file mode 100644 index a2e4d54fc7bc..000000000000 --- a/drivers/cxl/core.c +++ /dev/null @@ -1,1067 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright(c) 2020 Intel Corporation. All rights reserved. */ -#include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include <linux/idr.h> -#include "cxl.h" -#include "mem.h" - -/** - * DOC: cxl core - * - * The CXL core provides a sysfs hierarchy for control devices and a rendezvous - * point for cross-device interleave coordination through cxl ports.
- */ - -static DEFINE_IDA(cxl_port_ida); - -static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%s\n", dev->type->name); -} -static DEVICE_ATTR_RO(devtype); - -static struct attribute *cxl_base_attributes[] = { - &dev_attr_devtype.attr, - NULL, -}; - -static struct attribute_group cxl_base_attribute_group = { - .attrs = cxl_base_attributes, -}; - -static ssize_t start_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_decoder *cxld = to_cxl_decoder(dev); - - return sysfs_emit(buf, "%#llx\n", cxld->range.start); -} -static DEVICE_ATTR_RO(start); - -static ssize_t size_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_decoder *cxld = to_cxl_decoder(dev); - - return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range)); -} -static DEVICE_ATTR_RO(size); - -#define CXL_DECODER_FLAG_ATTR(name, flag) \ -static ssize_t name##_show(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct cxl_decoder *cxld = to_cxl_decoder(dev); \ - \ - return sysfs_emit(buf, "%s\n", \ - (cxld->flags & (flag)) ? "1" : "0"); \ -} \ -static DEVICE_ATTR_RO(name) - -CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); -CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); -CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); -CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); -CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); - -static ssize_t target_type_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_decoder *cxld = to_cxl_decoder(dev); - - switch (cxld->target_type) { - case CXL_DECODER_ACCELERATOR: - return sysfs_emit(buf, "accelerator\n"); - case CXL_DECODER_EXPANDER: - return sysfs_emit(buf, "expander\n"); - } - return -ENXIO; -} -static DEVICE_ATTR_RO(target_type); - -static ssize_t target_list_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_decoder *cxld = to_cxl_decoder(dev); - ssize_t offset = 0; - int i, rc = 0; - - device_lock(dev); - for (i = 0; i < cxld->interleave_ways; i++) { - struct cxl_dport *dport = cxld->target[i]; - struct cxl_dport *next = NULL; - - if (!dport) - break; - - if (i + 1 < cxld->interleave_ways) - next = cxld->target[i + 1]; - rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, - next ? 
"," : ""); - if (rc < 0) - break; - offset += rc; - } - device_unlock(dev); - - if (rc < 0) - return rc; - - rc = sysfs_emit_at(buf, offset, "\n"); - if (rc < 0) - return rc; - - return offset + rc; -} -static DEVICE_ATTR_RO(target_list); - -static struct attribute *cxl_decoder_base_attrs[] = { - &dev_attr_start.attr, - &dev_attr_size.attr, - &dev_attr_locked.attr, - &dev_attr_target_list.attr, - NULL, -}; - -static struct attribute_group cxl_decoder_base_attribute_group = { - .attrs = cxl_decoder_base_attrs, -}; - -static struct attribute *cxl_decoder_root_attrs[] = { - &dev_attr_cap_pmem.attr, - &dev_attr_cap_ram.attr, - &dev_attr_cap_type2.attr, - &dev_attr_cap_type3.attr, - NULL, -}; - -static struct attribute_group cxl_decoder_root_attribute_group = { - .attrs = cxl_decoder_root_attrs, -}; - -static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { - &cxl_decoder_root_attribute_group, - &cxl_decoder_base_attribute_group, - &cxl_base_attribute_group, - NULL, -}; - -static struct attribute *cxl_decoder_switch_attrs[] = { - &dev_attr_target_type.attr, - NULL, -}; - -static struct attribute_group cxl_decoder_switch_attribute_group = { - .attrs = cxl_decoder_switch_attrs, -}; - -static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { - &cxl_decoder_switch_attribute_group, - &cxl_decoder_base_attribute_group, - &cxl_base_attribute_group, - NULL, -}; - -static void cxl_decoder_release(struct device *dev) -{ - struct cxl_decoder *cxld = to_cxl_decoder(dev); - struct cxl_port *port = to_cxl_port(dev->parent); - - ida_free(&port->decoder_ida, cxld->id); - kfree(cxld); -} - -static const struct device_type cxl_decoder_switch_type = { - .name = "cxl_decoder_switch", - .release = cxl_decoder_release, - .groups = cxl_decoder_switch_attribute_groups, -}; - -static const struct device_type cxl_decoder_root_type = { - .name = "cxl_decoder_root", - .release = cxl_decoder_release, - .groups = cxl_decoder_root_attribute_groups, -}; - -bool is_root_decoder(struct device *dev) -{ - return dev->type == &cxl_decoder_root_type; -} -EXPORT_SYMBOL_GPL(is_root_decoder); - -struct cxl_decoder *to_cxl_decoder(struct device *dev) -{ - if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release, - "not a cxl_decoder device\n")) - return NULL; - return container_of(dev, struct cxl_decoder, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_decoder); - -static void cxl_dport_release(struct cxl_dport *dport) -{ - list_del(&dport->list); - put_device(dport->dport); - kfree(dport); -} - -static void cxl_port_release(struct device *dev) -{ - struct cxl_port *port = to_cxl_port(dev); - struct cxl_dport *dport, *_d; - - device_lock(dev); - list_for_each_entry_safe(dport, _d, &port->dports, list) - cxl_dport_release(dport); - device_unlock(dev); - ida_free(&cxl_port_ida, port->id); - kfree(port); -} - -static const struct attribute_group *cxl_port_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_port_type = { - .name = "cxl_port", - .release = cxl_port_release, - .groups = cxl_port_attribute_groups, -}; - -struct cxl_port *to_cxl_port(struct device *dev) -{ - if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type, - "not a cxl_port device\n")) - return NULL; - return container_of(dev, struct cxl_port, dev); -} - -static void unregister_port(void *_port) -{ - struct cxl_port *port = _port; - struct cxl_dport *dport; - - device_lock(&port->dev); - list_for_each_entry(dport, &port->dports, list) { - char link_name[CXL_TARGET_STRLEN]; - - if 
(snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", - dport->port_id) >= CXL_TARGET_STRLEN) - continue; - sysfs_remove_link(&port->dev.kobj, link_name); - } - device_unlock(&port->dev); - device_unregister(&port->dev); -} - -static void cxl_unlink_uport(void *_port) -{ - struct cxl_port *port = _port; - - sysfs_remove_link(&port->dev.kobj, "uport"); -} - -static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) -{ - int rc; - - rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport"); - if (rc) - return rc; - return devm_add_action_or_reset(host, cxl_unlink_uport, port); -} - -static struct cxl_port *cxl_port_alloc(struct device *uport, - resource_size_t component_reg_phys, - struct cxl_port *parent_port) -{ - struct cxl_port *port; - struct device *dev; - int rc; - - port = kzalloc(sizeof(*port), GFP_KERNEL); - if (!port) - return ERR_PTR(-ENOMEM); - - rc = ida_alloc(&cxl_port_ida, GFP_KERNEL); - if (rc < 0) - goto err; - port->id = rc; - - /* - * The top-level cxl_port "cxl_root" does not have a cxl_port as - * its parent and it does not have any corresponding component - * registers as its decode is described by a fixed platform - * description. - */ - dev = &port->dev; - if (parent_port) - dev->parent = &parent_port->dev; - else - dev->parent = uport; - - port->uport = uport; - port->component_reg_phys = component_reg_phys; - ida_init(&port->decoder_ida); - INIT_LIST_HEAD(&port->dports); - - device_initialize(dev); - device_set_pm_not_required(dev); - dev->bus = &cxl_bus_type; - dev->type = &cxl_port_type; - - return port; - -err: - kfree(port); - return ERR_PTR(rc); -} - -/** - * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy - * @host: host device for devm operations - * @uport: "physical" device implementing this upstream port - * @component_reg_phys: (optional) for configurable cxl_port instances - * @parent_port: next hop up in the CXL memory decode hierarchy - */ -struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, - resource_size_t component_reg_phys, - struct cxl_port *parent_port) -{ - struct cxl_port *port; - struct device *dev; - int rc; - - port = cxl_port_alloc(uport, component_reg_phys, parent_port); - if (IS_ERR(port)) - return port; - - dev = &port->dev; - if (parent_port) - rc = dev_set_name(dev, "port%d", port->id); - else - rc = dev_set_name(dev, "root%d", port->id); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, unregister_port, port); - if (rc) - return ERR_PTR(rc); - - rc = devm_cxl_link_uport(host, port); - if (rc) - return ERR_PTR(rc); - - return port; - -err: - put_device(dev); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(devm_cxl_add_port); - -static struct cxl_dport *find_dport(struct cxl_port *port, int id) -{ - struct cxl_dport *dport; - - device_lock_assert(&port->dev); - list_for_each_entry (dport, &port->dports, list) - if (dport->port_id == id) - return dport; - return NULL; -} - -static int add_dport(struct cxl_port *port, struct cxl_dport *new) -{ - struct cxl_dport *dup; - - device_lock(&port->dev); - dup = find_dport(port, new->port_id); - if (dup) - dev_err(&port->dev, - "unable to add dport%d-%s non-unique port id (%s)\n", - new->port_id, dev_name(new->dport), - dev_name(dup->dport)); - else - list_add_tail(&new->list, &port->dports); - device_unlock(&port->dev); - - return dup ? 
-EEXIST : 0; -} - -/** - * cxl_add_dport - append downstream port data to a cxl_port - * @port: the cxl_port that references this dport - * @dport_dev: firmware or PCI device representing the dport - * @port_id: identifier for this dport in a decoder's target list - * @component_reg_phys: optional location of CXL component registers - * - * Note that all allocations and links are undone by cxl_port deletion - * and release. - */ -int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id, - resource_size_t component_reg_phys) -{ - char link_name[CXL_TARGET_STRLEN]; - struct cxl_dport *dport; - int rc; - - if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >= - CXL_TARGET_STRLEN) - return -EINVAL; - - dport = kzalloc(sizeof(*dport), GFP_KERNEL); - if (!dport) - return -ENOMEM; - - INIT_LIST_HEAD(&dport->list); - dport->dport = get_device(dport_dev); - dport->port_id = port_id; - dport->component_reg_phys = component_reg_phys; - dport->port = port; - - rc = add_dport(port, dport); - if (rc) - goto err; - - rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name); - if (rc) - goto err; - - return 0; -err: - cxl_dport_release(dport); - return rc; -} -EXPORT_SYMBOL_GPL(cxl_add_dport); - -static struct cxl_decoder * -cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base, - resource_size_t len, int interleave_ways, - int interleave_granularity, enum cxl_decoder_type type, - unsigned long flags) -{ - struct cxl_decoder *cxld; - struct device *dev; - int rc = 0; - - if (interleave_ways < 1) - return ERR_PTR(-EINVAL); - - device_lock(&port->dev); - if (list_empty(&port->dports)) - rc = -EINVAL; - device_unlock(&port->dev); - if (rc) - return ERR_PTR(rc); - - cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL); - if (!cxld) - return ERR_PTR(-ENOMEM); - - rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); - if (rc < 0) - goto err; - - *cxld = (struct cxl_decoder) { - .id = rc, - .range = { - .start = base, - .end = base + len - 1, - }, - .flags = flags, - .interleave_ways = interleave_ways, - .interleave_granularity = interleave_granularity, - .target_type = type, - }; - - /* handle implied target_list */ - if (interleave_ways == 1) - cxld->target[0] = - list_first_entry(&port->dports, struct cxl_dport, list); - dev = &cxld->dev; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &port->dev; - dev->bus = &cxl_bus_type; - - /* root ports do not have a cxl_port_type parent */ - if (port->dev.parent->type == &cxl_port_type) - dev->type = &cxl_decoder_switch_type; - else - dev->type = &cxl_decoder_root_type; - - return cxld; -err: - kfree(cxld); - return ERR_PTR(rc); -} - -static void unregister_dev(void *dev) -{ - device_unregister(dev); -} - -struct cxl_decoder * -devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, - resource_size_t base, resource_size_t len, - int interleave_ways, int interleave_granularity, - enum cxl_decoder_type type, unsigned long flags) -{ - struct cxl_decoder *cxld; - struct device *dev; - int rc; - - cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways, - interleave_granularity, type, flags); - if (IS_ERR(cxld)) - return cxld; - - dev = &cxld->dev; - rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, unregister_dev, dev); - if (rc) - return ERR_PTR(rc); - return cxld; - -err: - put_device(dev); - return 
ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); - -/** - * cxl_probe_component_regs() - Detect CXL Component register blocks - * @dev: Host device of the @base mapping - * @base: Mapping containing the HDM Decoder Capability Header - * @map: Map object describing the register block information found - * - * See CXL 2.0 8.2.4 Component Register Layout and Definition - * See CXL 2.0 8.2.5.5 CXL Device Register Interface - * - * Probe for component register information and return it in map object. - */ -void cxl_probe_component_regs(struct device *dev, void __iomem *base, - struct cxl_component_reg_map *map) -{ - int cap, cap_count; - u64 cap_array; - - *map = (struct cxl_component_reg_map) { 0 }; - - /* - * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in - * CXL 2.0 8.2.4 Table 141. - */ - base += CXL_CM_OFFSET; - - cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); - - if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { - dev_err(dev, - "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); - return; - } - - /* It's assumed that future versions will be backward compatible */ - cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array); - - for (cap = 1; cap <= cap_count; cap++) { - void __iomem *register_block; - u32 hdr; - int decoder_cnt; - u16 cap_id, offset; - u32 length; - - hdr = readl(base + cap * 0x4); - - cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); - offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); - register_block = base + offset; - - switch (cap_id) { - case CXL_CM_CAP_CAP_ID_HDM: - dev_dbg(dev, "found HDM decoder capability (0x%x)\n", - offset); - - hdr = readl(register_block); - - decoder_cnt = cxl_hdm_decoder_count(hdr); - length = 0x20 * decoder_cnt + 0x10; - - map->hdm_decoder.valid = true; - map->hdm_decoder.offset = CXL_CM_OFFSET + offset; - map->hdm_decoder.size = length; - break; - default: - dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, - offset); - break; - } - } -} -EXPORT_SYMBOL_GPL(cxl_probe_component_regs); - -static void cxl_nvdimm_bridge_release(struct device *dev) -{ - struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); - - kfree(cxl_nvb); -} - -static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_nvdimm_bridge_type = { - .name = "cxl_nvdimm_bridge", - .release = cxl_nvdimm_bridge_release, - .groups = cxl_nvdimm_bridge_attribute_groups, -}; - -struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) -{ - if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type, - "not a cxl_nvdimm_bridge device\n")) - return NULL; - return container_of(dev, struct cxl_nvdimm_bridge, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); - -static struct cxl_nvdimm_bridge * -cxl_nvdimm_bridge_alloc(struct cxl_port *port) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - struct device *dev; - - cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); - if (!cxl_nvb) - return ERR_PTR(-ENOMEM); - - dev = &cxl_nvb->dev; - cxl_nvb->port = port; - cxl_nvb->state = CXL_NVB_NEW; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &port->dev; - dev->bus = &cxl_bus_type; - dev->type = &cxl_nvdimm_bridge_type; - - return cxl_nvb; -} - -static void unregister_nvb(void *_cxl_nvb) -{ - struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; - bool flush; - - /* - * If the bridge was ever activated then there might be in-flight state - * work to flush.
Once the state has been changed to 'dead' then no new - * work can be queued by user-triggered bind. - */ - device_lock(&cxl_nvb->dev); - flush = cxl_nvb->state != CXL_NVB_NEW; - cxl_nvb->state = CXL_NVB_DEAD; - device_unlock(&cxl_nvb->dev); - - /* - * Even though the device core will trigger device_release_driver() - * before the unregister, it does not know about the fact that - * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver - * release now and flush it before tearing down the nvdimm device - * hierarchy. - */ - device_release_driver(&cxl_nvb->dev); - if (flush) - flush_work(&cxl_nvb->state_work); - device_unregister(&cxl_nvb->dev); -} - -struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, - struct cxl_port *port) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - struct device *dev; - int rc; - - if (!IS_ENABLED(CONFIG_CXL_PMEM)) - return ERR_PTR(-ENXIO); - - cxl_nvb = cxl_nvdimm_bridge_alloc(port); - if (IS_ERR(cxl_nvb)) - return cxl_nvb; - - dev = &cxl_nvb->dev; - rc = dev_set_name(dev, "nvdimm-bridge"); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); - if (rc) - return ERR_PTR(rc); - - return cxl_nvb; - -err: - put_device(dev); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge); - -static void cxl_nvdimm_release(struct device *dev) -{ - struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); - - kfree(cxl_nvd); -} - -static const struct attribute_group *cxl_nvdimm_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_nvdimm_type = { - .name = "cxl_nvdimm", - .release = cxl_nvdimm_release, - .groups = cxl_nvdimm_attribute_groups, -}; - -bool is_cxl_nvdimm(struct device *dev) -{ - return dev->type == &cxl_nvdimm_type; -} -EXPORT_SYMBOL_GPL(is_cxl_nvdimm); - -struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) -{ - if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev), - "not a cxl_nvdimm device\n")) - return NULL; - return container_of(dev, struct cxl_nvdimm, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_nvdimm); - -static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) -{ - struct cxl_nvdimm *cxl_nvd; - struct device *dev; - - cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL); - if (!cxl_nvd) - return ERR_PTR(-ENOMEM); - - dev = &cxl_nvd->dev; - cxl_nvd->cxlmd = cxlmd; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &cxlmd->dev; - dev->bus = &cxl_bus_type; - dev->type = &cxl_nvdimm_type; - - return cxl_nvd; -} - -int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) -{ - struct cxl_nvdimm *cxl_nvd; - struct device *dev; - int rc; - - cxl_nvd = cxl_nvdimm_alloc(cxlmd); - if (IS_ERR(cxl_nvd)) - return PTR_ERR(cxl_nvd); - - dev = &cxl_nvd->dev; - rc = dev_set_name(dev, "pmem%d", cxlmd->id); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), - dev_name(dev)); - - return devm_add_action_or_reset(host, unregister_dev, dev); - -err: - put_device(dev); - return rc; -} -EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm); - -/** - * cxl_probe_device_regs() - Detect CXL Device register blocks - * @dev: Host device of the @base mapping - * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface - * @map: Map object describing the register block information found - * - * Probe for device register information and return it in map object.
- */ -void cxl_probe_device_regs(struct device *dev, void __iomem *base, - struct cxl_device_reg_map *map) -{ - int cap, cap_count; - u64 cap_array; - - *map = (struct cxl_device_reg_map){ 0 }; - - cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET); - if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) != - CXLDEV_CAP_ARRAY_CAP_ID) - return; - - cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); - - for (cap = 1; cap <= cap_count; cap++) { - u32 offset, length; - u16 cap_id; - - cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, - readl(base + cap * 0x10)); - offset = readl(base + cap * 0x10 + 0x4); - length = readl(base + cap * 0x10 + 0x8); - - switch (cap_id) { - case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: - dev_dbg(dev, "found Status capability (0x%x)\n", offset); - - map->status.valid = true; - map->status.offset = offset; - map->status.size = length; - break; - case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: - dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); - map->mbox.valid = true; - map->mbox.offset = offset; - map->mbox.size = length; - break; - case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: - dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); - break; - case CXLDEV_CAP_CAP_ID_MEMDEV: - dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); - map->memdev.valid = true; - map->memdev.offset = offset; - map->memdev.size = length; - break; - default: - if (cap_id >= 0x8000) - dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset); - else - dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); - break; - } - } -} -EXPORT_SYMBOL_GPL(cxl_probe_device_regs); - -static void __iomem *devm_cxl_iomap_block(struct device *dev, - resource_size_t addr, - resource_size_t length) -{ - void __iomem *ret_val; - struct resource *res; - - res = devm_request_mem_region(dev, addr, length, dev_name(dev)); - if (!res) { - resource_size_t end = addr + length - 1; - - dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end); - return NULL; - } - - ret_val = devm_ioremap(dev, addr, length); - if (!ret_val) - dev_err(dev, "Failed to map region %pr\n", res); - - return ret_val; -} - -int cxl_map_component_regs(struct pci_dev *pdev, - struct cxl_component_regs *regs, - struct cxl_register_map *map) -{ - struct device *dev = &pdev->dev; - resource_size_t phys_addr; - resource_size_t length; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - phys_addr += map->component_map.hdm_decoder.offset; - length = map->component_map.hdm_decoder.size; - regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); - if (!regs->hdm_decoder) - return -ENOMEM; - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_map_component_regs); - -int cxl_map_device_regs(struct pci_dev *pdev, - struct cxl_device_regs *regs, - struct cxl_register_map *map) -{ - struct device *dev = &pdev->dev; - resource_size_t phys_addr; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - if (map->device_map.status.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.status.offset; - length = map->device_map.status.size; - regs->status = devm_cxl_iomap_block(dev, addr, length); - if (!regs->status) - return -ENOMEM; - } - - if (map->device_map.mbox.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.mbox.offset; - length = map->device_map.mbox.size; - regs->mbox = devm_cxl_iomap_block(dev, addr, length); - if (!regs->mbox) - return -ENOMEM; - 
} - - if (map->device_map.memdev.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.memdev.offset; - length = map->device_map.memdev.size; - regs->memdev = devm_cxl_iomap_block(dev, addr, length); - if (!regs->memdev) - return -ENOMEM; - } - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_map_device_regs); - -/** - * __cxl_driver_register - register a driver for the cxl bus - * @cxl_drv: cxl driver structure to attach - * @owner: owning module/driver - * @modname: KBUILD_MODNAME for parent driver - */ -int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, - const char *modname) -{ - if (!cxl_drv->probe) { - pr_debug("%s ->probe() must be specified\n", modname); - return -EINVAL; - } - - if (!cxl_drv->name) { - pr_debug("%s ->name must be specified\n", modname); - return -EINVAL; - } - - if (!cxl_drv->id) { - pr_debug("%s ->id must be specified\n", modname); - return -EINVAL; - } - - cxl_drv->drv.bus = &cxl_bus_type; - cxl_drv->drv.owner = owner; - cxl_drv->drv.mod_name = modname; - cxl_drv->drv.name = cxl_drv->name; - - return driver_register(&cxl_drv->drv); -} -EXPORT_SYMBOL_GPL(__cxl_driver_register); - -void cxl_driver_unregister(struct cxl_driver *cxl_drv) -{ - driver_unregister(&cxl_drv->drv); -} -EXPORT_SYMBOL_GPL(cxl_driver_unregister); - -static int cxl_device_id(struct device *dev) -{ - if (dev->type == &cxl_nvdimm_bridge_type) - return CXL_DEVICE_NVDIMM_BRIDGE; - if (dev->type == &cxl_nvdimm_type) - return CXL_DEVICE_NVDIMM; - return 0; -} - -static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env) -{ - return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT, - cxl_device_id(dev)); -} - -static int cxl_bus_match(struct device *dev, struct device_driver *drv) -{ - return cxl_device_id(dev) == to_cxl_drv(drv)->id; -} - -static int cxl_bus_probe(struct device *dev) -{ - return to_cxl_drv(dev->driver)->probe(dev); -} - -static int cxl_bus_remove(struct device *dev) -{ - struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); - - if (cxl_drv->remove) - cxl_drv->remove(dev); - return 0; -} - -struct bus_type cxl_bus_type = { - .name = "cxl", - .uevent = cxl_bus_uevent, - .match = cxl_bus_match, - .probe = cxl_bus_probe, - .remove = cxl_bus_remove, -}; -EXPORT_SYMBOL_GPL(cxl_bus_type); - -static __init int cxl_core_init(void) -{ - return bus_register(&cxl_bus_type); -} - -static void cxl_core_exit(void) -{ - bus_unregister(&cxl_bus_type); -} - -module_init(cxl_core_init); -module_exit(cxl_core_exit); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile new file mode 100644 index 000000000000..ad137f96e5c8 --- /dev/null +++ b/drivers/cxl/core/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CXL_BUS) += cxl_core.o + +ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl +cxl_core-y := bus.o diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c new file mode 100644 index 000000000000..0815eec23944 --- /dev/null +++ b/drivers/cxl/core/bus.c @@ -0,0 +1,1067 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. All rights reserved. */ +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <cxlmem.h> +#include <cxl.h> + +/** + * DOC: cxl core + * + * The CXL core provides a sysfs hierarchy for control devices and a rendezvous + * point for cross-device interleave coordination through cxl ports.
+ */ + +static DEFINE_IDA(cxl_port_ida); + +static ssize_t devtype_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", dev->type->name); +} +static DEVICE_ATTR_RO(devtype); + +static struct attribute *cxl_base_attributes[] = { + &dev_attr_devtype.attr, + NULL, +}; + +static struct attribute_group cxl_base_attribute_group = { + .attrs = cxl_base_attributes, +}; + +static ssize_t start_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + return sysfs_emit(buf, "%#llx\n", cxld->range.start); +} +static DEVICE_ATTR_RO(start); + +static ssize_t size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range)); +} +static DEVICE_ATTR_RO(size); + +#define CXL_DECODER_FLAG_ATTR(name, flag) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct cxl_decoder *cxld = to_cxl_decoder(dev); \ + \ + return sysfs_emit(buf, "%s\n", \ + (cxld->flags & (flag)) ? "1" : "0"); \ +} \ +static DEVICE_ATTR_RO(name) + +CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM); +CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM); +CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2); +CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3); +CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK); + +static ssize_t target_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + switch (cxld->target_type) { + case CXL_DECODER_ACCELERATOR: + return sysfs_emit(buf, "accelerator\n"); + case CXL_DECODER_EXPANDER: + return sysfs_emit(buf, "expander\n"); + } + return -ENXIO; +} +static DEVICE_ATTR_RO(target_type); + +static ssize_t target_list_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + ssize_t offset = 0; + int i, rc = 0; + + device_lock(dev); + for (i = 0; i < cxld->interleave_ways; i++) { + struct cxl_dport *dport = cxld->target[i]; + struct cxl_dport *next = NULL; + + if (!dport) + break; + + if (i + 1 < cxld->interleave_ways) + next = cxld->target[i + 1]; + rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, + next ? 
"," : ""); + if (rc < 0) + break; + offset += rc; + } + device_unlock(dev); + + if (rc < 0) + return rc; + + rc = sysfs_emit_at(buf, offset, "\n"); + if (rc < 0) + return rc; + + return offset + rc; +} +static DEVICE_ATTR_RO(target_list); + +static struct attribute *cxl_decoder_base_attrs[] = { + &dev_attr_start.attr, + &dev_attr_size.attr, + &dev_attr_locked.attr, + &dev_attr_target_list.attr, + NULL, +}; + +static struct attribute_group cxl_decoder_base_attribute_group = { + .attrs = cxl_decoder_base_attrs, +}; + +static struct attribute *cxl_decoder_root_attrs[] = { + &dev_attr_cap_pmem.attr, + &dev_attr_cap_ram.attr, + &dev_attr_cap_type2.attr, + &dev_attr_cap_type3.attr, + NULL, +}; + +static struct attribute_group cxl_decoder_root_attribute_group = { + .attrs = cxl_decoder_root_attrs, +}; + +static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { + &cxl_decoder_root_attribute_group, + &cxl_decoder_base_attribute_group, + &cxl_base_attribute_group, + NULL, +}; + +static struct attribute *cxl_decoder_switch_attrs[] = { + &dev_attr_target_type.attr, + NULL, +}; + +static struct attribute_group cxl_decoder_switch_attribute_group = { + .attrs = cxl_decoder_switch_attrs, +}; + +static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { + &cxl_decoder_switch_attribute_group, + &cxl_decoder_base_attribute_group, + &cxl_base_attribute_group, + NULL, +}; + +static void cxl_decoder_release(struct device *dev) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + struct cxl_port *port = to_cxl_port(dev->parent); + + ida_free(&port->decoder_ida, cxld->id); + kfree(cxld); +} + +static const struct device_type cxl_decoder_switch_type = { + .name = "cxl_decoder_switch", + .release = cxl_decoder_release, + .groups = cxl_decoder_switch_attribute_groups, +}; + +static const struct device_type cxl_decoder_root_type = { + .name = "cxl_decoder_root", + .release = cxl_decoder_release, + .groups = cxl_decoder_root_attribute_groups, +}; + +bool is_root_decoder(struct device *dev) +{ + return dev->type == &cxl_decoder_root_type; +} +EXPORT_SYMBOL_GPL(is_root_decoder); + +struct cxl_decoder *to_cxl_decoder(struct device *dev) +{ + if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release, + "not a cxl_decoder device\n")) + return NULL; + return container_of(dev, struct cxl_decoder, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_decoder); + +static void cxl_dport_release(struct cxl_dport *dport) +{ + list_del(&dport->list); + put_device(dport->dport); + kfree(dport); +} + +static void cxl_port_release(struct device *dev) +{ + struct cxl_port *port = to_cxl_port(dev); + struct cxl_dport *dport, *_d; + + device_lock(dev); + list_for_each_entry_safe(dport, _d, &port->dports, list) + cxl_dport_release(dport); + device_unlock(dev); + ida_free(&cxl_port_ida, port->id); + kfree(port); +} + +static const struct attribute_group *cxl_port_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +static const struct device_type cxl_port_type = { + .name = "cxl_port", + .release = cxl_port_release, + .groups = cxl_port_attribute_groups, +}; + +struct cxl_port *to_cxl_port(struct device *dev) +{ + if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type, + "not a cxl_port device\n")) + return NULL; + return container_of(dev, struct cxl_port, dev); +} + +static void unregister_port(void *_port) +{ + struct cxl_port *port = _port; + struct cxl_dport *dport; + + device_lock(&port->dev); + list_for_each_entry(dport, &port->dports, list) { + char link_name[CXL_TARGET_STRLEN]; + + if 
(snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", + dport->port_id) >= CXL_TARGET_STRLEN) + continue; + sysfs_remove_link(&port->dev.kobj, link_name); + } + device_unlock(&port->dev); + device_unregister(&port->dev); +} + +static void cxl_unlink_uport(void *_port) +{ + struct cxl_port *port = _port; + + sysfs_remove_link(&port->dev.kobj, "uport"); +} + +static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) +{ + int rc; + + rc = sysfs_create_link(&port->dev.kobj, &port->uport->kobj, "uport"); + if (rc) + return rc; + return devm_add_action_or_reset(host, cxl_unlink_uport, port); +} + +static struct cxl_port *cxl_port_alloc(struct device *uport, + resource_size_t component_reg_phys, + struct cxl_port *parent_port) +{ + struct cxl_port *port; + struct device *dev; + int rc; + + port = kzalloc(sizeof(*port), GFP_KERNEL); + if (!port) + return ERR_PTR(-ENOMEM); + + rc = ida_alloc(&cxl_port_ida, GFP_KERNEL); + if (rc < 0) + goto err; + port->id = rc; + + /* + * The top-level cxl_port "cxl_root" does not have a cxl_port as + * its parent and it does not have any corresponding component + * registers as its decode is described by a fixed platform + * description. + */ + dev = &port->dev; + if (parent_port) + dev->parent = &parent_port->dev; + else + dev->parent = uport; + + port->uport = uport; + port->component_reg_phys = component_reg_phys; + ida_init(&port->decoder_ida); + INIT_LIST_HEAD(&port->dports); + + device_initialize(dev); + device_set_pm_not_required(dev); + dev->bus = &cxl_bus_type; + dev->type = &cxl_port_type; + + return port; + +err: + kfree(port); + return ERR_PTR(rc); +} + +/** + * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy + * @host: host device for devm operations + * @uport: "physical" device implementing this upstream port + * @component_reg_phys: (optional) for configurable cxl_port instances + * @parent_port: next hop up in the CXL memory decode hierarchy + */ +struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, + resource_size_t component_reg_phys, + struct cxl_port *parent_port) +{ + struct cxl_port *port; + struct device *dev; + int rc; + + port = cxl_port_alloc(uport, component_reg_phys, parent_port); + if (IS_ERR(port)) + return port; + + dev = &port->dev; + if (parent_port) + rc = dev_set_name(dev, "port%d", port->id); + else + rc = dev_set_name(dev, "root%d", port->id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, unregister_port, port); + if (rc) + return ERR_PTR(rc); + + rc = devm_cxl_link_uport(host, port); + if (rc) + return ERR_PTR(rc); + + return port; + +err: + put_device(dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_port); + +static struct cxl_dport *find_dport(struct cxl_port *port, int id) +{ + struct cxl_dport *dport; + + device_lock_assert(&port->dev); + list_for_each_entry (dport, &port->dports, list) + if (dport->port_id == id) + return dport; + return NULL; +} + +static int add_dport(struct cxl_port *port, struct cxl_dport *new) +{ + struct cxl_dport *dup; + + device_lock(&port->dev); + dup = find_dport(port, new->port_id); + if (dup) + dev_err(&port->dev, + "unable to add dport%d-%s non-unique port id (%s)\n", + new->port_id, dev_name(new->dport), + dev_name(dup->dport)); + else + list_add_tail(&new->list, &port->dports); + device_unlock(&port->dev); + + return dup ? 
-EEXIST : 0; +} + +/** + * cxl_add_dport - append downstream port data to a cxl_port + * @port: the cxl_port that references this dport + * @dport_dev: firmware or PCI device representing the dport + * @port_id: identifier for this dport in a decoder's target list + * @component_reg_phys: optional location of CXL component registers + * + * Note that all allocations and links are undone by cxl_port deletion + * and release. + */ +int cxl_add_dport(struct cxl_port *port, struct device *dport_dev, int port_id, + resource_size_t component_reg_phys) +{ + char link_name[CXL_TARGET_STRLEN]; + struct cxl_dport *dport; + int rc; + + if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >= + CXL_TARGET_STRLEN) + return -EINVAL; + + dport = kzalloc(sizeof(*dport), GFP_KERNEL); + if (!dport) + return -ENOMEM; + + INIT_LIST_HEAD(&dport->list); + dport->dport = get_device(dport_dev); + dport->port_id = port_id; + dport->component_reg_phys = component_reg_phys; + dport->port = port; + + rc = add_dport(port, dport); + if (rc) + goto err; + + rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name); + if (rc) + goto err; + + return 0; +err: + cxl_dport_release(dport); + return rc; +} +EXPORT_SYMBOL_GPL(cxl_add_dport); + +static struct cxl_decoder * +cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base, + resource_size_t len, int interleave_ways, + int interleave_granularity, enum cxl_decoder_type type, + unsigned long flags) +{ + struct cxl_decoder *cxld; + struct device *dev; + int rc = 0; + + if (interleave_ways < 1) + return ERR_PTR(-EINVAL); + + device_lock(&port->dev); + if (list_empty(&port->dports)) + rc = -EINVAL; + device_unlock(&port->dev); + if (rc) + return ERR_PTR(rc); + + cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL); + if (!cxld) + return ERR_PTR(-ENOMEM); + + rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); + if (rc < 0) + goto err; + + *cxld = (struct cxl_decoder) { + .id = rc, + .range = { + .start = base, + .end = base + len - 1, + }, + .flags = flags, + .interleave_ways = interleave_ways, + .interleave_granularity = interleave_granularity, + .target_type = type, + }; + + /* handle implied target_list */ + if (interleave_ways == 1) + cxld->target[0] = + list_first_entry(&port->dports, struct cxl_dport, list); + dev = &cxld->dev; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &port->dev; + dev->bus = &cxl_bus_type; + + /* root ports do not have a cxl_port_type parent */ + if (port->dev.parent->type == &cxl_port_type) + dev->type = &cxl_decoder_switch_type; + else + dev->type = &cxl_decoder_root_type; + + return cxld; +err: + kfree(cxld); + return ERR_PTR(rc); +} + +static void unregister_dev(void *dev) +{ + device_unregister(dev); +} + +struct cxl_decoder * +devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, + resource_size_t base, resource_size_t len, + int interleave_ways, int interleave_granularity, + enum cxl_decoder_type type, unsigned long flags) +{ + struct cxl_decoder *cxld; + struct device *dev; + int rc; + + cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways, + interleave_granularity, type, flags); + if (IS_ERR(cxld)) + return cxld; + + dev = &cxld->dev; + rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, unregister_dev, dev); + if (rc) + return ERR_PTR(rc); + return cxld; + +err: + put_device(dev); + return 
ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); + +/** + * cxl_probe_component_regs() - Detect CXL Component register blocks + * @dev: Host device of the @base mapping + * @base: Mapping containing the HDM Decoder Capability Header + * @map: Map object describing the register block information found + * + * See CXL 2.0 8.2.4 Component Register Layout and Definition + * See CXL 2.0 8.2.5.5 CXL Device Register Interface + * + * Probe for component register information and return it in map object. + */ +void cxl_probe_component_regs(struct device *dev, void __iomem *base, + struct cxl_component_reg_map *map) +{ + int cap, cap_count; + u64 cap_array; + + *map = (struct cxl_component_reg_map) { 0 }; + + /* + * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in + * CXL 2.0 8.2.4 Table 141. + */ + base += CXL_CM_OFFSET; + + cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); + + if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { + dev_err(dev, + "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); + return; + } + + /* It's assumed that future versions will be backward compatible */ + cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array); + + for (cap = 1; cap <= cap_count; cap++) { + void __iomem *register_block; + u32 hdr; + int decoder_cnt; + u16 cap_id, offset; + u32 length; + + hdr = readl(base + cap * 0x4); + + cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); + offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); + register_block = base + offset; + + switch (cap_id) { + case CXL_CM_CAP_CAP_ID_HDM: + dev_dbg(dev, "found HDM decoder capability (0x%x)\n", + offset); + + hdr = readl(register_block); + + decoder_cnt = cxl_hdm_decoder_count(hdr); + length = 0x20 * decoder_cnt + 0x10; + + map->hdm_decoder.valid = true; + map->hdm_decoder.offset = CXL_CM_OFFSET + offset; + map->hdm_decoder.size = length; + break; + default: + dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, + offset); + break; + } + } +} +EXPORT_SYMBOL_GPL(cxl_probe_component_regs); + +static void cxl_nvdimm_bridge_release(struct device *dev) +{ + struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); + + kfree(cxl_nvb); +} + +static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +static const struct device_type cxl_nvdimm_bridge_type = { + .name = "cxl_nvdimm_bridge", + .release = cxl_nvdimm_bridge_release, + .groups = cxl_nvdimm_bridge_attribute_groups, +}; + +struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) +{ + if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type, + "not a cxl_nvdimm_bridge device\n")) + return NULL; + return container_of(dev, struct cxl_nvdimm_bridge, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); + +static struct cxl_nvdimm_bridge * +cxl_nvdimm_bridge_alloc(struct cxl_port *port) +{ + struct cxl_nvdimm_bridge *cxl_nvb; + struct device *dev; + + cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); + if (!cxl_nvb) + return ERR_PTR(-ENOMEM); + + dev = &cxl_nvb->dev; + cxl_nvb->port = port; + cxl_nvb->state = CXL_NVB_NEW; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &port->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_nvdimm_bridge_type; + + return cxl_nvb; +} + +static void unregister_nvb(void *_cxl_nvb) +{ + struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; + bool flush; + + /* + * If the bridge was ever activated then there might be in-flight state + * work to flush.
Once the state has been changed to 'dead' then no new + * work can be queued by user-triggered bind. + */ + device_lock(&cxl_nvb->dev); + flush = cxl_nvb->state != CXL_NVB_NEW; + cxl_nvb->state = CXL_NVB_DEAD; + device_unlock(&cxl_nvb->dev); + + /* + * Even though the device core will trigger device_release_driver() + * before the unregister, it does not know about the fact that + * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver + * release now and flush it before tearing down the nvdimm device + * hierarchy. + */ + device_release_driver(&cxl_nvb->dev); + if (flush) + flush_work(&cxl_nvb->state_work); + device_unregister(&cxl_nvb->dev); +} + +struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, + struct cxl_port *port) +{ + struct cxl_nvdimm_bridge *cxl_nvb; + struct device *dev; + int rc; + + if (!IS_ENABLED(CONFIG_CXL_PMEM)) + return ERR_PTR(-ENXIO); + + cxl_nvb = cxl_nvdimm_bridge_alloc(port); + if (IS_ERR(cxl_nvb)) + return cxl_nvb; + + dev = &cxl_nvb->dev; + rc = dev_set_name(dev, "nvdimm-bridge"); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); + if (rc) + return ERR_PTR(rc); + + return cxl_nvb; + +err: + put_device(dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge); + +static void cxl_nvdimm_release(struct device *dev) +{ + struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); + + kfree(cxl_nvd); +} + +static const struct attribute_group *cxl_nvdimm_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +static const struct device_type cxl_nvdimm_type = { + .name = "cxl_nvdimm", + .release = cxl_nvdimm_release, + .groups = cxl_nvdimm_attribute_groups, +}; + +bool is_cxl_nvdimm(struct device *dev) +{ + return dev->type == &cxl_nvdimm_type; +} +EXPORT_SYMBOL_GPL(is_cxl_nvdimm); + +struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev), + "not a cxl_nvdimm device\n")) + return NULL; + return container_of(dev, struct cxl_nvdimm, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_nvdimm); + +static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) +{ + struct cxl_nvdimm *cxl_nvd; + struct device *dev; + + cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL); + if (!cxl_nvd) + return ERR_PTR(-ENOMEM); + + dev = &cxl_nvd->dev; + cxl_nvd->cxlmd = cxlmd; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &cxlmd->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_nvdimm_type; + + return cxl_nvd; +} + +int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) +{ + struct cxl_nvdimm *cxl_nvd; + struct device *dev; + int rc; + + cxl_nvd = cxl_nvdimm_alloc(cxlmd); + if (IS_ERR(cxl_nvd)) + return PTR_ERR(cxl_nvd); + + dev = &cxl_nvd->dev; + rc = dev_set_name(dev, "pmem%d", cxlmd->id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), + dev_name(dev)); + + return devm_add_action_or_reset(host, unregister_dev, dev); + +err: + put_device(dev); + return rc; +} +EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm); + +/** + * cxl_probe_device_regs() - Detect CXL Device register blocks + * @dev: Host device of the @base mapping + * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface + * @map: Map object describing the register block information found + * + * Probe for device register information and return it in map object.
+ */ +void cxl_probe_device_regs(struct device *dev, void __iomem *base, + struct cxl_device_reg_map *map) +{ + int cap, cap_count; + u64 cap_array; + + *map = (struct cxl_device_reg_map){ 0 }; + + cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET); + if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) != + CXLDEV_CAP_ARRAY_CAP_ID) + return; + + cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); + + for (cap = 1; cap <= cap_count; cap++) { + u32 offset, length; + u16 cap_id; + + cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, + readl(base + cap * 0x10)); + offset = readl(base + cap * 0x10 + 0x4); + length = readl(base + cap * 0x10 + 0x8); + + switch (cap_id) { + case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: + dev_dbg(dev, "found Status capability (0x%x)\n", offset); + + map->status.valid = true; + map->status.offset = offset; + map->status.size = length; + break; + case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: + dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); + map->mbox.valid = true; + map->mbox.offset = offset; + map->mbox.size = length; + break; + case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: + dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); + break; + case CXLDEV_CAP_CAP_ID_MEMDEV: + dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); + map->memdev.valid = true; + map->memdev.offset = offset; + map->memdev.size = length; + break; + default: + if (cap_id >= 0x8000) + dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset); + else + dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); + break; + } + } +} +EXPORT_SYMBOL_GPL(cxl_probe_device_regs); + +static void __iomem *devm_cxl_iomap_block(struct device *dev, + resource_size_t addr, + resource_size_t length) +{ + void __iomem *ret_val; + struct resource *res; + + res = devm_request_mem_region(dev, addr, length, dev_name(dev)); + if (!res) { + resource_size_t end = addr + length - 1; + + dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end); + return NULL; + } + + ret_val = devm_ioremap(dev, addr, length); + if (!ret_val) + dev_err(dev, "Failed to map region %pr\n", res); + + return ret_val; +} + +int cxl_map_component_regs(struct pci_dev *pdev, + struct cxl_component_regs *regs, + struct cxl_register_map *map) +{ + struct device *dev = &pdev->dev; + resource_size_t phys_addr; + resource_size_t length; + + phys_addr = pci_resource_start(pdev, map->barno); + phys_addr += map->block_offset; + + phys_addr += map->component_map.hdm_decoder.offset; + length = map->component_map.hdm_decoder.size; + regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); + if (!regs->hdm_decoder) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_map_component_regs); + +int cxl_map_device_regs(struct pci_dev *pdev, + struct cxl_device_regs *regs, + struct cxl_register_map *map) +{ + struct device *dev = &pdev->dev; + resource_size_t phys_addr; + + phys_addr = pci_resource_start(pdev, map->barno); + phys_addr += map->block_offset; + + if (map->device_map.status.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.status.offset; + length = map->device_map.status.size; + regs->status = devm_cxl_iomap_block(dev, addr, length); + if (!regs->status) + return -ENOMEM; + } + + if (map->device_map.mbox.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.mbox.offset; + length = map->device_map.mbox.size; + regs->mbox = devm_cxl_iomap_block(dev, addr, length); + if (!regs->mbox) + return -ENOMEM; + 
} + + if (map->device_map.memdev.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.memdev.offset; + length = map->device_map.memdev.size; + regs->memdev = devm_cxl_iomap_block(dev, addr, length); + if (!regs->memdev) + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_map_device_regs); + +/** + * __cxl_driver_register - register a driver for the cxl bus + * @cxl_drv: cxl driver structure to attach + * @owner: owning module/driver + * @modname: KBUILD_MODNAME for parent driver + */ +int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, + const char *modname) +{ + if (!cxl_drv->probe) { + pr_debug("%s ->probe() must be specified\n", modname); + return -EINVAL; + } + + if (!cxl_drv->name) { + pr_debug("%s ->name must be specified\n", modname); + return -EINVAL; + } + + if (!cxl_drv->id) { + pr_debug("%s ->id must be specified\n", modname); + return -EINVAL; + } + + cxl_drv->drv.bus = &cxl_bus_type; + cxl_drv->drv.owner = owner; + cxl_drv->drv.mod_name = modname; + cxl_drv->drv.name = cxl_drv->name; + + return driver_register(&cxl_drv->drv); +} +EXPORT_SYMBOL_GPL(__cxl_driver_register); + +void cxl_driver_unregister(struct cxl_driver *cxl_drv) +{ + driver_unregister(&cxl_drv->drv); +} +EXPORT_SYMBOL_GPL(cxl_driver_unregister); + +static int cxl_device_id(struct device *dev) +{ + if (dev->type == &cxl_nvdimm_bridge_type) + return CXL_DEVICE_NVDIMM_BRIDGE; + if (dev->type == &cxl_nvdimm_type) + return CXL_DEVICE_NVDIMM; + return 0; +} + +static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT, + cxl_device_id(dev)); +} + +static int cxl_bus_match(struct device *dev, struct device_driver *drv) +{ + return cxl_device_id(dev) == to_cxl_drv(drv)->id; +} + +static int cxl_bus_probe(struct device *dev) +{ + return to_cxl_drv(dev->driver)->probe(dev); +} + +static int cxl_bus_remove(struct device *dev) +{ + struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); + + if (cxl_drv->remove) + cxl_drv->remove(dev); + return 0; +} + +struct bus_type cxl_bus_type = { + .name = "cxl", + .uevent = cxl_bus_uevent, + .match = cxl_bus_match, + .probe = cxl_bus_probe, + .remove = cxl_bus_remove, +}; +EXPORT_SYMBOL_GPL(cxl_bus_type); + +static __init int cxl_core_init(void) +{ + return bus_register(&cxl_bus_type); +} + +static void cxl_core_exit(void) +{ + bus_unregister(&cxl_bus_type); +} + +module_init(cxl_core_init); +module_exit(cxl_core_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h new file mode 100644 index 000000000000..8f02d02b26b4 --- /dev/null +++ b/drivers/cxl/cxlmem.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2020-2021 Intel Corporation. 
*/ +#ifndef __CXL_MEM_H__ +#define __CXL_MEM_H__ +#include <linux/cdev.h> +#include "cxl.h" + +/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */ +#define CXLMDEV_STATUS_OFFSET 0x0 +#define CXLMDEV_DEV_FATAL BIT(0) +#define CXLMDEV_FW_HALT BIT(1) +#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2) +#define CXLMDEV_MS_NOT_READY 0 +#define CXLMDEV_MS_READY 1 +#define CXLMDEV_MS_ERROR 2 +#define CXLMDEV_MS_DISABLED 3 +#define CXLMDEV_READY(status) \ + (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \ + CXLMDEV_MS_READY) +#define CXLMDEV_MBOX_IF_READY BIT(4) +#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5) +#define CXLMDEV_RESET_NEEDED_NOT 0 +#define CXLMDEV_RESET_NEEDED_COLD 1 +#define CXLMDEV_RESET_NEEDED_WARM 2 +#define CXLMDEV_RESET_NEEDED_HOT 3 +#define CXLMDEV_RESET_NEEDED_CXL 4 +#define CXLMDEV_RESET_NEEDED(status) \ + (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \ + CXLMDEV_RESET_NEEDED_NOT) + +/* + * An entire PCI topology full of devices should be enough for any + * config + */ +#define CXL_MEM_MAX_DEVS 65536 + +/** + * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device + * @dev: driver core device object + * @cdev: char dev core object for ioctl operations + * @cxlm: pointer to the parent device driver data + * @id: id number of this memdev instance. + */ +struct cxl_memdev { + struct device dev; + struct cdev cdev; + struct cxl_mem *cxlm; + int id; +}; + +/** + * struct cxl_mem - A CXL memory device + * @pdev: The PCI device associated with this CXL device. + * @cxlmd: Logical memory device chardev / interface + * @regs: Parsed register blocks + * @payload_size: Size of space for payload + * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) + * @lsa_size: Size of Label Storage Area + * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device) + * @mbox_mutex: Mutex to synchronize mailbox access. + * @firmware_version: Firmware version for the memory device. + * @enabled_cmds: Hardware commands found enabled in CEL. + * @pmem_range: Persistent memory capacity information. + * @ram_range: Volatile memory capacity information. + */ +struct cxl_mem { + struct pci_dev *pdev; + struct cxl_memdev *cxlmd; + + struct cxl_regs regs; + + size_t payload_size; + size_t lsa_size; + struct mutex mbox_mutex; /* Protects device mailbox and firmware */ + char firmware_version[0x10]; + unsigned long *enabled_cmds; + + struct range pmem_range; + struct range ram_range; +}; +#endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/mem.h b/drivers/cxl/mem.h deleted file mode 100644 index 8f02d02b26b4..000000000000 --- a/drivers/cxl/mem.h +++ /dev/null @@ -1,81 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright(c) 2020-2021 Intel Corporation.
*/ -#ifndef __CXL_MEM_H__ -#define __CXL_MEM_H__ -#include -#include "cxl.h" - -/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */ -#define CXLMDEV_STATUS_OFFSET 0x0 -#define CXLMDEV_DEV_FATAL BIT(0) -#define CXLMDEV_FW_HALT BIT(1) -#define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2) -#define CXLMDEV_MS_NOT_READY 0 -#define CXLMDEV_MS_READY 1 -#define CXLMDEV_MS_ERROR 2 -#define CXLMDEV_MS_DISABLED 3 -#define CXLMDEV_READY(status) \ - (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \ - CXLMDEV_MS_READY) -#define CXLMDEV_MBOX_IF_READY BIT(4) -#define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5) -#define CXLMDEV_RESET_NEEDED_NOT 0 -#define CXLMDEV_RESET_NEEDED_COLD 1 -#define CXLMDEV_RESET_NEEDED_WARM 2 -#define CXLMDEV_RESET_NEEDED_HOT 3 -#define CXLMDEV_RESET_NEEDED_CXL 4 -#define CXLMDEV_RESET_NEEDED(status) \ - (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \ - CXLMDEV_RESET_NEEDED_NOT) - -/* - * An entire PCI topology full of devices should be enough for any - * config - */ -#define CXL_MEM_MAX_DEVS 65536 - -/** - * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device - * @dev: driver core device object - * @cdev: char dev core object for ioctl operations - * @cxlm: pointer to the parent device driver data - * @id: id number of this memdev instance. - */ -struct cxl_memdev { - struct device dev; - struct cdev cdev; - struct cxl_mem *cxlm; - int id; -}; - -/** - * struct cxl_mem - A CXL memory device - * @pdev: The PCI device associated with this CXL device. - * @cxlmd: Logical memory device chardev / interface - * @regs: Parsed register blocks - * @payload_size: Size of space for payload - * (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register) - * @lsa_size: Size of Label Storage Area - * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device) - * @mbox_mutex: Mutex to synchronize mailbox access. - * @firmware_version: Firmware version for the memory device. - * @enabled_cmds: Hardware commands found enabled in CEL. - * @pmem_range: Persistent memory capacity information. - * @ram_range: Volatile memory capacity information. - */ -struct cxl_mem { - struct pci_dev *pdev; - struct cxl_memdev *cxlmd; - - struct cxl_regs regs; - - size_t payload_size; - size_t lsa_size; - struct mutex mbox_mutex; /* Protects device mailbox and firmware */ - char firmware_version[0x10]; - unsigned long *enabled_cmds; - - struct range pmem_range; - struct range ram_range; -}; -#endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 4cf351a3cf99..a945c5fda292 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -12,9 +12,9 @@ #include #include #include +#include "cxlmem.h" #include "pci.h" #include "cxl.h" -#include "mem.h" /** * DOC: cxl pci diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 0088e41dd2f3..9652c3ee41e7 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -6,7 +6,7 @@ #include #include #include -#include "mem.h" +#include "cxlmem.h" #include "cxl.h" /* -- cgit v1.2.3 From 95aaed266801a801add6d17cd3a4f7deb610af2e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 2 Aug 2021 10:29:43 -0700 Subject: cxl/core: Improve CXL core kernel docs Now that CXL core's role is well understood, the documentation should reflect that information. 
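For illustration of the registration model this documentation update describes, a minimal sketch of a hypothetical driver binding to the cxl bus follows. The driver name, id choice, and probe body are invented for the example; it assumes the cxl_driver_register()/cxl_driver_unregister() wrappers that cxl.h declares around __cxl_driver_register(), and MODULE_IMPORT_NS(CXL) because the core exports its symbols into the CXL namespace.

#include <linux/module.h>
#include <linux/device.h>
#include "cxl.h"

/* Hypothetical consumer: binds to nvdimm-bridge devices on the cxl bus */
static int example_bridge_probe(struct device *dev)
{
	dev_dbg(dev, "bound to %s\n", dev_name(dev));
	return 0;
}

static struct cxl_driver example_bridge_driver = {
	.name = "example_bridge",	/* required, see __cxl_driver_register() */
	.probe = example_bridge_probe,	/* required */
	.id = CXL_DEVICE_NVDIMM_BRIDGE,	/* compared by cxl_bus_match() */
};

static int __init example_init(void)
{
	return cxl_driver_register(&example_bridge_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	cxl_driver_unregister(&example_bridge_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);

Note that ->remove() is optional: cxl_bus_remove() only invokes it when present.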
Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162792538379.368511.9055351193841619781.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/bus.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 0815eec23944..6ea69d70086b 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -12,8 +12,15 @@ /** * DOC: cxl core * - * The CXL core provides a sysfs hierarchy for control devices and a rendezvous - * point for cross-device interleave coordination through cxl ports. + * The CXL core provides a set of interfaces that can be consumed by CXL aware + * drivers. The interfaces allow for creation, modification, and destruction of + * regions, memory devices, ports, and decoders. CXL aware drivers must register + * with the CXL core via these interfaces in order to participate in + * cross-device interleave coordination. The CXL core also establishes and + * maintains the bridge to the nvdimm subsystem. + * + * The CXL core introduces a sysfs hierarchy to control the devices that are + * instantiated by the core. */ static DEFINE_IDA(cxl_port_ida); -- cgit v1.2.3 From 06737cd0d216be1cf6e8052e4fca0d391298f184 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 2 Aug 2021 10:29:49 -0700 Subject: cxl/core: Move pmem functionality Refactor the pmem / nvdimm-bridge functionality from core/bus.c to core/pmem.c. Introduce drivers/cxl/core/core.h to communicate data structures and helpers between the core bus and other functionality that registers devices on the bus. Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162792538899.368511.3881663908293411300.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- Documentation/driver-api/cxl/memory-devices.rst | 3 + drivers/cxl/core/Makefile | 1 + drivers/cxl/core/bus.c | 205 +----------------------- drivers/cxl/core/core.h | 17 ++ drivers/cxl/core/pmem.c | 204 +++++++++++++++++++++++ 5 files changed, 228 insertions(+), 202 deletions(-) create mode 100644 drivers/cxl/core/core.h create mode 100644 drivers/cxl/core/pmem.c (limited to 'drivers') diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst index a86e2c7c551a..e65c0ba82229 100644 --- a/Documentation/driver-api/cxl/memory-devices.rst +++ b/Documentation/driver-api/cxl/memory-devices.rst @@ -39,6 +39,9 @@ CXL Core .. kernel-doc:: drivers/cxl/core/bus.c :doc: cxl core +..
kernel-doc:: drivers/cxl/core/pmem.c + :internal: + External Interfaces =================== diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index ad137f96e5c8..e037521fe02b 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_CXL_BUS) += cxl_core.o ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl cxl_core-y := bus.o +cxl_core-y += pmem.o diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 6ea69d70086b..408654ad70db 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -8,6 +8,7 @@ #include #include #include +#include "core.h" /** * DOC: cxl core @@ -37,7 +38,7 @@ static struct attribute *cxl_base_attributes[] = { NULL, }; -static struct attribute_group cxl_base_attribute_group = { +struct attribute_group cxl_base_attribute_group = { .attrs = cxl_base_attributes, }; @@ -514,11 +515,6 @@ err: return ERR_PTR(rc); } -static void unregister_dev(void *dev) -{ - device_unregister(dev); -} - struct cxl_decoder * devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, resource_size_t base, resource_size_t len, @@ -543,7 +539,7 @@ devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets, if (rc) goto err; - rc = devm_add_action_or_reset(host, unregister_dev, dev); + rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev); if (rc) return ERR_PTR(rc); return cxld; @@ -626,201 +622,6 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, } EXPORT_SYMBOL_GPL(cxl_probe_component_regs); -static void cxl_nvdimm_bridge_release(struct device *dev) -{ - struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); - - kfree(cxl_nvb); -} - -static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_nvdimm_bridge_type = { - .name = "cxl_nvdimm_bridge", - .release = cxl_nvdimm_bridge_release, - .groups = cxl_nvdimm_bridge_attribute_groups, -}; - -struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) -{ - if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type, - "not a cxl_nvdimm_bridge device\n")) - return NULL; - return container_of(dev, struct cxl_nvdimm_bridge, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); - -static struct cxl_nvdimm_bridge * -cxl_nvdimm_bridge_alloc(struct cxl_port *port) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - struct device *dev; - - cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); - if (!cxl_nvb) - return ERR_PTR(-ENOMEM); - - dev = &cxl_nvb->dev; - cxl_nvb->port = port; - cxl_nvb->state = CXL_NVB_NEW; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &port->dev; - dev->bus = &cxl_bus_type; - dev->type = &cxl_nvdimm_bridge_type; - - return cxl_nvb; -} - -static void unregister_nvb(void *_cxl_nvb) -{ - struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; - bool flush; - - /* - * If the bridge was ever activated then there might be in-flight state - * work to flush. Once the state has been changed to 'dead' then no new - * work can be queued by user-triggered bind. - */ - device_lock(&cxl_nvb->dev); - flush = cxl_nvb->state != CXL_NVB_NEW; - cxl_nvb->state = CXL_NVB_DEAD; - device_unlock(&cxl_nvb->dev); - - /* - * Even though the device core will trigger device_release_driver() - * before the unregister, it does not know about the fact that - * cxl_nvdimm_bridge_driver defers ->remove() work. 
So, do the driver - * release not and flush it before tearing down the nvdimm device - * hierarchy. - */ - device_release_driver(&cxl_nvb->dev); - if (flush) - flush_work(&cxl_nvb->state_work); - device_unregister(&cxl_nvb->dev); -} - -struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, - struct cxl_port *port) -{ - struct cxl_nvdimm_bridge *cxl_nvb; - struct device *dev; - int rc; - - if (!IS_ENABLED(CONFIG_CXL_PMEM)) - return ERR_PTR(-ENXIO); - - cxl_nvb = cxl_nvdimm_bridge_alloc(port); - if (IS_ERR(cxl_nvb)) - return cxl_nvb; - - dev = &cxl_nvb->dev; - rc = dev_set_name(dev, "nvdimm-bridge"); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); - if (rc) - return ERR_PTR(rc); - - return cxl_nvb; - -err: - put_device(dev); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge); - -static void cxl_nvdimm_release(struct device *dev) -{ - struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); - - kfree(cxl_nvd); -} - -static const struct attribute_group *cxl_nvdimm_attribute_groups[] = { - &cxl_base_attribute_group, - NULL, -}; - -static const struct device_type cxl_nvdimm_type = { - .name = "cxl_nvdimm", - .release = cxl_nvdimm_release, - .groups = cxl_nvdimm_attribute_groups, -}; - -bool is_cxl_nvdimm(struct device *dev) -{ - return dev->type == &cxl_nvdimm_type; -} -EXPORT_SYMBOL_GPL(is_cxl_nvdimm); - -struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) -{ - if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev), - "not a cxl_nvdimm device\n")) - return NULL; - return container_of(dev, struct cxl_nvdimm, dev); -} -EXPORT_SYMBOL_GPL(to_cxl_nvdimm); - -static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) -{ - struct cxl_nvdimm *cxl_nvd; - struct device *dev; - - cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL); - if (!cxl_nvd) - return ERR_PTR(-ENOMEM); - - dev = &cxl_nvd->dev; - cxl_nvd->cxlmd = cxlmd; - device_initialize(dev); - device_set_pm_not_required(dev); - dev->parent = &cxlmd->dev; - dev->bus = &cxl_bus_type; - dev->type = &cxl_nvdimm_type; - - return cxl_nvd; -} - -int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) -{ - struct cxl_nvdimm *cxl_nvd; - struct device *dev; - int rc; - - cxl_nvd = cxl_nvdimm_alloc(cxlmd); - if (IS_ERR(cxl_nvd)) - return PTR_ERR(cxl_nvd); - - dev = &cxl_nvd->dev; - rc = dev_set_name(dev, "pmem%d", cxlmd->id); - if (rc) - goto err; - - rc = device_add(dev); - if (rc) - goto err; - - dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), - dev_name(dev)); - - return devm_add_action_or_reset(host, unregister_dev, dev); - -err: - put_device(dev); - return rc; -} -EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm); - /** * cxl_probe_device_regs() - Detect CXL Device register blocks * @dev: Host device of the @base mapping diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h new file mode 100644 index 000000000000..49045daf8bd7 --- /dev/null +++ b/drivers/cxl/core/core.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2020 Intel Corporation. 
*/ + +#ifndef __CXL_CORE_H__ +#define __CXL_CORE_H__ + +extern const struct device_type cxl_nvdimm_bridge_type; +extern const struct device_type cxl_nvdimm_type; + +extern struct attribute_group cxl_base_attribute_group; + +static inline void unregister_cxl_dev(void *dev) +{ + device_unregister(dev); +} + +#endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c new file mode 100644 index 000000000000..69c97cc0d945 --- /dev/null +++ b/drivers/cxl/core/pmem.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. */ + +#include +#include +#include +#include + +#include "core.h" + +static void cxl_nvdimm_bridge_release(struct device *dev) +{ + struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); + + kfree(cxl_nvb); +} + +static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +const struct device_type cxl_nvdimm_bridge_type = { + .name = "cxl_nvdimm_bridge", + .release = cxl_nvdimm_bridge_release, + .groups = cxl_nvdimm_bridge_attribute_groups, +}; + +struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) +{ + if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type, + "not a cxl_nvdimm_bridge device\n")) + return NULL; + return container_of(dev, struct cxl_nvdimm_bridge, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge); + +static struct cxl_nvdimm_bridge * +cxl_nvdimm_bridge_alloc(struct cxl_port *port) +{ + struct cxl_nvdimm_bridge *cxl_nvb; + struct device *dev; + + cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL); + if (!cxl_nvb) + return ERR_PTR(-ENOMEM); + + dev = &cxl_nvb->dev; + cxl_nvb->port = port; + cxl_nvb->state = CXL_NVB_NEW; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &port->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_nvdimm_bridge_type; + + return cxl_nvb; +} + +static void unregister_nvb(void *_cxl_nvb) +{ + struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb; + bool flush; + + /* + * If the bridge was ever activated then there might be in-flight state + * work to flush. Once the state has been changed to 'dead' then no new + * work can be queued by user-triggered bind. + */ + device_lock(&cxl_nvb->dev); + flush = cxl_nvb->state != CXL_NVB_NEW; + cxl_nvb->state = CXL_NVB_DEAD; + device_unlock(&cxl_nvb->dev); + + /* + * Even though the device core will trigger device_release_driver() + * before the unregister, it does not know about the fact that + * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver + * release now and flush it before tearing down the nvdimm device + * hierarchy.
+ */ + device_release_driver(&cxl_nvb->dev); + if (flush) + flush_work(&cxl_nvb->state_work); + device_unregister(&cxl_nvb->dev); +} + +struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, + struct cxl_port *port) +{ + struct cxl_nvdimm_bridge *cxl_nvb; + struct device *dev; + int rc; + + if (!IS_ENABLED(CONFIG_CXL_PMEM)) + return ERR_PTR(-ENXIO); + + cxl_nvb = cxl_nvdimm_bridge_alloc(port); + if (IS_ERR(cxl_nvb)) + return cxl_nvb; + + dev = &cxl_nvb->dev; + rc = dev_set_name(dev, "nvdimm-bridge"); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); + if (rc) + return ERR_PTR(rc); + + return cxl_nvb; + +err: + put_device(dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge); + +static void cxl_nvdimm_release(struct device *dev) +{ + struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); + + kfree(cxl_nvd); +} + +static const struct attribute_group *cxl_nvdimm_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +const struct device_type cxl_nvdimm_type = { + .name = "cxl_nvdimm", + .release = cxl_nvdimm_release, + .groups = cxl_nvdimm_attribute_groups, +}; + +bool is_cxl_nvdimm(struct device *dev) +{ + return dev->type == &cxl_nvdimm_type; +} +EXPORT_SYMBOL_GPL(is_cxl_nvdimm); + +struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev), + "not a cxl_nvdimm device\n")) + return NULL; + return container_of(dev, struct cxl_nvdimm, dev); +} +EXPORT_SYMBOL_GPL(to_cxl_nvdimm); + +static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) +{ + struct cxl_nvdimm *cxl_nvd; + struct device *dev; + + cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL); + if (!cxl_nvd) + return ERR_PTR(-ENOMEM); + + dev = &cxl_nvd->dev; + cxl_nvd->cxlmd = cxlmd; + device_initialize(dev); + device_set_pm_not_required(dev); + dev->parent = &cxlmd->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_nvdimm_type; + + return cxl_nvd; +} + +int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) +{ + struct cxl_nvdimm *cxl_nvd; + struct device *dev; + int rc; + + cxl_nvd = cxl_nvdimm_alloc(cxlmd); + if (IS_ERR(cxl_nvd)) + return PTR_ERR(cxl_nvd); + + dev = &cxl_nvd->dev; + rc = dev_set_name(dev, "pmem%d", cxlmd->id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + dev_dbg(host, "%s: register %s\n", dev_name(dev->parent), + dev_name(dev)); + + return devm_add_action_or_reset(host, unregister_cxl_dev, dev); + +err: + put_device(dev); + return rc; +} +EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm); -- cgit v1.2.3 From 0f06157e0135f5563efbc9aadbd93ba3d9322cab Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 3 Aug 2021 07:25:38 -0700 Subject: cxl/core: Move register mapping infrastructure The register mapping infrastructure is large enough to move to its own compilation unit. This also cleans up an unnecessary include of core/bus.c. 
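For orientation, here is a sketch of the calling pattern the relocated helpers support: probe a register block, then map the capabilities that were found. The wrapper name and the insistence on all three device register blocks are assumptions of the example; the cxl_probe_device_regs() and cxl_map_device_regs() signatures are as in the code below.

#include <linux/pci.h>
#include "cxl.h"
#include "cxlmem.h"

/* Sketch: discover device register capabilities, then iomap them */
static int example_setup_device_regs(struct cxl_mem *cxlm, void __iomem *base,
				     struct cxl_register_map *map)
{
	struct device *dev = &cxlm->pdev->dev;

	/* fills map->device_map with the capabilities found at base */
	cxl_probe_device_regs(dev, base, &map->device_map);

	/* this example treats status, mailbox, and memdev as mandatory */
	if (!map->device_map.status.valid || !map->device_map.mbox.valid ||
	    !map->device_map.memdev.valid)
		return -ENXIO;

	/* devm-managed request + ioremap of each valid capability */
	return cxl_map_device_regs(cxlm->pdev, &cxlm->regs.device_regs, map);
}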
Reported-by: kernel test robot Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162800068975.665205.12895551621746585289.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- Documentation/driver-api/cxl/memory-devices.rst | 3 + drivers/cxl/core/Makefile | 1 + drivers/cxl/core/bus.c | 228 ----------------------- drivers/cxl/core/regs.c | 236 ++++++++++++++++++++++++ 4 files changed, 240 insertions(+), 228 deletions(-) create mode 100644 drivers/cxl/core/regs.c (limited to 'drivers') diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst index e65c0ba82229..46847d8c70a0 100644 --- a/Documentation/driver-api/cxl/memory-devices.rst +++ b/Documentation/driver-api/cxl/memory-devices.rst @@ -42,6 +42,9 @@ CXL Core .. kernel-doc:: drivers/cxl/core/pmem.c :internal: +.. kernel-doc:: drivers/cxl/core/regs.c + :internal: + External Interfaces =================== diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index e037521fe02b..a3522d2fbf5b 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_CXL_BUS) += cxl_core.o ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl cxl_core-y := bus.o cxl_core-y += pmem.o +cxl_core-y += regs.o diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index 408654ad70db..c938d8590c9f 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -550,234 +550,6 @@ err: } EXPORT_SYMBOL_GPL(devm_cxl_add_decoder); -/** - * cxl_probe_component_regs() - Detect CXL Component register blocks - * @dev: Host device of the @base mapping - * @base: Mapping containing the HDM Decoder Capability Header - * @map: Map object describing the register block information found - * - * See CXL 2.0 8.2.4 Component Register Layout and Definition - * See CXL 2.0 8.2.5.5 CXL Device Register Interface - * - * Probe for component register information and return it in map object. - */ -void cxl_probe_component_regs(struct device *dev, void __iomem *base, - struct cxl_component_reg_map *map) -{ - int cap, cap_count; - u64 cap_array; - - *map = (struct cxl_component_reg_map) { 0 }; - - /* - * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in - * CXL 2.0 8.2.4 Table 141. 
- */ - base += CXL_CM_OFFSET; - - cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); - - if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { - dev_err(dev, - "Couldn't locate the CXL.cache and CXL.mem capability array header./n"); - return; - } - - /* It's assumed that future versions will be backward compatible */ - cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array); - - for (cap = 1; cap <= cap_count; cap++) { - void __iomem *register_block; - u32 hdr; - int decoder_cnt; - u16 cap_id, offset; - u32 length; - - hdr = readl(base + cap * 0x4); - - cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); - offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); - register_block = base + offset; - - switch (cap_id) { - case CXL_CM_CAP_CAP_ID_HDM: - dev_dbg(dev, "found HDM decoder capability (0x%x)\n", - offset); - - hdr = readl(register_block); - - decoder_cnt = cxl_hdm_decoder_count(hdr); - length = 0x20 * decoder_cnt + 0x10; - - map->hdm_decoder.valid = true; - map->hdm_decoder.offset = CXL_CM_OFFSET + offset; - map->hdm_decoder.size = length; - break; - default: - dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, - offset); - break; - } - } -} -EXPORT_SYMBOL_GPL(cxl_probe_component_regs); - -/** - * cxl_probe_device_regs() - Detect CXL Device register blocks - * @dev: Host device of the @base mapping - * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface - * @map: Map object describing the register block information found - * - * Probe for device register information and return it in map object. - */ -void cxl_probe_device_regs(struct device *dev, void __iomem *base, - struct cxl_device_reg_map *map) -{ - int cap, cap_count; - u64 cap_array; - - *map = (struct cxl_device_reg_map){ 0 }; - - cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET); - if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) != - CXLDEV_CAP_ARRAY_CAP_ID) - return; - - cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); - - for (cap = 1; cap <= cap_count; cap++) { - u32 offset, length; - u16 cap_id; - - cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, - readl(base + cap * 0x10)); - offset = readl(base + cap * 0x10 + 0x4); - length = readl(base + cap * 0x10 + 0x8); - - switch (cap_id) { - case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: - dev_dbg(dev, "found Status capability (0x%x)\n", offset); - - map->status.valid = true; - map->status.offset = offset; - map->status.size = length; - break; - case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: - dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); - map->mbox.valid = true; - map->mbox.offset = offset; - map->mbox.size = length; - break; - case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: - dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); - break; - case CXLDEV_CAP_CAP_ID_MEMDEV: - dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); - map->memdev.valid = true; - map->memdev.offset = offset; - map->memdev.size = length; - break; - default: - if (cap_id >= 0x8000) - dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset); - else - dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); - break; - } - } -} -EXPORT_SYMBOL_GPL(cxl_probe_device_regs); - -static void __iomem *devm_cxl_iomap_block(struct device *dev, - resource_size_t addr, - resource_size_t length) -{ - void __iomem *ret_val; - struct resource *res; - - res = devm_request_mem_region(dev, addr, length, dev_name(dev)); - if (!res) { - resource_size_t end = addr + length - 1; - - dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end); - 
return NULL; - } - - ret_val = devm_ioremap(dev, addr, length); - if (!ret_val) - dev_err(dev, "Failed to map region %pr\n", res); - - return ret_val; -} - -int cxl_map_component_regs(struct pci_dev *pdev, - struct cxl_component_regs *regs, - struct cxl_register_map *map) -{ - struct device *dev = &pdev->dev; - resource_size_t phys_addr; - resource_size_t length; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - phys_addr += map->component_map.hdm_decoder.offset; - length = map->component_map.hdm_decoder.size; - regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); - if (!regs->hdm_decoder) - return -ENOMEM; - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_map_component_regs); - -int cxl_map_device_regs(struct pci_dev *pdev, - struct cxl_device_regs *regs, - struct cxl_register_map *map) -{ - struct device *dev = &pdev->dev; - resource_size_t phys_addr; - - phys_addr = pci_resource_start(pdev, map->barno); - phys_addr += map->block_offset; - - if (map->device_map.status.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.status.offset; - length = map->device_map.status.size; - regs->status = devm_cxl_iomap_block(dev, addr, length); - if (!regs->status) - return -ENOMEM; - } - - if (map->device_map.mbox.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.mbox.offset; - length = map->device_map.mbox.size; - regs->mbox = devm_cxl_iomap_block(dev, addr, length); - if (!regs->mbox) - return -ENOMEM; - } - - if (map->device_map.memdev.valid) { - resource_size_t addr; - resource_size_t length; - - addr = phys_addr + map->device_map.memdev.offset; - length = map->device_map.memdev.size; - regs->memdev = devm_cxl_iomap_block(dev, addr, length); - if (!regs->memdev) - return -ENOMEM; - } - - return 0; -} -EXPORT_SYMBOL_GPL(cxl_map_device_regs); - /** * __cxl_driver_register - register a driver for the cxl bus * @cxl_drv: cxl driver structure to attach diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c new file mode 100644 index 000000000000..8535a7b94f28 --- /dev/null +++ b/drivers/cxl/core/regs.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. */ + +#include +#include +#include +#include +#include + +/** + * cxl_probe_component_regs() - Detect CXL Component register blocks + * @dev: Host device of the @base mapping + * @base: Mapping containing the HDM Decoder Capability Header + * @map: Map object describing the register block information found + * + * See CXL 2.0 8.2.4 Component Register Layout and Definition + * See CXL 2.0 8.2.5.5 CXL Device Register Interface + * + * Probe for component register information and return it in map object. + */ +void cxl_probe_component_regs(struct device *dev, void __iomem *base, + struct cxl_component_reg_map *map) +{ + int cap, cap_count; + u64 cap_array; + + *map = (struct cxl_component_reg_map) { 0 }; + + /* + * CXL.cache and CXL.mem registers are at offset 0x1000 as defined in + * CXL 2.0 8.2.4 Table 141. 
+ */ + base += CXL_CM_OFFSET; + + cap_array = readq(base + CXL_CM_CAP_HDR_OFFSET); + + if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { + dev_err(dev, + "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); + return; + } + + /* It's assumed that future versions will be backward compatible */ + cap_count = FIELD_GET(CXL_CM_CAP_HDR_ARRAY_SIZE_MASK, cap_array); + + for (cap = 1; cap <= cap_count; cap++) { + void __iomem *register_block; + u32 hdr; + int decoder_cnt; + u16 cap_id, offset; + u32 length; + + hdr = readl(base + cap * 0x4); + + cap_id = FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, hdr); + offset = FIELD_GET(CXL_CM_CAP_PTR_MASK, hdr); + register_block = base + offset; + + switch (cap_id) { + case CXL_CM_CAP_CAP_ID_HDM: + dev_dbg(dev, "found HDM decoder capability (0x%x)\n", + offset); + + hdr = readl(register_block); + + decoder_cnt = cxl_hdm_decoder_count(hdr); + length = 0x20 * decoder_cnt + 0x10; + + map->hdm_decoder.valid = true; + map->hdm_decoder.offset = CXL_CM_OFFSET + offset; + map->hdm_decoder.size = length; + break; + default: + dev_dbg(dev, "Unknown CM cap ID: %d (0x%x)\n", cap_id, + offset); + break; + } + } +} +EXPORT_SYMBOL_GPL(cxl_probe_component_regs); + +/** + * cxl_probe_device_regs() - Detect CXL Device register blocks + * @dev: Host device of the @base mapping + * @base: Mapping of CXL 2.0 8.2.8 CXL Device Register Interface + * @map: Map object describing the register block information found + * + * Probe for device register information and return it in map object. + */ +void cxl_probe_device_regs(struct device *dev, void __iomem *base, + struct cxl_device_reg_map *map) +{ + int cap, cap_count; + u64 cap_array; + + *map = (struct cxl_device_reg_map){ 0 }; + + cap_array = readq(base + CXLDEV_CAP_ARRAY_OFFSET); + if (FIELD_GET(CXLDEV_CAP_ARRAY_ID_MASK, cap_array) != + CXLDEV_CAP_ARRAY_CAP_ID) + return; + + cap_count = FIELD_GET(CXLDEV_CAP_ARRAY_COUNT_MASK, cap_array); + + for (cap = 1; cap <= cap_count; cap++) { + u32 offset, length; + u16 cap_id; + + cap_id = FIELD_GET(CXLDEV_CAP_HDR_CAP_ID_MASK, + readl(base + cap * 0x10)); + offset = readl(base + cap * 0x10 + 0x4); + length = readl(base + cap * 0x10 + 0x8); + + switch (cap_id) { + case CXLDEV_CAP_CAP_ID_DEVICE_STATUS: + dev_dbg(dev, "found Status capability (0x%x)\n", offset); + + map->status.valid = true; + map->status.offset = offset; + map->status.size = length; + break; + case CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX: + dev_dbg(dev, "found Mailbox capability (0x%x)\n", offset); + map->mbox.valid = true; + map->mbox.offset = offset; + map->mbox.size = length; + break; + case CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX: + dev_dbg(dev, "found Secondary Mailbox capability (0x%x)\n", offset); + break; + case CXLDEV_CAP_CAP_ID_MEMDEV: + dev_dbg(dev, "found Memory Device capability (0x%x)\n", offset); + map->memdev.valid = true; + map->memdev.offset = offset; + map->memdev.size = length; + break; + default: + if (cap_id >= 0x8000) + dev_dbg(dev, "Vendor cap ID: %#x offset: %#x\n", cap_id, offset); + else + dev_dbg(dev, "Unknown cap ID: %#x offset: %#x\n", cap_id, offset); + break; + } + } +} +EXPORT_SYMBOL_GPL(cxl_probe_device_regs); + +static void __iomem *devm_cxl_iomap_block(struct device *dev, + resource_size_t addr, + resource_size_t length) +{ + void __iomem *ret_val; + struct resource *res; + + res = devm_request_mem_region(dev, addr, length, dev_name(dev)); + if (!res) { + resource_size_t end = addr + length - 1; + + dev_err(dev, "Failed to request region %pa-%pa\n", &addr, &end); +
return NULL; + } + + ret_val = devm_ioremap(dev, addr, length); + if (!ret_val) + dev_err(dev, "Failed to map region %pr\n", res); + + return ret_val; +} + +int cxl_map_component_regs(struct pci_dev *pdev, + struct cxl_component_regs *regs, + struct cxl_register_map *map) +{ + struct device *dev = &pdev->dev; + resource_size_t phys_addr; + resource_size_t length; + + phys_addr = pci_resource_start(pdev, map->barno); + phys_addr += map->block_offset; + + phys_addr += map->component_map.hdm_decoder.offset; + length = map->component_map.hdm_decoder.size; + regs->hdm_decoder = devm_cxl_iomap_block(dev, phys_addr, length); + if (!regs->hdm_decoder) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_map_component_regs); + +int cxl_map_device_regs(struct pci_dev *pdev, + struct cxl_device_regs *regs, + struct cxl_register_map *map) +{ + struct device *dev = &pdev->dev; + resource_size_t phys_addr; + + phys_addr = pci_resource_start(pdev, map->barno); + phys_addr += map->block_offset; + + if (map->device_map.status.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.status.offset; + length = map->device_map.status.size; + regs->status = devm_cxl_iomap_block(dev, addr, length); + if (!regs->status) + return -ENOMEM; + } + + if (map->device_map.mbox.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.mbox.offset; + length = map->device_map.mbox.size; + regs->mbox = devm_cxl_iomap_block(dev, addr, length); + if (!regs->mbox) + return -ENOMEM; + } + + if (map->device_map.memdev.valid) { + resource_size_t addr; + resource_size_t length; + + addr = phys_addr + map->device_map.memdev.offset; + length = map->device_map.memdev.size; + regs->memdev = devm_cxl_iomap_block(dev, addr, length); + if (!regs->memdev) + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxl_map_device_regs); -- cgit v1.2.3 From 9cc238c7a526dba9ee8c210fa2828886fc65db66 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 2 Aug 2021 10:29:59 -0700 Subject: cxl/pci: Introduce cdevm_file_operations In preparation for moving cxl_memdev allocation to the core, introduce cdevm_file_operations to coordinate file operations shutdown relative to driver data release. The motivation for moving cxl_memdev allocation to the core (beyond better file organization of sysfs attributes in core/ and drivers in cxl/), is that device lifetime is longer than module lifetime. The cxl_pci module should be free to come and go without needing to coordinate with devices that need the text associated with cxl_memdev_release() to stay resident. The move will fix a use after free bug when looping driver load / unload with CONFIG_DEBUG_KOBJECT_RELEASE=y. Another motivation for passing in file_operations to the core cxl_memdev creation flow is to allow for alternate drivers, like unit test code, to define their own ioctl backends. 
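The contract is easiest to see from the consumer side. Below is a sketch of an ioctl handler written against it; the rwsem and handler here are illustrative stand-ins for the cxl_memdev_rwsem scheme in the diff that follows, where ->shutdown() takes the semaphore for write and severs the driver data.

#include <linux/fs.h>
#include <linux/rwsem.h>
#include "cxlmem.h"

static DECLARE_RWSEM(example_memdev_rwsem);

/*
 * Sketch: a fop may only dereference cxlmd->cxlm while holding the rwsem
 * for read, after re-checking that ->shutdown() has not disconnected it.
 */
static long example_memdev_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data; /* assumed set in ->open() */
	long rc = -ENXIO;

	down_read(&example_memdev_rwsem);
	if (cxlmd->cxlm) {
		/* ... dispatch cmd against cxlmd->cxlm here ... */
		rc = 0;
	}
	up_read(&example_memdev_rwsem);

	return rc;
}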
Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162792539962.368511.2962268954245340288.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/cxlmem.h | 15 ++++++++++++ drivers/cxl/pci.c | 65 ++++++++++++++++++++++++++++++---------------------- 2 files changed, 53 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 8f02d02b26b4..0cd463de1342 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -34,6 +34,21 @@ */ #define CXL_MEM_MAX_DEVS 65536 +/** + * struct cdevm_file_operations - devm coordinated cdev file operations + * @fops: file operations that are synchronized against @shutdown + * @shutdown: disconnect driver data + * + * @shutdown is invoked in the devres release path to disconnect any + * driver instance data from @dev. It assumes synchronization with any + * fops operation that requires driver data. After @shutdown an + * operation may only reference @device data. + */ +struct cdevm_file_operations { + struct file_operations fops; + void (*shutdown)(struct device *dev); +}; + /** * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device * @dev: driver core device object diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index a945c5fda292..f7a5ad5e1f4a 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -806,13 +806,30 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file) return 0; } -static const struct file_operations cxl_memdev_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = cxl_memdev_ioctl, - .open = cxl_memdev_open, - .release = cxl_memdev_release_file, - .compat_ioctl = compat_ptr_ioctl, - .llseek = noop_llseek, +static struct cxl_memdev *to_cxl_memdev(struct device *dev) +{ + return container_of(dev, struct cxl_memdev, dev); +} + +static void cxl_memdev_shutdown(struct device *dev) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + + down_write(&cxl_memdev_rwsem); + cxlmd->cxlm = NULL; + up_write(&cxl_memdev_rwsem); +} + +static const struct cdevm_file_operations cxl_memdev_fops = { + .fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = cxl_memdev_ioctl, + .open = cxl_memdev_open, + .release = cxl_memdev_release_file, + .compat_ioctl = compat_ptr_ioctl, + .llseek = noop_llseek, + }, + .shutdown = cxl_memdev_shutdown, }; static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) @@ -1161,11 +1178,6 @@ free_maps: return ret; } -static struct cxl_memdev *to_cxl_memdev(struct device *dev) -{ - return container_of(dev, struct cxl_memdev, dev); -} - static void cxl_memdev_release(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); @@ -1281,24 +1293,22 @@ static const struct device_type cxl_memdev_type = { .groups = cxl_memdev_attribute_groups, }; -static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd) -{ - down_write(&cxl_memdev_rwsem); - cxlmd->cxlm = NULL; - up_write(&cxl_memdev_rwsem); -} - static void cxl_memdev_unregister(void *_cxlmd) { struct cxl_memdev *cxlmd = _cxlmd; struct device *dev = &cxlmd->dev; + struct cdev *cdev = &cxlmd->cdev; + const struct cdevm_file_operations *cdevm_fops; + + cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops); + cdevm_fops->shutdown(dev); cdev_device_del(&cxlmd->cdev, dev); - cxl_memdev_shutdown(cxlmd); put_device(dev); } -static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm) +static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm, + const struct file_operations *fops) { 
struct pci_dev *pdev = cxlm->pdev; struct cxl_memdev *cxlmd; @@ -1324,7 +1334,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm) device_set_pm_not_required(dev); cdev = &cxlmd->cdev; - cdev_init(cdev, &cxl_memdev_fops); + cdev_init(cdev, fops); return cxlmd; err: @@ -1332,15 +1342,16 @@ err: return ERR_PTR(rc); } -static struct cxl_memdev *devm_cxl_add_memdev(struct device *host, - struct cxl_mem *cxlm) +static struct cxl_memdev * +devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm, + const struct cdevm_file_operations *cdevm_fops) { struct cxl_memdev *cxlmd; struct device *dev; struct cdev *cdev; int rc; - cxlmd = cxl_memdev_alloc(cxlm); + cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops); if (IS_ERR(cxlmd)) return cxlmd; @@ -1370,7 +1381,7 @@ err: * The cdev was briefly live, shutdown any ioctl operations that * saw that state. */ - cxl_memdev_shutdown(cxlmd); + cdevm_fops->shutdown(dev); put_device(dev); return ERR_PTR(rc); } @@ -1611,7 +1622,7 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm); + cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); -- cgit v1.2.3 From 3d135db510240fefd79da46181493d3e3b415f6b Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Mon, 2 Aug 2021 10:30:05 -0700 Subject: cxl/core: Move memdev management to core The motivation for moving cxl_memdev allocation to the core (beyond better file organization of sysfs attributes in core/ and drivers in cxl/) is that device lifetime is longer than module lifetime. The cxl_pci module should be free to come and go without needing to coordinate with devices that need the text associated with cxl_memdev_release() to stay resident. The move fixes a use after free bug when looping driver load / unload with CONFIG_DEBUG_KOBJECT_RELEASE=y. Another motivation for disconnecting cxl_memdev creation from cxl_pci is to enable other drivers, like a unit test driver, to register memdevs.
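After the move, a memdev-registering driver is left with little more than supplying its driver data and a cdevm_file_operations. A condensed sketch follows; the fops bodies are elided and the shutdown body is a placeholder for the disconnect logic, while devm_cxl_add_memdev() has the signature exported below.

#include <linux/pci.h>
#include "cxlmem.h"

static void example_shutdown(struct device *dev)
{
	/* disconnect driver data here, as cxl_memdev_shutdown() does */
}

static const struct cdevm_file_operations example_fops = {
	.fops = {
		.owner = THIS_MODULE,
		/* .open / .unlocked_ioctl / .release as needed */
	},
	.shutdown = example_shutdown,
};

static int example_add_memdev(struct device *host, struct cxl_mem *cxlm)
{
	struct cxl_memdev *cxlmd;

	cxlmd = devm_cxl_add_memdev(host, cxlm, &example_fops);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	/*
	 * cxl_memdev_release() now lives in cxl_core, so the release text
	 * stays resident even if the registering module unloads first.
	 */
	return 0;
}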
Fixes: b39cb1052a5c ("cxl/mem: Register CXL memX devices") Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162792540495.368511.9748638751088219595.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/Makefile | 1 + drivers/cxl/core/bus.c | 16 ++- drivers/cxl/core/core.h | 3 + drivers/cxl/core/memdev.c | 246 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxlmem.h | 15 +-- drivers/cxl/pci.c | 228 +----------------------------------------- 6 files changed, 275 insertions(+), 234 deletions(-) create mode 100644 drivers/cxl/core/memdev.c (limited to 'drivers') diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index a3522d2fbf5b..0fdbf3c6ac1a 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -5,3 +5,4 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl cxl_core-y := bus.o cxl_core-y += pmem.o cxl_core-y += regs.o +cxl_core-y += memdev.o diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c index c938d8590c9f..37b87adaa33f 100644 --- a/drivers/cxl/core/bus.c +++ b/drivers/cxl/core/bus.c @@ -634,12 +634,26 @@ EXPORT_SYMBOL_GPL(cxl_bus_type); static __init int cxl_core_init(void) { - return bus_register(&cxl_bus_type); + int rc; + + rc = cxl_memdev_init(); + if (rc) + return rc; + + rc = bus_register(&cxl_bus_type); + if (rc) + goto err; + return 0; + +err: + cxl_memdev_exit(); + return rc; } static void cxl_core_exit(void) { bus_unregister(&cxl_bus_type); + cxl_memdev_exit(); } module_init(cxl_core_init); diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 49045daf8bd7..036a3c8106b4 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -14,4 +14,7 @@ static inline void unregister_cxl_dev(void *dev) device_unregister(dev); } +int cxl_memdev_init(void); +void cxl_memdev_exit(void); + #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c new file mode 100644 index 000000000000..a9c317e32010 --- /dev/null +++ b/drivers/cxl/core/memdev.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2020 Intel Corporation. 
*/ + +#include +#include +#include +#include +#include +#include "core.h" + +/* + * An entire PCI topology full of devices should be enough for any + * config + */ +#define CXL_MEM_MAX_DEVS 65536 + +static int cxl_mem_major; +static DEFINE_IDA(cxl_memdev_ida); + +static void cxl_memdev_release(struct device *dev) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + + ida_free(&cxl_memdev_ida, cxlmd->id); + kfree(cxlmd); +} + +static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid, + kgid_t *gid) +{ + return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); +} + +static ssize_t firmware_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + + return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version); +} +static DEVICE_ATTR_RO(firmware_version); + +static ssize_t payload_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + + return sysfs_emit(buf, "%zu\n", cxlm->payload_size); +} +static DEVICE_ATTR_RO(payload_max); + +static ssize_t label_storage_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + + return sysfs_emit(buf, "%zu\n", cxlm->lsa_size); +} +static DEVICE_ATTR_RO(label_storage_size); + +static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + unsigned long long len = range_len(&cxlm->ram_range); + + return sysfs_emit(buf, "%#llx\n", len); +} + +static struct device_attribute dev_attr_ram_size = + __ATTR(size, 0444, ram_size_show, NULL); + +static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_memdev *cxlmd = to_cxl_memdev(dev); + struct cxl_mem *cxlm = cxlmd->cxlm; + unsigned long long len = range_len(&cxlm->pmem_range); + + return sysfs_emit(buf, "%#llx\n", len); +} + +static struct device_attribute dev_attr_pmem_size = + __ATTR(size, 0444, pmem_size_show, NULL); + +static struct attribute *cxl_memdev_attributes[] = { + &dev_attr_firmware_version.attr, + &dev_attr_payload_max.attr, + &dev_attr_label_storage_size.attr, + NULL, +}; + +static struct attribute *cxl_memdev_pmem_attributes[] = { + &dev_attr_pmem_size.attr, + NULL, +}; + +static struct attribute *cxl_memdev_ram_attributes[] = { + &dev_attr_ram_size.attr, + NULL, +}; + +static struct attribute_group cxl_memdev_attribute_group = { + .attrs = cxl_memdev_attributes, +}; + +static struct attribute_group cxl_memdev_ram_attribute_group = { + .name = "ram", + .attrs = cxl_memdev_ram_attributes, +}; + +static struct attribute_group cxl_memdev_pmem_attribute_group = { + .name = "pmem", + .attrs = cxl_memdev_pmem_attributes, +}; + +static const struct attribute_group *cxl_memdev_attribute_groups[] = { + &cxl_memdev_attribute_group, + &cxl_memdev_ram_attribute_group, + &cxl_memdev_pmem_attribute_group, + NULL, +}; + +static const struct device_type cxl_memdev_type = { + .name = "cxl_memdev", + .release = cxl_memdev_release, + .devnode = cxl_memdev_devnode, + .groups = cxl_memdev_attribute_groups, +}; + +static void cxl_memdev_unregister(void *_cxlmd) +{ + struct cxl_memdev *cxlmd = _cxlmd; + struct device *dev = &cxlmd->dev; + struct cdev *cdev = &cxlmd->cdev; + const struct 
cdevm_file_operations *cdevm_fops; + + cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops); + cdevm_fops->shutdown(dev); + + cdev_device_del(&cxlmd->cdev, dev); + put_device(dev); +} + +static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm, + const struct file_operations *fops) +{ + struct pci_dev *pdev = cxlm->pdev; + struct cxl_memdev *cxlmd; + struct device *dev; + struct cdev *cdev; + int rc; + + cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL); + if (!cxlmd) + return ERR_PTR(-ENOMEM); + + rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL); + if (rc < 0) + goto err; + cxlmd->id = rc; + + dev = &cxlmd->dev; + device_initialize(dev); + dev->parent = &pdev->dev; + dev->bus = &cxl_bus_type; + dev->devt = MKDEV(cxl_mem_major, cxlmd->id); + dev->type = &cxl_memdev_type; + device_set_pm_not_required(dev); + + cdev = &cxlmd->cdev; + cdev_init(cdev, fops); + return cxlmd; + +err: + kfree(cxlmd); + return ERR_PTR(rc); +} + +struct cxl_memdev * +devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm, + const struct cdevm_file_operations *cdevm_fops) +{ + struct cxl_memdev *cxlmd; + struct device *dev; + struct cdev *cdev; + int rc; + + cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops); + if (IS_ERR(cxlmd)) + return cxlmd; + + dev = &cxlmd->dev; + rc = dev_set_name(dev, "mem%d", cxlmd->id); + if (rc) + goto err; + + /* + * Activate ioctl operations, no cxl_memdev_rwsem manipulation + * needed as this is ordered with cdev_add() publishing the device. + */ + cxlmd->cxlm = cxlm; + + cdev = &cxlmd->cdev; + rc = cdev_device_add(cdev, dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd); + if (rc) + return ERR_PTR(rc); + return cxlmd; + +err: + /* + * The cdev was briefly live, shutdown any ioctl operations that + * saw that state. + */ + cdevm_fops->shutdown(dev); + put_device(dev); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(devm_cxl_add_memdev); + +__init int cxl_memdev_init(void) +{ + dev_t devt; + int rc; + + rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl"); + if (rc) + return rc; + + cxl_mem_major = MAJOR(devt); + + return 0; +} + +void cxl_memdev_exit(void) +{ + unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS); +} diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 0cd463de1342..25345ece25f8 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -28,12 +28,6 @@ (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \ CXLMDEV_RESET_NEEDED_NOT) -/* - * An entire PCI topology full of devices should be enough for any - * config - */ -#define CXL_MEM_MAX_DEVS 65536 - /** * struct cdevm_file_operations - devm coordinated cdev file operations * @fops: file operations that are synchronized against @shutdown @@ -63,6 +57,15 @@ struct cxl_memdev { int id; }; +static inline struct cxl_memdev *to_cxl_memdev(struct device *dev) +{ + return container_of(dev, struct cxl_memdev, dev); +} + +struct cxl_memdev * +devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm, + const struct cdevm_file_operations *cdevm_fops); + /** * struct cxl_mem - A CXL memory device * @pdev: The PCI device associated with this CXL device. 
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index f7a5ad5e1f4a..193983e6edce 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -94,8 +94,6 @@ struct mbox_cmd { #define CXL_MBOX_SUCCESS 0 }; -static int cxl_mem_major; -static DEFINE_IDA(cxl_memdev_ida); static DECLARE_RWSEM(cxl_memdev_rwsem); static struct dentry *cxl_debugfs; static bool cxl_raw_allow_all; @@ -806,11 +804,6 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file) return 0; } -static struct cxl_memdev *to_cxl_memdev(struct device *dev) -{ - return container_of(dev, struct cxl_memdev, dev); -} - static void cxl_memdev_shutdown(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); @@ -1178,214 +1171,6 @@ free_maps: return ret; } -static void cxl_memdev_release(struct device *dev) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - - ida_free(&cxl_memdev_ida, cxlmd->id); - kfree(cxlmd); -} - -static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid, - kgid_t *gid) -{ - return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); -} - -static ssize_t firmware_version_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version); -} -static DEVICE_ATTR_RO(firmware_version); - -static ssize_t payload_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%zu\n", cxlm->payload_size); -} -static DEVICE_ATTR_RO(payload_max); - -static ssize_t label_storage_size_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%zu\n", cxlm->lsa_size); -} -static DEVICE_ATTR_RO(label_storage_size); - -static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - unsigned long long len = range_len(&cxlm->ram_range); - - return sysfs_emit(buf, "%#llx\n", len); -} - -static struct device_attribute dev_attr_ram_size = - __ATTR(size, 0444, ram_size_show, NULL); - -static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - unsigned long long len = range_len(&cxlm->pmem_range); - - return sysfs_emit(buf, "%#llx\n", len); -} - -static struct device_attribute dev_attr_pmem_size = - __ATTR(size, 0444, pmem_size_show, NULL); - -static struct attribute *cxl_memdev_attributes[] = { - &dev_attr_firmware_version.attr, - &dev_attr_payload_max.attr, - &dev_attr_label_storage_size.attr, - NULL, -}; - -static struct attribute *cxl_memdev_pmem_attributes[] = { - &dev_attr_pmem_size.attr, - NULL, -}; - -static struct attribute *cxl_memdev_ram_attributes[] = { - &dev_attr_ram_size.attr, - NULL, -}; - -static struct attribute_group cxl_memdev_attribute_group = { - .attrs = cxl_memdev_attributes, -}; - -static struct attribute_group cxl_memdev_ram_attribute_group = { - .name = "ram", - .attrs = cxl_memdev_ram_attributes, -}; - -static struct attribute_group cxl_memdev_pmem_attribute_group = { - .name = "pmem", - .attrs = cxl_memdev_pmem_attributes, -}; - -static const struct attribute_group *cxl_memdev_attribute_groups[] = { - 
&cxl_memdev_attribute_group, - &cxl_memdev_ram_attribute_group, - &cxl_memdev_pmem_attribute_group, - NULL, -}; - -static const struct device_type cxl_memdev_type = { - .name = "cxl_memdev", - .release = cxl_memdev_release, - .devnode = cxl_memdev_devnode, - .groups = cxl_memdev_attribute_groups, -}; - -static void cxl_memdev_unregister(void *_cxlmd) -{ - struct cxl_memdev *cxlmd = _cxlmd; - struct device *dev = &cxlmd->dev; - struct cdev *cdev = &cxlmd->cdev; - const struct cdevm_file_operations *cdevm_fops; - - cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops); - cdevm_fops->shutdown(dev); - - cdev_device_del(&cxlmd->cdev, dev); - put_device(dev); -} - -static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm, - const struct file_operations *fops) -{ - struct pci_dev *pdev = cxlm->pdev; - struct cxl_memdev *cxlmd; - struct device *dev; - struct cdev *cdev; - int rc; - - cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL); - if (!cxlmd) - return ERR_PTR(-ENOMEM); - - rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL); - if (rc < 0) - goto err; - cxlmd->id = rc; - - dev = &cxlmd->dev; - device_initialize(dev); - dev->parent = &pdev->dev; - dev->bus = &cxl_bus_type; - dev->devt = MKDEV(cxl_mem_major, cxlmd->id); - dev->type = &cxl_memdev_type; - device_set_pm_not_required(dev); - - cdev = &cxlmd->cdev; - cdev_init(cdev, fops); - return cxlmd; - -err: - kfree(cxlmd); - return ERR_PTR(rc); -} - -static struct cxl_memdev * -devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm, - const struct cdevm_file_operations *cdevm_fops) -{ - struct cxl_memdev *cxlmd; - struct device *dev; - struct cdev *cdev; - int rc; - - cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops); - if (IS_ERR(cxlmd)) - return cxlmd; - - dev = &cxlmd->dev; - rc = dev_set_name(dev, "mem%d", cxlmd->id); - if (rc) - goto err; - - /* - * Activate ioctl operations, no cxl_memdev_rwsem manipulation - * needed as this is ordered with cdev_add() publishing the device. - */ - cxlmd->cxlm = cxlm; - - cdev = &cxlmd->cdev; - rc = cdev_device_add(cdev, dev); - if (rc) - goto err; - - rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd); - if (rc) - return ERR_PTR(rc); - return cxlmd; - -err: - /* - * The cdev was briefly live, shutdown any ioctl operations that - * saw that state. 
- */ - cdevm_fops->shutdown(dev); - put_device(dev); - return ERR_PTR(rc); -} - static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out) { u32 remaining = size; @@ -1651,25 +1436,15 @@ static struct pci_driver cxl_mem_driver = { static __init int cxl_mem_init(void) { struct dentry *mbox_debugfs; - dev_t devt; int rc; /* Double check the anonymous union trickery in struct cxl_regs */ BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != offsetof(struct cxl_regs, device_regs.memdev)); - rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl"); - if (rc) - return rc; - - cxl_mem_major = MAJOR(devt); - rc = pci_register_driver(&cxl_mem_driver); - if (rc) { - unregister_chrdev_region(MKDEV(cxl_mem_major, 0), - CXL_MEM_MAX_DEVS); + if (rc) return rc; - } cxl_debugfs = debugfs_create_dir("cxl", NULL); mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); @@ -1683,7 +1458,6 @@ static __exit void cxl_mem_exit(void) { debugfs_remove_recursive(cxl_debugfs); pci_unregister_driver(&cxl_mem_driver); - unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS); } MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 1e39db573e4cdf798b899de2b1e72ac9bea08013 Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 16 Jul 2021 16:15:46 -0700 Subject: cxl/pci: Ignore unknown register block types In an effort to explicitly avoid supporting vendor specific register blocks (which can happily be mapped from userspace), entirely skip probing unknown types. The secondary benefit of this will be revealed in the future with code simplification. Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/20210716231548.174778-2-ben.widawsky@intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 193983e6edce..90a1c895e442 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1118,14 +1118,6 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) u64 offset; u8 bar; - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (!map) { - ret = -ENOMEM; - goto free_maps; - } - - list_add(&map->list, &register_maps); - pci_read_config_dword(pdev, regloc, &reg_lo); pci_read_config_dword(pdev, regloc + 4, &reg_hi); @@ -1135,6 +1127,18 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", bar, offset, reg_type); + /* Ignore unknown register block types */ + if (reg_type > CXL_REGLOC_RBI_MEMDEV) + continue; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) { + ret = -ENOMEM; + goto free_maps; + } + + list_add(&map->list, &register_maps); + base = cxl_mem_map_regblock(cxlm, bar, offset); if (!base) { ret = -ENOMEM; goto free_maps; } -- cgit v1.2.3 From 5b68705d1e6340127464ef0ac0e1de94f823f14e Mon Sep 17 00:00:00 2001 From: Ben Widawsky Date: Fri, 16 Jul 2021 16:15:47 -0700 Subject: cxl/pci: Simplify register setup It is desirable to retain the mappings from the calling function. Simplifying this code first makes that much more straightforward.
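Schematically, the simplification bounds the collection by the register block type space so a small stack array can replace the kzalloc'd list. A sketch of the resulting pattern follows, with the discovery-loop body elided and cxl_map_regs() assumed in scope as the static helper inside pci.c:

#include "cxl.h"
#include "pci.h"

/* Sketch: one slot per defined register block type, no heap, no list */
static int example_collect_and_map(struct cxl_mem *cxlm, int regblocks)
{
	struct cxl_register_map maps[CXL_REGLOC_RBI_TYPES];
	int i, n_maps, rc;

	for (i = 0, n_maps = 0; i < regblocks; i++) {
		/*
		 * ... decode one register block, skip types above
		 * CXL_REGLOC_RBI_MEMDEV, probe into maps[n_maps] ...
		 */
		n_maps++;
	}

	for (i = 0; i < n_maps; i++) {
		rc = cxl_map_regs(cxlm, &maps[i]);
		if (rc)
			return rc;
	}

	return 0;
}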
Signed-off-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/20210716231548.174778-3-ben.widawsky@intel.com Signed-off-by: Dan Williams --- drivers/cxl/cxl.h | 1 - drivers/cxl/pci.c | 38 ++++++++++++-------------------------- drivers/cxl/pci.h | 1 + 3 files changed, 13 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index b6bda39a59e3..53927f9fa77e 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -140,7 +140,6 @@ struct cxl_device_reg_map { }; struct cxl_register_map { - struct list_head list; u64 block_offset; u8 reg_type; u8 barno; diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 90a1c895e442..47315bb2db10 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1091,9 +1091,8 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) struct device *dev = &pdev->dev; u32 regloc_size, regblocks; void __iomem *base; - int regloc, i; - struct cxl_register_map *map, *n; - LIST_HEAD(register_maps); + int regloc, i, n_maps; + struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES]; int ret = 0; regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); @@ -1112,7 +1111,7 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; - for (i = 0; i < regblocks; i++, regloc += 8) { + for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) { u32 reg_lo, reg_hi; u8 reg_type; u64 offset; @@ -1131,20 +1130,11 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) if (reg_type > CXL_REGLOC_RBI_MEMDEV) continue; - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (!map) { - ret = -ENOMEM; - goto free_maps; - } - - list_add(&map->list, &register_maps); - base = cxl_mem_map_regblock(cxlm, bar, offset); - if (!base) { - ret = -ENOMEM; - goto free_maps; - } + if (!base) + return -ENOMEM; + map = &maps[n_maps]; map->barno = bar; map->block_offset = offset; map->reg_type = reg_type; @@ -1155,21 +1145,17 @@ static int cxl_mem_setup_regs(struct cxl_mem *cxlm) cxl_mem_unmap_regblock(cxlm, base); if (ret) - goto free_maps; + return ret; + + n_maps++; } pci_release_mem_regions(pdev); - list_for_each_entry(map, &register_maps, list) { - ret = cxl_map_regs(cxlm, map); + for (i = 0; i < n_maps; i++) { + ret = cxl_map_regs(cxlm, &maps[i]); if (ret) - goto free_maps; - } - -free_maps: - list_for_each_entry_safe(map, n, &register_maps, list) { - list_del(&map->list); - kfree(map); + break; } return ret; diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h index dad7a831f65f..8c1a58813816 100644 --- a/drivers/cxl/pci.h +++ b/drivers/cxl/pci.h @@ -25,6 +25,7 @@ #define CXL_REGLOC_RBI_COMPONENT 1 #define CXL_REGLOC_RBI_VIRT 2 #define CXL_REGLOC_RBI_MEMDEV 3 +#define CXL_REGLOC_RBI_TYPES CXL_REGLOC_RBI_MEMDEV + 1 #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16) -- cgit v1.2.3 From 0b9159d0ff21bc281dbb9ede06ad566330ac0943 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 17 Jun 2021 15:16:18 -0700 Subject: cxl/pci: Store memory capacity values The Identify Memory Device command returns information about the volatile only and persistent only memory capacities. Store those values in the cxl_mem structure for later use. While at it, reuse those values to compute the ram and pmem ranges.
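As a worked example of the encoding (hypothetical values, not taken from any real device): Identify Memory Device reports capacities in 256 MiB units, so a device returning volatile_capacity = 0x40 ends up with

	volatile_only_bytes = 0x40 * SZ_256M = 0x400000000	/* 16 GiB */
	ram_range = [0x0, 0x3ffffffff]

and the same multiplier applies to the total, persistent, and partition-alignment fields.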
Signed-off-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/20210617221620.1904031-2-ira.weiny@intel.com Signed-off-by: Dan Williams --- drivers/cxl/cxlmem.h | 4 ++++ drivers/cxl/pci.c | 36 +++++++++++++++++++++++++++++++++--- 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 25345ece25f8..22344fda8ca5 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -95,5 +95,9 @@ struct cxl_mem { struct range pmem_range; struct range ram_range; + u64 total_bytes; + u64 volatile_only_bytes; + u64 persistent_only_bytes; + u64 partition_align_bytes; }; #endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 47315bb2db10..cf4f593f426e 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -64,6 +64,15 @@ enum opcode { CXL_MBOX_OP_MAX = 0x10000 }; +/* + * CXL 2.0 - Memory capacity multiplier + * See Section 8.2.9.5 + * + * Volatile, Persistent, and Partition capacities are specified to be in + * multiples of 256MB - define a multiplier to convert to/from bytes. + */ +#define CXL_CAPACITY_MULTIPLIER SZ_256M + /** * struct mbox_cmd - A command to be submitted to hardware. * @opcode: (input) The command set and command submitted to hardware. @@ -1350,16 +1359,37 @@ static int cxl_mem_identify(struct cxl_mem *cxlm) if (rc < 0) return rc; + cxlm->total_bytes = le64_to_cpu(id.total_capacity); + cxlm->total_bytes *= CXL_CAPACITY_MULTIPLIER; + + cxlm->volatile_only_bytes = le64_to_cpu(id.volatile_capacity); + cxlm->volatile_only_bytes *= CXL_CAPACITY_MULTIPLIER; + + cxlm->persistent_only_bytes = le64_to_cpu(id.persistent_capacity); + cxlm->persistent_only_bytes *= CXL_CAPACITY_MULTIPLIER; + + cxlm->partition_align_bytes = le64_to_cpu(id.partition_align); + cxlm->partition_align_bytes *= CXL_CAPACITY_MULTIPLIER; + + dev_dbg(&cxlm->pdev->dev, "Identify Memory Device\n" + " total_bytes = %#llx\n" + " volatile_only_bytes = %#llx\n" + " persistent_only_bytes = %#llx\n" + " partition_align_bytes = %#llx\n", + cxlm->total_bytes, + cxlm->volatile_only_bytes, + cxlm->persistent_only_bytes, + cxlm->partition_align_bytes); + /* * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias. * For now, only the capacity is exported in sysfs */ cxlm->ram_range.start = 0; - cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1; + cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; cxlm->pmem_range.start = 0; - cxlm->pmem_range.end = - le64_to_cpu(id.persistent_capacity) * SZ_256M - 1; + cxlm->pmem_range.end = cxlm->persistent_only_bytes - 1; cxlm->lsa_size = le32_to_cpu(id.lsa_size); memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); -- cgit v1.2.3 From f847502ad8e3299e7ad256aa0bd7eaf184646117 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Tue, 10 Aug 2021 11:57:59 -0700 Subject: cxl/mem: Account for partitionable space in ram/pmem ranges Memory devices may specify volatile only, persistent only, and partitionable space which when added together result in a total capacity. If Identify Memory Device.Partition Alignment != 0 the device supports partitionable space. This partitionable space can be split between volatile and persistent space. The total volatile and persistent sizes are reported in Get Partition Info. 
i.e.:

	active volatile memory   = volatile only   + partitionable volatile
	active persistent memory = persistent only + partitionable persistent

Define cxl_mem_get_partition_info(), check for partitionable support, and use cxl_mem_get_partition_info() if applicable. Reviewed-by: Jonathan Cameron Signed-off-by: Ira Weiny Reported-by: kernel test robot Signed-off-by: Dan Williams --- drivers/cxl/cxlmem.h | 5 +++ drivers/cxl/pci.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 96 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 22344fda8ca5..6c0b1e2ea97c 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -99,5 +99,10 @@ struct cxl_mem { u64 volatile_only_bytes; u64 persistent_only_bytes; u64 partition_align_bytes; + + u64 active_volatile_bytes; + u64 active_persistent_bytes; + u64 next_volatile_bytes; + u64 next_persistent_bytes; }; #endif /* __CXL_MEM_H__ */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index cf4f593f426e..3f5db8960098 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1263,6 +1263,53 @@ static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm) return ret; } +/** + * cxl_mem_get_partition_info - Get partition info + * @cxlm: The device to act on + * @active_volatile_bytes: returned active volatile capacity + * @active_persistent_bytes: returned active persistent capacity + * @next_volatile_bytes: returned next volatile capacity + * @next_persistent_bytes: returned next persistent capacity + * + * Retrieve the current partition info for the device specified. If not 0, the + * 'next' values are pending and take effect on the next cold reset. + * + * Return: 0 if no error, or the result of the mailbox command. + * + * See CXL 2.0 8.2.9.5.2.1 Get Partition Info + */ +static int cxl_mem_get_partition_info(struct cxl_mem *cxlm, + u64 *active_volatile_bytes, + u64 *active_persistent_bytes, + u64 *next_volatile_bytes, + u64 *next_persistent_bytes) +{ + struct cxl_mbox_get_partition_info { + __le64 active_volatile_cap; + __le64 active_persistent_cap; + __le64 next_volatile_cap; + __le64 next_persistent_cap; + } __packed pi; + int rc; + + rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO, + NULL, 0, &pi, sizeof(pi)); + if (rc) + return rc; + + *active_volatile_bytes = le64_to_cpu(pi.active_volatile_cap); + *active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap); + *next_volatile_bytes = le64_to_cpu(pi.next_volatile_cap); + *next_persistent_bytes = le64_to_cpu(pi.next_persistent_cap); + + *active_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; + *active_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; + *next_volatile_bytes *= CXL_CAPACITY_MULTIPLIER; + *next_persistent_bytes *= CXL_CAPACITY_MULTIPLIER; + + return 0; +} + /** * cxl_mem_enumerate_cmds() - Enumerate commands for a device. * @cxlm: The device.
@@ -1381,18 +1428,53 @@ static int cxl_mem_identify(struct cxl_mem *cxlm) cxlm->persistent_only_bytes, cxlm->partition_align_bytes); + cxlm->lsa_size = le32_to_cpu(id.lsa_size); + memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); + + return 0; +} + +static int cxl_mem_create_range_info(struct cxl_mem *cxlm) +{ + int rc; + + if (cxlm->partition_align_bytes == 0) { + cxlm->ram_range.start = 0; + cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; + cxlm->pmem_range.start = 0; + cxlm->pmem_range.end = cxlm->persistent_only_bytes - 1; + return 0; + } + + rc = cxl_mem_get_partition_info(cxlm, + &cxlm->active_volatile_bytes, + &cxlm->active_persistent_bytes, + &cxlm->next_volatile_bytes, + &cxlm->next_persistent_bytes); + if (rc < 0) { + dev_err(&cxlm->pdev->dev, "Failed to query partition information\n"); + return rc; + } + + dev_dbg(&cxlm->pdev->dev, "Get Partition Info\n" + " active_volatile_bytes = %#llx\n" + " active_persistent_bytes = %#llx\n" + " next_volatile_bytes = %#llx\n" + " next_persistent_bytes = %#llx\n", + cxlm->active_volatile_bytes, + cxlm->active_persistent_bytes, + cxlm->next_volatile_bytes, + cxlm->next_persistent_bytes); + /* * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias. * For now, only the capacity is exported in sysfs */ cxlm->ram_range.start = 0; - cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; + cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; cxlm->pmem_range.start = 0; - cxlm->pmem_range.end = cxlm->persistent_only_bytes - 1; - - cxlm->lsa_size = le32_to_cpu(id.lsa_size); - memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision)); + cxlm->pmem_range.end = cxlm->active_persistent_bytes - 1; return 0; } @@ -1427,6 +1509,10 @@ static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; + rc = cxl_mem_create_range_info(cxlm); + if (rc) + return rc; + cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm, &cxl_memdev_fops); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); -- cgit v1.2.3 From ceeb0da0a0322bcba4c50ab3cf97fe9a7aa8a2e4 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 17 Jun 2021 15:16:20 -0700 Subject: cxl/mem: Adjust ram/pmem range to represent DPA ranges The CXL spec defines the volatile DPA range to run from 0 to the volatile memory size, and the persistent DPA range to follow directly after the end of the volatile range, extending for the persistent memory size. Essentially:

	Volatile DPA range   = [0, Volatile size)
	Persistent DPA range = [Volatile size, Volatile size + Persistent size)

Adjust the pmem_range start to reflect this and remove the TODO.
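A worked layout under the new scheme (hypothetical capacities: 16 GiB volatile only, 32 GiB persistent only, partition alignment 0):

	ram_range  = [0x0,         0x3ffffffff]	/* volatile DPA */
	pmem_range = [0x400000000, 0xbffffffff]	/* follows the volatile range */

The two ranges now tile the device physical address space instead of both starting at zero.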
Signed-off-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/20210617221620.1904031-4-ira.weiny@intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 3f5db8960098..651e8d4ec974 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1441,8 +1441,9 @@ static int cxl_mem_create_range_info(struct cxl_mem *cxlm) if (cxlm->partition_align_bytes == 0) { cxlm->ram_range.start = 0; cxlm->ram_range.end = cxlm->volatile_only_bytes - 1; - cxlm->pmem_range.start = 0; - cxlm->pmem_range.end = cxlm->persistent_only_bytes - 1; + cxlm->pmem_range.start = cxlm->volatile_only_bytes; + cxlm->pmem_range.end = cxlm->volatile_only_bytes + + cxlm->persistent_only_bytes - 1; return 0; } @@ -1466,15 +1467,12 @@ static int cxl_mem_create_range_info(struct cxl_mem *cxlm) cxlm->next_volatile_bytes, cxlm->next_persistent_bytes); - /* - * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias. - * For now, only the capacity is exported in sysfs - */ cxlm->ram_range.start = 0; cxlm->ram_range.end = cxlm->active_volatile_bytes - 1; - cxlm->pmem_range.start = 0; - cxlm->pmem_range.end = cxlm->active_persistent_bytes - 1; + cxlm->pmem_range.start = cxlm->active_volatile_bytes; + cxlm->pmem_range.end = cxlm->active_volatile_bytes + + cxlm->active_persistent_bytes - 1; return 0; } -- cgit v1.2.3 From b4366a827f6cf6e58c93e076e335d9ee9ee0480f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:05:30 -0700 Subject: libnvdimm/labels: Introduce getters for namespace label fields In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. In addition to nsl_get_* helpers there is the nsl_ref_name() helper that returns a pointer to a label field rather than copying the data. Where changes touch the old whitespace style, update to clang-format expectations. 
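The conversion pattern, shown once as a sketch (helper names as in the diff below): call sites stop open-coding the on-media endianness and instead go through an accessor whose currently unused ndd argument becomes the dispatch point once a second (CXL) label format exists.

	/* before: every call site hard-codes the EFI label layout */
	u64 dpa = __le64_to_cpu(nd_label->dpa);

	/* after: the layout is a private detail of the helper */
	u64 dpa = nsl_get_dpa(ndd, nd_label);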
Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982113002.1124374.15922077050771304490.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 20 ++++++------ drivers/nvdimm/namespace_devs.c | 70 ++++++++++++++++++++--------------------- drivers/nvdimm/nd.h | 66 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 9251441fd8a3..b6d845cfb70e 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -350,14 +350,14 @@ static bool slot_valid(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, u32 slot) { /* check that we are written where we expect to be written */ - if (slot != __le32_to_cpu(nd_label->slot)) + if (slot != nsl_get_slot(ndd, nd_label)) return false; /* check checksum */ if (namespace_label_has(ndd, checksum)) { u64 sum, sum_save; - sum_save = __le64_to_cpu(nd_label->checksum); + sum_save = nsl_get_checksum(ndd, nd_label); nd_label->checksum = __cpu_to_le64(0); sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); nd_label->checksum = __cpu_to_le64(sum_save); @@ -395,13 +395,13 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) continue; memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); - flags = __le32_to_cpu(nd_label->flags); + flags = nsl_get_flags(ndd, nd_label); if (test_bit(NDD_NOBLK, &nvdimm->flags)) flags &= ~NSLABEL_FLAG_LOCAL; nd_label_gen_id(&label_id, label_uuid, flags); res = nvdimm_allocate_dpa(ndd, &label_id, - __le64_to_cpu(nd_label->dpa), - __le64_to_cpu(nd_label->rawsize)); + nsl_get_dpa(ndd, nd_label), + nsl_get_rawsize(ndd, nd_label)); nd_dbg_dpa(nd_region, ndd, res, "reserve\n"); if (!res) return -EBUSY; @@ -548,9 +548,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd) nd_label = to_label(ndd, slot); if (!slot_valid(ndd, nd_label, slot)) { - u32 label_slot = __le32_to_cpu(nd_label->slot); - u64 size = __le64_to_cpu(nd_label->rawsize); - u64 dpa = __le64_to_cpu(nd_label->dpa); + u32 label_slot = nsl_get_slot(ndd, nd_label); + u64 size = nsl_get_rawsize(ndd, nd_label); + u64 dpa = nsl_get_dpa(ndd, nd_label); dev_dbg(ndd->dev, "slot%d invalid slot: %d dpa: %llx size: %llx\n", @@ -879,9 +879,9 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd, struct resource *res; for_each_dpa_resource(ndd, res) { - if (res->start != __le64_to_cpu(nd_label->dpa)) + if (res->start != nsl_get_dpa(ndd, nd_label)) continue; - if (resource_size(res) != __le64_to_cpu(nd_label->rawsize)) + if (resource_size(res) != nsl_get_rawsize(ndd, nd_label)) continue; return res; } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 2403b71b601e..94da804372bf 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1235,7 +1235,7 @@ static int namespace_update_uuid(struct nd_region *nd_region, if (!nd_label) continue; nd_label_gen_id(&label_id, nd_label->uuid, - __le32_to_cpu(nd_label->flags)); + nsl_get_flags(ndd, nd_label)); if (strcmp(old_label_id.id, label_id.id) == 0) set_bit(ND_LABEL_REAP, &label_ent->flags); } @@ -1851,9 +1851,9 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, if (!nd_label) continue; - isetcookie = __le64_to_cpu(nd_label->isetcookie); - position = __le16_to_cpu(nd_label->position); - nlabel = __le16_to_cpu(nd_label->nlabel); + isetcookie = nsl_get_isetcookie(ndd, nd_label); + position = nsl_get_position(ndd, nd_label); + nlabel = 
nsl_get_nlabel(ndd, nd_label); if (isetcookie != cookie) continue; @@ -1923,8 +1923,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) */ hw_start = nd_mapping->start; hw_end = hw_start + nd_mapping->size; - pmem_start = __le64_to_cpu(nd_label->dpa); - pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize); + pmem_start = nsl_get_dpa(ndd, nd_label); + pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label); if (pmem_start >= hw_start && pmem_start < hw_end && pmem_end <= hw_end && pmem_end > hw_start) /* pass */; @@ -1947,14 +1947,16 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id) * @nd_label: target pmem namespace label to evaluate */ static struct device *create_namespace_pmem(struct nd_region *nd_region, - struct nd_namespace_index *nsindex, - struct nd_namespace_label *nd_label) + struct nd_mapping *nd_mapping, + struct nd_namespace_label *nd_label) { + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); + struct nd_namespace_index *nsindex = + to_namespace_index(ndd, ndd->ns_current); u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex); u64 altcookie = nd_region_interleave_set_altcookie(nd_region); struct nd_label_ent *label_ent; struct nd_namespace_pmem *nspm; - struct nd_mapping *nd_mapping; resource_size_t size = 0; struct resource *res; struct device *dev; @@ -1966,10 +1968,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, return ERR_PTR(-ENXIO); } - if (__le64_to_cpu(nd_label->isetcookie) != cookie) { + if (nsl_get_isetcookie(ndd, nd_label) != cookie) { dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", nd_label->uuid); - if (__le64_to_cpu(nd_label->isetcookie) != altcookie) + if (nsl_get_isetcookie(ndd, nd_label) != altcookie) return ERR_PTR(-EAGAIN); dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", @@ -2037,16 +2039,16 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, continue; } - size += __le64_to_cpu(label0->rawsize); - if (__le16_to_cpu(label0->position) != 0) + ndd = to_ndd(nd_mapping); + size += nsl_get_rawsize(ndd, label0); + if (nsl_get_position(ndd, label0) != 0) continue; WARN_ON(nspm->alt_name || nspm->uuid); - nspm->alt_name = kmemdup((void __force *) label0->name, - NSLABEL_NAME_LEN, GFP_KERNEL); + nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0), + NSLABEL_NAME_LEN, GFP_KERNEL); nspm->uuid = kmemdup((void __force *) label0->uuid, NSLABEL_UUID_LEN, GFP_KERNEL); - nspm->lbasize = __le64_to_cpu(label0->lbasize); - ndd = to_ndd(nd_mapping); + nspm->lbasize = nsl_get_lbasize(ndd, label0); if (namespace_label_has(ndd, abstraction_guid)) nspm->nsio.common.claim_class = to_nvdimm_cclass(&label0->abstraction_guid); @@ -2237,7 +2239,7 @@ static int add_namespace_resource(struct nd_region *nd_region, if (is_namespace_blk(devs[i])) { res = nsblk_add_resource(nd_region, ndd, to_nd_namespace_blk(devs[i]), - __le64_to_cpu(nd_label->dpa)); + nsl_get_dpa(ndd, nd_label)); if (!res) return -ENXIO; nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count); @@ -2276,7 +2278,7 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) { dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", nd_set->cookie2, - __le64_to_cpu(nd_label->isetcookie)); + nsl_get_isetcookie(ndd, nd_label)); return ERR_PTR(-EAGAIN); } } @@ -2288,7 +2290,7 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, dev->type = &namespace_blk_device_type; dev->parent = &nd_region->dev; nsblk->id = 
-1; - nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); + nsblk->lbasize = nsl_get_lbasize(ndd, nd_label); nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL); if (namespace_label_has(ndd, abstraction_guid)) @@ -2296,15 +2298,14 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, = to_nvdimm_cclass(&nd_label->abstraction_guid); if (!nsblk->uuid) goto blk_err; - memcpy(name, nd_label->name, NSLABEL_NAME_LEN); + nsl_get_name(ndd, nd_label, name); if (name[0]) { - nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, - GFP_KERNEL); + nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, GFP_KERNEL); if (!nsblk->alt_name) goto blk_err; } res = nsblk_add_resource(nd_region, ndd, nsblk, - __le64_to_cpu(nd_label->dpa)); + nsl_get_dpa(ndd, nd_label)); if (!res) goto blk_err; nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count); @@ -2345,6 +2346,7 @@ static struct device **scan_labels(struct nd_region *nd_region) struct device *dev, **devs = NULL; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; /* "safe" because create_namespace_pmem() might list_move() label_ent */ @@ -2355,7 +2357,7 @@ static struct device **scan_labels(struct nd_region *nd_region) if (!nd_label) continue; - flags = __le32_to_cpu(nd_label->flags); + flags = nsl_get_flags(ndd, nd_label); if (is_nd_blk(&nd_region->dev) == !!(flags & NSLABEL_FLAG_LOCAL)) /* pass, region matches label type */; @@ -2363,9 +2365,9 @@ static struct device **scan_labels(struct nd_region *nd_region) continue; /* skip labels that describe extents outside of the region */ - if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start || - __le64_to_cpu(nd_label->dpa) > map_end) - continue; + if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start || + nsl_get_dpa(ndd, nd_label) > map_end) + continue; i = add_namespace_resource(nd_region, nd_label, devs, count); if (i < 0) @@ -2381,13 +2383,9 @@ static struct device **scan_labels(struct nd_region *nd_region) if (is_nd_blk(&nd_region->dev)) dev = create_namespace_blk(nd_region, nd_label, count); - else { - struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); - struct nd_namespace_index *nsindex; - - nsindex = to_namespace_index(ndd, ndd->ns_current); - dev = create_namespace_pmem(nd_region, nsindex, nd_label); - } + else + dev = create_namespace_pmem(nd_region, nd_mapping, + nd_label); if (IS_ERR(dev)) { switch (PTR_ERR(dev)) { @@ -2570,7 +2568,7 @@ static int init_active_labels(struct nd_region *nd_region) break; label = nd_label_active(ndd, j); if (test_bit(NDD_NOBLK, &nvdimm->flags)) { - u32 flags = __le32_to_cpu(label->flags); + u32 flags = nsl_get_flags(ndd, label); flags &= ~NSLABEL_FLAG_LOCAL; label->flags = __cpu_to_le32(flags); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 696b55556d4d..61f43f0edabf 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -35,6 +35,72 @@ struct nvdimm_drvdata { struct kref kref; }; +static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return nd_label->name; +} + +static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u8 *name) +{ + return memcpy(name, nd_label->name, NSLABEL_NAME_LEN); +} + +static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le32_to_cpu(nd_label->slot); +} + +static inline u64 
nsl_get_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->checksum); +} + +static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le32_to_cpu(nd_label->flags); +} + +static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->dpa); +} + +static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->rawsize); +} + +static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->isetcookie); +} + +static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le16_to_cpu(nd_label->position); +} + +static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le16_to_cpu(nd_label->nlabel); +} + +static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + return __le64_to_cpu(nd_label->lbasize); +} + struct nd_region_data { int ns_count; int ns_active; -- cgit v1.2.3 From 9761b02d40de4b7f9f4947f801b69993817883ee Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:05:35 -0700 Subject: libnvdimm/labels: Add isetcookie validation helper In preparation to handle CXL labels with the same code that handles EFI labels, add a specific interleave-set-cookie validation helper rather than a getter since the CXL label type does not support this concept. The answer for CXL labels will always be true. Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982113550.1124374.206762177785773038.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/namespace_devs.c | 8 +++----- drivers/nvdimm/nd.h | 7 +++++++ 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 94da804372bf..f33245c27cc4 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1847,15 +1847,13 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, list_for_each_entry(label_ent, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; u16 position, nlabel; - u64 isetcookie; if (!nd_label) continue; - isetcookie = nsl_get_isetcookie(ndd, nd_label); position = nsl_get_position(ndd, nd_label); nlabel = nsl_get_nlabel(ndd, nd_label); - if (isetcookie != cookie) + if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) continue; if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0) @@ -1968,10 +1966,10 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, return ERR_PTR(-ENXIO); } - if (nsl_get_isetcookie(ndd, nd_label) != cookie) { + if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) { dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n", nd_label->uuid); - if (nsl_get_isetcookie(ndd, nd_label) != altcookie) + if (!nsl_validate_isetcookie(ndd, nd_label, altcookie)) return ERR_PTR(-EAGAIN); dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n", diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 61f43f0edabf..b3feaf3699f7 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -83,6 +83,13 @@ static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd, return 
__le64_to_cpu(nd_label->isetcookie); } +static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 cookie) +{ + return cookie == __le64_to_cpu(nd_label->isetcookie); +} + static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { -- cgit v1.2.3 From 8176f147891258a8eb42bf5579fa344fef34e5bc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:05:41 -0700 Subject: libnvdimm/labels: Introduce label setter helpers In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. Reported-by: kernel test robot Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982114123.1124374.17153270107594686116.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 61 +++++++++++++++++------------------- drivers/nvdimm/namespace_devs.c | 2 +- drivers/nvdimm/nd.h | 69 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 99 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index b6d845cfb70e..b40a4eda1d89 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -358,9 +358,9 @@ static bool slot_valid(struct nvdimm_drvdata *ndd, u64 sum, sum_save; sum_save = nsl_get_checksum(ndd, nd_label); - nd_label->checksum = __cpu_to_le64(0); + nsl_set_checksum(ndd, nd_label, 0); sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nd_label->checksum = __cpu_to_le64(sum_save); + nsl_set_checksum(ndd, nd_label, sum_save); if (sum != sum_save) { dev_dbg(ndd->dev, "fail checksum. 
slot: %d expect: %#llx\n", slot, sum); @@ -797,16 +797,15 @@ static int __pmem_label_update(struct nd_region *nd_region, nd_label = to_label(ndd, slot); memset(nd_label, 0, sizeof_namespace_label(ndd)); memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); - if (nspm->alt_name) - memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); - nd_label->flags = __cpu_to_le32(flags); - nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); - nd_label->position = __cpu_to_le16(pos); - nd_label->isetcookie = __cpu_to_le64(cookie); - nd_label->rawsize = __cpu_to_le64(resource_size(res)); - nd_label->lbasize = __cpu_to_le64(nspm->lbasize); - nd_label->dpa = __cpu_to_le64(res->start); - nd_label->slot = __cpu_to_le32(slot); + nsl_set_name(ndd, nd_label, nspm->alt_name); + nsl_set_flags(ndd, nd_label, flags); + nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings); + nsl_set_position(ndd, nd_label, pos); + nsl_set_isetcookie(ndd, nd_label, cookie); + nsl_set_rawsize(ndd, nd_label, resource_size(res)); + nsl_set_lbasize(ndd, nd_label, nspm->lbasize); + nsl_set_dpa(ndd, nd_label, res->start); + nsl_set_slot(ndd, nd_label, slot); if (namespace_label_has(ndd, type_guid)) guid_copy(&nd_label->type_guid, &nd_set->type_guid); if (namespace_label_has(ndd, abstraction_guid)) @@ -816,9 +815,9 @@ static int __pmem_label_update(struct nd_region *nd_region, if (namespace_label_has(ndd, checksum)) { u64 sum; - nd_label->checksum = __cpu_to_le64(0); + nsl_set_checksum(ndd, nd_label, 0); sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nd_label->checksum = __cpu_to_le64(sum); + nsl_set_checksum(ndd, nd_label, sum); } nd_dbg_dpa(nd_region, ndd, res, "\n"); @@ -1017,10 +1016,8 @@ static int __blk_label_update(struct nd_region *nd_region, nd_label = to_label(ndd, slot); memset(nd_label, 0, sizeof_namespace_label(ndd)); memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN); - if (nsblk->alt_name) - memcpy(nd_label->name, nsblk->alt_name, - NSLABEL_NAME_LEN); - nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL); + nsl_set_name(ndd, nd_label, nsblk->alt_name); + nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL); /* * Use the presence of the type_guid as a flag to @@ -1029,23 +1026,23 @@ static int __blk_label_update(struct nd_region *nd_region, */ if (namespace_label_has(ndd, type_guid)) { if (i == min_dpa_idx) { - nd_label->nlabel = __cpu_to_le16(nsblk->num_resources); - nd_label->position = __cpu_to_le16(0); + nsl_set_nlabel(ndd, nd_label, nsblk->num_resources); + nsl_set_position(ndd, nd_label, 0); } else { - nd_label->nlabel = __cpu_to_le16(0xffff); - nd_label->position = __cpu_to_le16(0xffff); + nsl_set_nlabel(ndd, nd_label, 0xffff); + nsl_set_position(ndd, nd_label, 0xffff); } - nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2); + nsl_set_isetcookie(ndd, nd_label, nd_set->cookie2); } else { - nd_label->nlabel = __cpu_to_le16(0); /* N/A */ - nd_label->position = __cpu_to_le16(0); /* N/A */ - nd_label->isetcookie = __cpu_to_le64(0); /* N/A */ + nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ + nsl_set_position(ndd, nd_label, 0); /* N/A */ + nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */ } - nd_label->dpa = __cpu_to_le64(res->start); - nd_label->rawsize = __cpu_to_le64(resource_size(res)); - nd_label->lbasize = __cpu_to_le64(nsblk->lbasize); - nd_label->slot = __cpu_to_le32(slot); + nsl_set_dpa(ndd, nd_label, res->start); + nsl_set_rawsize(ndd, nd_label, resource_size(res)); + nsl_set_lbasize(ndd, nd_label, nsblk->lbasize); + nsl_set_slot(ndd, nd_label, slot); if (namespace_label_has(ndd, 
type_guid)) guid_copy(&nd_label->type_guid, &nd_set->type_guid); if (namespace_label_has(ndd, abstraction_guid)) @@ -1056,10 +1053,10 @@ static int __blk_label_update(struct nd_region *nd_region, if (namespace_label_has(ndd, checksum)) { u64 sum; - nd_label->checksum = __cpu_to_le64(0); + nsl_set_checksum(ndd, nd_label, 0); sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nd_label->checksum = __cpu_to_le64(sum); + nsl_set_checksum(ndd, nd_label, sum); } /* update label */ diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index f33245c27cc4..fb9e080ce654 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2569,7 +2569,7 @@ static int init_active_labels(struct nd_region *nd_region) u32 flags = nsl_get_flags(ndd, label); flags &= ~NSLABEL_FLAG_LOCAL; - label->flags = __cpu_to_le32(flags); + nsl_set_flags(ndd, label, flags); } label_ent->label = label; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index b3feaf3699f7..9bf9cd4a9a2d 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -47,42 +47,89 @@ static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd, return memcpy(name, nd_label->name, NSLABEL_NAME_LEN); } +static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u8 *name) +{ + if (!name) + return NULL; + return memcpy(nd_label->name, name, NSLABEL_NAME_LEN); +} + static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le32_to_cpu(nd_label->slot); } +static inline void nsl_set_slot(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u32 slot) +{ + nd_label->slot = __cpu_to_le32(slot); +} + static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le64_to_cpu(nd_label->checksum); } +static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 checksum) +{ + nd_label->checksum = __cpu_to_le64(checksum); +} + static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le32_to_cpu(nd_label->flags); } +static inline void nsl_set_flags(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u32 flags) +{ + nd_label->flags = __cpu_to_le32(flags); +} + static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le64_to_cpu(nd_label->dpa); } +static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, u64 dpa) +{ + nd_label->dpa = __cpu_to_le64(dpa); +} + static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le64_to_cpu(nd_label->rawsize); } +static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 rawsize) +{ + nd_label->rawsize = __cpu_to_le64(rawsize); +} + static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le64_to_cpu(nd_label->isetcookie); } +static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 isetcookie) +{ + nd_label->isetcookie = __cpu_to_le64(isetcookie); +} + static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, u64 cookie) @@ -96,18 +143,40 @@ static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd, return __le16_to_cpu(nd_label->position); } +static inline void 
nsl_set_position(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u16 position) +{ + nd_label->position = __cpu_to_le16(position); +} + + static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le16_to_cpu(nd_label->nlabel); } +static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u16 nlabel) +{ + nd_label->nlabel = __cpu_to_le16(nlabel); +} + static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label) { return __le64_to_cpu(nd_label->lbasize); } +static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 lbasize) +{ + nd_label->lbasize = __cpu_to_le64(lbasize); +} + struct nd_region_data { int ns_count; int ns_active; -- cgit v1.2.3 From 7cd35b2920500908eb3d9d00939f2aea3e8f8361 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:05:46 -0700 Subject: libnvdimm/labels: Add a checksum calculation helper In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. CXL labels support checksums by default, but early versions of the EFI labels did not. Add a validate function that can return true in the case the label format does not implement a checksum. Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982114637.1124374.6966639787307077105.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 68 ++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index b40a4eda1d89..3f73412dd438 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -346,29 +346,45 @@ static bool preamble_next(struct nvdimm_drvdata *ndd, free, nslot); } +static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + u64 sum, sum_save; + + if (!namespace_label_has(ndd, checksum)) + return true; + + sum_save = nsl_get_checksum(ndd, nd_label); + nsl_set_checksum(ndd, nd_label, 0); + sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); + nsl_set_checksum(ndd, nd_label, sum_save); + return sum == sum_save; +} + +static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + u64 sum; + + if (!namespace_label_has(ndd, checksum)) + return; + nsl_set_checksum(ndd, nd_label, 0); + sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); + nsl_set_checksum(ndd, nd_label, sum); +} + static bool slot_valid(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, u32 slot) { + bool valid; + /* check that we are written where we expect to be written */ if (slot != nsl_get_slot(ndd, nd_label)) return false; - - /* check checksum */ - if (namespace_label_has(ndd, checksum)) { - u64 sum, sum_save; - - sum_save = nsl_get_checksum(ndd, nd_label); - nsl_set_checksum(ndd, nd_label, 0); - sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nsl_set_checksum(ndd, nd_label, sum_save); - if (sum != sum_save) { - dev_dbg(ndd->dev, "fail checksum. 
slot: %d expect: %#llx\n", - slot, sum); - return false; - } - } - - return true; + valid = nsl_validate_checksum(ndd, nd_label); + if (!valid) + dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot); + return valid; } int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) @@ -812,13 +828,7 @@ static int __pmem_label_update(struct nd_region *nd_region, guid_copy(&nd_label->abstraction_guid, to_abstraction_guid(ndns->claim_class, &nd_label->abstraction_guid)); - if (namespace_label_has(ndd, checksum)) { - u64 sum; - - nsl_set_checksum(ndd, nd_label, 0); - sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1); - nsl_set_checksum(ndd, nd_label, sum); - } + nsl_calculate_checksum(ndd, nd_label); nd_dbg_dpa(nd_region, ndd, res, "\n"); /* update label */ @@ -1049,15 +1059,7 @@ static int __blk_label_update(struct nd_region *nd_region, guid_copy(&nd_label->abstraction_guid, to_abstraction_guid(ndns->claim_class, &nd_label->abstraction_guid)); - - if (namespace_label_has(ndd, checksum)) { - u64 sum; - - nsl_set_checksum(ndd, nd_label, 0); - sum = nd_fletcher64(nd_label, - sizeof_namespace_label(ndd), 1); - nsl_set_checksum(ndd, nd_label, sum); - } + nsl_calculate_checksum(ndd, nd_label); /* update label */ offset = nd_label_offset(ndd, nd_label); -- cgit v1.2.3 From f56541a7122c68ef3074e41c2cb14182eb11d3a5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:05:51 -0700 Subject: libnvdimm/labels: Add blk isetcookie set / validation helpers In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. Given that BLK-mode is not even supported on CXL, hide the BLK-mode specific details inside the helpers.
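The validation helpers in this series share one shape, sketched here with a hypothetical field name (foo is a placeholder, not a real label field): if the label format lacks the field, validation trivially passes, so callers no longer need their own namespace_label_has() checks.

	static bool nsl_validate_foo(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label,
				     u64 foo)
	{
		if (!namespace_label_has(ndd, foo))
			return true;	/* field absent: nothing to check */
		return foo == __le64_to_cpu(nd_label->foo);
	}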
Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982115185.1124374.13459190993792729776.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 30 ++++++++++++++++++++++++++++-- drivers/nvdimm/namespace_devs.c | 9 ++------- drivers/nvdimm/nd.h | 4 ++++ 3 files changed, 34 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 3f73412dd438..d1a7f399cfe4 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -898,6 +898,33 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd, return NULL; } +static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 isetcookie) +{ + if (namespace_label_has(ndd, type_guid)) { + nsl_set_isetcookie(ndd, nd_label, isetcookie); + return; + } + nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */ +} + +bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 isetcookie) +{ + if (!namespace_label_has(ndd, type_guid)) + return true; + + if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) { + dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", isetcookie, + nsl_get_isetcookie(ndd, nd_label)); + return false; + } + + return true; +} + /* * 1/ Account all the labels that can be freed after this update * 2/ Allocate and write the label to the staging (next) index @@ -1042,12 +1069,11 @@ static int __blk_label_update(struct nd_region *nd_region, nsl_set_nlabel(ndd, nd_label, 0xffff); nsl_set_position(ndd, nd_label, 0xffff); } - nsl_set_isetcookie(ndd, nd_label, nd_set->cookie2); } else { nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ nsl_set_position(ndd, nd_label, 0); /* N/A */ - nsl_set_isetcookie(ndd, nd_label, 0); /* N/A */ } + nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2); nsl_set_dpa(ndd, nd_label, res->start); nsl_set_rawsize(ndd, nd_label, resource_size(res)); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index fb9e080ce654..fbd0c2fcea4a 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2272,14 +2272,9 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, &nd_label->type_guid); return ERR_PTR(-EAGAIN); } - - if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) { - dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n", - nd_set->cookie2, - nsl_get_isetcookie(ndd, nd_label)); - return ERR_PTR(-EAGAIN); - } } + if (!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2)) + return ERR_PTR(-EAGAIN); nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); if (!nsblk) diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 9bf9cd4a9a2d..955c4395a8e3 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -177,6 +177,10 @@ static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd, nd_label->lbasize = __cpu_to_le64(lbasize); } +bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + u64 isetcookie); + struct nd_region_data { int ns_count; int ns_active; -- cgit v1.2.3 From de8fa48b9a281e69f944c58ae2adbd8a277787a6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:05:57 -0700 Subject: libnvdimm/labels: Add blk special cases for nlabel and position helpers In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. 
The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. Finish off the BLK-mode specific helper conversion with the nlabel and position behaviour that is specific to EFI v1.2 labels and not the original v1.1 definition. Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982115698.1124374.10182273478536799613.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 46 +++++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index d1a7f399cfe4..7188675c0955 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -898,6 +898,10 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd, return NULL; } +/* + * Use the presence of the type_guid as a flag to determine isetcookie + * usage and nlabel + position policy for blk-aperture namespaces. + */ static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, u64 isetcookie) @@ -925,6 +929,28 @@ bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, return true; } +static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, int nlabel, + bool first) +{ + if (!namespace_label_has(ndd, type_guid)) { + nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ + return; + } + nsl_set_nlabel(ndd, nd_label, first ? nlabel : 0xffff); +} + +static void nsl_set_blk_position(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + bool first) +{ + if (!namespace_label_has(ndd, type_guid)) { + nsl_set_position(ndd, nd_label, 0); + return; + } + nsl_set_position(ndd, nd_label, first ? 0 : 0xffff); +} + /* * 1/ Account all the labels that can be freed after this update * 2/ Allocate and write the label to the staging (next) index @@ -1056,23 +1082,9 @@ static int __blk_label_update(struct nd_region *nd_region, nsl_set_name(ndd, nd_label, nsblk->alt_name); nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL); - /* - * Use the presence of the type_guid as a flag to - * determine isetcookie usage and nlabel + position - * policy for blk-aperture namespaces. - */ - if (namespace_label_has(ndd, type_guid)) { - if (i == min_dpa_idx) { - nsl_set_nlabel(ndd, nd_label, nsblk->num_resources); - nsl_set_position(ndd, nd_label, 0); - } else { - nsl_set_nlabel(ndd, nd_label, 0xffff); - nsl_set_position(ndd, nd_label, 0xffff); - } - } else { - nsl_set_nlabel(ndd, nd_label, 0); /* N/A */ - nsl_set_position(ndd, nd_label, 0); /* N/A */ - } + nsl_set_blk_nlabel(ndd, nd_label, nsblk->num_resources, + i == min_dpa_idx); + nsl_set_blk_position(ndd, nd_label, i == min_dpa_idx); nsl_set_blk_isetcookie(ndd, nd_label, nd_set->cookie2); nsl_set_dpa(ndd, nd_label, res->start); -- cgit v1.2.3 From 8b03aa0e0e5aea61d5a7fc6434faba2a05a6095b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:06:02 -0700 Subject: libnvdimm/labels: Add type-guid helpers In preparation for CXL label support, which does not have the type-guid concept, wrap the existing users with nsl_set_type_guid, and nsl_validate_type_guid. Recall that the type-guid is a value in the ACPI NFIT table to indicate how the memory range is used / should be presented to upper layers. 
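Both ends of the helper pair as they appear at call sites in the diff below, label write and label scan respectively:

	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);	/* stamp on write */

	if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid))
		return ERR_PTR(-EAGAIN);	/* wrong range type: skip label */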
Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982116208.1124374.13938280892226800953.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 26 ++++++++++++++++++++++---- drivers/nvdimm/namespace_devs.c | 19 ++++--------------- drivers/nvdimm/nd.h | 2 ++ 3 files changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 7188675c0955..294ffc3cb582 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -772,6 +772,26 @@ static void reap_victim(struct nd_mapping *nd_mapping, victim->label = NULL; } +static void nsl_set_type_guid(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, guid_t *guid) +{ + if (namespace_label_has(ndd, type_guid)) + guid_copy(&nd_label->type_guid, guid); +} + +bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, guid_t *guid) +{ + if (!namespace_label_has(ndd, type_guid)) + return true; + if (!guid_equal(&nd_label->type_guid, guid)) { + dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid, + &nd_label->type_guid); + return false; + } + return true; +} + static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, int pos, unsigned long flags) @@ -822,8 +842,7 @@ static int __pmem_label_update(struct nd_region *nd_region, nsl_set_lbasize(ndd, nd_label, nspm->lbasize); nsl_set_dpa(ndd, nd_label, res->start); nsl_set_slot(ndd, nd_label, slot); - if (namespace_label_has(ndd, type_guid)) - guid_copy(&nd_label->type_guid, &nd_set->type_guid); + nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); if (namespace_label_has(ndd, abstraction_guid)) guid_copy(&nd_label->abstraction_guid, to_abstraction_guid(ndns->claim_class, @@ -1091,8 +1110,7 @@ static int __blk_label_update(struct nd_region *nd_region, nsl_set_rawsize(ndd, nd_label, resource_size(res)); nsl_set_lbasize(ndd, nd_label, nsblk->lbasize); nsl_set_slot(ndd, nd_label, slot); - if (namespace_label_has(ndd, type_guid)) - guid_copy(&nd_label->type_guid, &nd_set->type_guid); + nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); if (namespace_label_has(ndd, abstraction_guid)) guid_copy(&nd_label->abstraction_guid, to_abstraction_guid(ndns->claim_class, diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index fbd0c2fcea4a..af5a31dd3147 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1859,14 +1859,9 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid, if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0) continue; - if (namespace_label_has(ndd, type_guid) - && !guid_equal(&nd_set->type_guid, - &nd_label->type_guid)) { - dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", - &nd_set->type_guid, - &nd_label->type_guid); + if (!nsl_validate_type_guid(ndd, nd_label, + &nd_set->type_guid)) continue; - } if (found_uuid) { dev_dbg(ndd->dev, "duplicate entry for uuid\n"); @@ -2265,14 +2260,8 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, struct device *dev = NULL; struct resource *res; - if (namespace_label_has(ndd, type_guid)) { - if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) { - dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", - &nd_set->type_guid, - &nd_label->type_guid); - return ERR_PTR(-EAGAIN); - } - } + if (!nsl_validate_type_guid(ndd, nd_label, &nd_set->type_guid)) + return ERR_PTR(-EAGAIN); if 
(!nsl_validate_blk_isetcookie(ndd, nd_label, nd_set->cookie2)) return ERR_PTR(-EAGAIN); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 955c4395a8e3..a3e215f2d837 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -180,6 +180,8 @@ static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd, bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, u64 isetcookie); +bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, guid_t *guid); struct nd_region_data { int ns_count; -- cgit v1.2.3 From a6e6d722957273afebf26b9a36b90acfc27dca8a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:06:07 -0700 Subject: libnvdimm/labels: Add claim class helpers In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. CXL labels do have the concept of a claim class represented by an "abstraction" identifier. It turns out both label implementations use the same ids, but EFI encodes them as GUIDs and CXL labels encode them as UUIDs. For now abstract out the claim class such that the UUID vs GUID distinction can later be hidden in the helper. Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982116719.1124374.9917866609080940364.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 31 ++++++++++++++++++++++--------- drivers/nvdimm/label.h | 1 - drivers/nvdimm/namespace_devs.c | 13 ++++--------- drivers/nvdimm/nd.h | 2 ++ 4 files changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 294ffc3cb582..7f473f9db300 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -724,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd, - (unsigned long) to_namespace_index(ndd, 0); } -enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid) +static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid) { if (guid_equal(guid, &nvdimm_btt_guid)) return NVDIMM_CCLASS_BTT; @@ -792,6 +792,25 @@ bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, return true; } +static void nsl_set_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + enum nvdimm_claim_class claim_class) +{ + if (!namespace_label_has(ndd, abstraction_guid)) + return; + guid_copy(&nd_label->abstraction_guid, + to_abstraction_guid(claim_class, + &nd_label->abstraction_guid)); +} + +enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + if (!namespace_label_has(ndd, abstraction_guid)) + return NVDIMM_CCLASS_NONE; + return to_nvdimm_cclass(&nd_label->abstraction_guid); +} + static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, int pos, unsigned long flags) @@ -843,10 +862,7 @@ static int __pmem_label_update(struct nd_region *nd_region, nsl_set_dpa(ndd, nd_label, res->start); nsl_set_slot(ndd, nd_label, slot); nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); - if (namespace_label_has(ndd, abstraction_guid)) - guid_copy(&nd_label->abstraction_guid, - to_abstraction_guid(ndns->claim_class, - &nd_label->abstraction_guid)); + 
From a6e6d722957273afebf26b9a36b90acfc27dca8a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 24 Aug 2021 09:06:07 -0700 Subject: libnvdimm/labels: Add claim class helpers In preparation for LIBNVDIMM to manage labels on CXL devices deploy helpers that abstract the label type from the implementation. The CXL label format is mostly similar to the EFI label format with concepts / fields added, like dynamic region creation and label type guids, and other concepts removed like BLK-mode and interleave-set-cookie ids. CXL labels do have the concept of a claim class represented by an "abstraction" identifier. It turns out both label implementations use the same ids, but EFI encodes them as GUIDs and CXL labels encode them as UUIDs. For now abstract out the claim class such that the UUID vs GUID distinction can later be hidden in the helper. Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/162982116719.1124374.9917866609080940364.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/nvdimm/label.c | 31 ++++++++++++++++++++++--------- drivers/nvdimm/label.h | 1 - drivers/nvdimm/namespace_devs.c | 13 ++++--------- drivers/nvdimm/nd.h | 2 ++ 4 files changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 294ffc3cb582..7f473f9db300 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -724,7 +724,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd, - (unsigned long) to_namespace_index(ndd, 0); } -enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid) +static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid) { if (guid_equal(guid, &nvdimm_btt_guid)) return NVDIMM_CCLASS_BTT; @@ -792,6 +792,25 @@ bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, return true; } +static void nsl_set_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label, + enum nvdimm_claim_class claim_class) +{ + if (!namespace_label_has(ndd, abstraction_guid)) + return; + guid_copy(&nd_label->abstraction_guid, + to_abstraction_guid(claim_class, + &nd_label->abstraction_guid)); +} + +enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label) +{ + if (!namespace_label_has(ndd, abstraction_guid)) + return NVDIMM_CCLASS_NONE; + return to_nvdimm_cclass(&nd_label->abstraction_guid); +} + static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, int pos, unsigned long flags) @@ -843,10 +862,7 @@ static int __pmem_label_update(struct nd_region *nd_region, nsl_set_dpa(ndd, nd_label, res->start); nsl_set_slot(ndd, nd_label, slot); nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); - if (namespace_label_has(ndd, abstraction_guid)) - guid_copy(&nd_label->abstraction_guid, - to_abstraction_guid(ndns->claim_class, - &nd_label->abstraction_guid)); + nsl_set_claim_class(ndd, nd_label, ndns->claim_class); nsl_calculate_checksum(ndd, nd_label); nd_dbg_dpa(nd_region, ndd, res, "\n"); @@ -1111,10 +1127,7 @@ static int __blk_label_update(struct nd_region *nd_region, nsl_set_lbasize(ndd, nd_label, nsblk->lbasize); nsl_set_slot(ndd, nd_label, slot); nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid); - if (namespace_label_has(ndd, abstraction_guid)) - guid_copy(&nd_label->abstraction_guid, - to_abstraction_guid(ndns->claim_class, - &nd_label->abstraction_guid)); + nsl_set_claim_class(ndd, nd_label, ndns->claim_class); nsl_calculate_checksum(ndd, nd_label); /* update label */ diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 956b6d1bd8cc..31f94fad7b92 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -135,7 +135,6 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n); u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd); bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot); u32 nd_label_nfree(struct nvdimm_drvdata *ndd); -enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid); struct nd_region; struct nd_namespace_pmem; struct nd_namespace_blk; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index af5a31dd3147..58c76d74127a 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -2042,10 +2042,8 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, nspm->uuid = kmemdup((void __force *) label0->uuid, NSLABEL_UUID_LEN, GFP_KERNEL); nspm->lbasize = nsl_get_lbasize(ndd, label0); - if (namespace_label_has(ndd, abstraction_guid)) - nspm->nsio.common.claim_class - = to_nvdimm_cclass(&label0->abstraction_guid); - + nspm->nsio.common.claim_class = + nsl_get_claim_class(ndd, label0); } if (!nspm->alt_name || !nspm->uuid) { @@ -2273,11 +2271,8 @@ static struct device *create_namespace_blk(struct nd_region *nd_region, dev->parent = &nd_region->dev; nsblk->id = -1; nsblk->lbasize = nsl_get_lbasize(ndd, nd_label); - nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, - GFP_KERNEL); - if (namespace_label_has(ndd, abstraction_guid)) - nsblk->common.claim_class - = to_nvdimm_cclass(&nd_label->abstraction_guid); + nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, GFP_KERNEL); + nsblk->common.claim_class = nsl_get_claim_class(ndd, nd_label); if (!nsblk->uuid) goto blk_err; nsl_get_name(ndd, nd_label, name); diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index a3e215f2d837..5467ebbb4a6b 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -182,6 +182,8 @@ bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd, u64 isetcookie); bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd, struct nd_namespace_label *nd_label, guid_t *guid); +enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd, + struct nd_namespace_label *nd_label); struct nd_region_data { int ns_count; -- cgit v1.2.3
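On the GUID-vs-UUID point in the commit message above: the 128-bit ids are identical, only the storage byte order differs. A guid_t keeps its first three fields (time_low, time_mid, time_hi_and_version) little-endian, while a uuid_t keeps every field big-endian, which is why the same claim-class id can be matched under either encoding once a conversion is applied. A self-contained sketch of that conversion; uuid_to_guid() is an illustrative helper and the example id is arbitrary, not one of the real claim-class ids:

#include <stdint.h>
#include <stdio.h>

/*
 * The same 128-bit id in UUID (big-endian) storage order. In a GUID the
 * first three fields are stored little-endian; the final eight bytes
 * are stored identically in both encodings.
 */
static const uint8_t id_as_uuid[16] = {
	0x8a, 0xed, 0x63, 0xa2,	/* time_low, big-endian */
	0x29, 0xa5,		/* time_mid, big-endian */
	0x4b, 0x3d,		/* time_hi_and_version, big-endian */
	0x91, 0x20, 0xd2, 0x7c, 0x8f, 0x29, 0x2b, 0x06,
};

static void uuid_to_guid(const uint8_t u[16], uint8_t g[16])
{
	/* byte-swap the first three fields, copy the rest verbatim */
	g[0] = u[3]; g[1] = u[2]; g[2] = u[1]; g[3] = u[0];
	g[4] = u[5]; g[5] = u[4];
	g[6] = u[7]; g[7] = u[6];
	for (int i = 8; i < 16; i++)
		g[i] = u[i];
}

int main(void)
{
	uint8_t guid[16];

	uuid_to_guid(id_as_uuid, guid);
	for (int i = 0; i < 16; i++)
		printf("%02x%s", guid[i], i == 15 ? "\n" : " ");
	return 0;
}

Hiding exactly this swap inside nsl_get_claim_class() is what lets the UUID vs GUID distinction disappear from the call sites later.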
From a7bfaad54b8b9cf06041528988d6b75b4b921546 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Fri, 3 Sep 2021 19:20:39 -0700 Subject: cxl/acpi: Do not add DSDT disabled ACPI0016 host bridge ports During CXL ACPI probe, host bridge ports are discovered by scanning the ACPI0017 root port for ACPI0016 host bridge devices. The scan matches on the hardware id of "ACPI0016". An issue occurs when an ACPI0016 device is defined in the DSDT yet disabled on the platform. Attempts by the cxl_acpi driver to add host bridge ports using a disabled device fail, and the entire cxl_acpi probe fails. The DSDT table includes an _STA method that sets the status, and the ACPI subsystem has checks available to examine it. One such check is in the acpi_pci_find_root() path. Move the call to acpi_pci_find_root() to the matching function to prevent this issue when adding either upstream or downstream ports. Suggested-by: Dan Williams Signed-off-by: Alison Schofield Fixes: 7d4b5ca2e2cb ("cxl/acpi: Add downstream port data to cxl_port instances") Cc: Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163072203957.2250120.2178685721061002124.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/acpi.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index 8ae89273f58e..54e9d4d2cf5f 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev) { struct acpi_device *adev = to_acpi_device(dev); + if (!acpi_pci_find_root(adev->handle)) + return NULL; + if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0) return adev; return NULL; @@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg) if (!bridge) return 0; - pci_root = acpi_pci_find_root(bridge->handle); - if (!pci_root) - return -ENXIO; - dport = find_dport_by_dev(root_port, match); if (!dport) { dev_dbg(host, "host bridge expected and not found\n"); @@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg) return PTR_ERR(port); dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev)); + /* + * Note that this lookup already succeeded in + * to_cxl_host_bridge(), so no need to check for failure here + */ + pci_root = acpi_pci_find_root(bridge->handle); ctx = (struct cxl_walk_context){ .dev = host, .root = pci_root->bus, -- cgit v1.2.3 From 9e56614c44b994b78fc9fcb2070bcbe3f5df0d7b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 3 Sep 2021 19:20:45 -0700 Subject: cxl/pci: Fix lockdown level A proposed rework of security_locked_down() users identified that the cxl_pci driver was passing the wrong lockdown_reason. Update cxl_mem_raw_command_allowed() to fail raw command access when raw pci access is also disabled. Fixes: 13237183c735 ("cxl/mem: Add a "RAW" send command") Cc: Ben Widawsky Cc: Jonathan Cameron Cc: Cc: Ondrej Mosnacek Cc: Paul Moore Link: https://lore.kernel.org/r/163072204525.2250120.16615792476976546735.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 651e8d4ec974..37903259ee79 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -575,7 +575,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode) if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) return false; - if (security_locked_down(LOCKDOWN_NONE)) + if (security_locked_down(LOCKDOWN_PCI_ACCESS)) return false; if (cxl_raw_allow_all) -- cgit v1.2.3
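For context on the lockdown fix above: security_locked_down() returns 0 when the operation is permitted and -EPERM once the kernel's lockdown level covers the given reason, so passing LOCKDOWN_NONE never blocks anything. A hedged sketch of the corrected gating pattern follows; it is not the literal driver code (the cxl_raw_allow_all override is omitted):

#include <linux/kconfig.h>
#include <linux/security.h>

static bool raw_access_allowed(void)
{
	/* compile-time opt-in for raw command pass-through */
	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	/*
	 * Gate on the most specific lockdown reason for the resource
	 * being exposed; a nonzero return (-EPERM) means locked down.
	 */
	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	return true;
}

Raw CXL commands are morally equivalent to raw PCI config access, hence LOCKDOWN_PCI_ACCESS is the reason that matches the capability being granted.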
From da582aa5ad5787c46e3f475ab3f4602ec84c1617 Mon Sep 17 00:00:00 2001 From: "Li Qiang (Johnny Li)" Date: Fri, 3 Sep 2021 19:20:50 -0700 Subject: cxl/pci: Fix debug message in cxl_probe_regs() The indicator strings for the mbox and memdev register sets were incorrectly set to "status" in the error message. Cc: Fixes: 30af97296f48 ("cxl/pci: Map registers based on capabilities") Signed-off-by: Li Qiang (Johnny Li) Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163072205089.2250120.8103605864156687395.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- drivers/cxl/pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 37903259ee79..8e45aa07d662 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1041,8 +1041,8 @@ static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, !dev_map->memdev.valid) { dev_err(dev, "registers not found: %s%s%s\n", !dev_map->status.valid ? "status " : "", - !dev_map->mbox.valid ? "status " : "", - !dev_map->memdev.valid ? "status " : ""); + !dev_map->mbox.valid ? "mbox " : "", + !dev_map->memdev.valid ? "memdev " : ""); return -ENXIO; } -- cgit v1.2.3 From a01da6ca7d0ad66b6fa2dc4af0fc97ca8ba28b45 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 3 Sep 2021 19:21:01 -0700 Subject: cxl/pmem: Fix Documentation warning Commit 06737cd0d216 ("cxl/core: Move pmem functionality") neglected to add a DOC header for the new drivers/cxl/core/pmem.c file. Reported-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163072206163.2250120.11486436976516079516.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- Documentation/driver-api/cxl/memory-devices.rst | 2 +- drivers/cxl/core/pmem.c | 30 +++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst index 46847d8c70a0..df799cdf1c3f 100644 --- a/Documentation/driver-api/cxl/memory-devices.rst +++ b/Documentation/driver-api/cxl/memory-devices.rst @@ -40,7 +40,7 @@ CXL Core :doc: cxl core .. kernel-doc:: drivers/cxl/core/pmem.c - :internal: + :doc: cxl pmem .. kernel-doc:: drivers/cxl/core/regs.c :internal: diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 69c97cc0d945..d24570f5b8ba 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -1,13 +1,25 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. */ - #include #include #include #include - #include "core.h" +/** + * DOC: cxl pmem + * + * The core CXL PMEM infrastructure supports persistent memory + * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL + * 'bridge' device is added at the root of a CXL device topology if + * platform firmware advertises at least one persistent memory capable + * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus' + * device. Then for each cxl_memdev in the CXL device topology a bridge + * device is added to host a LIBNVDIMM dimm object. When these bridges + * are registered native LIBNVDIMM uapis are translated to CXL + * operations, for example, namespace label access commands. 
+ */ + static void cxl_nvdimm_bridge_release(struct device *dev) { struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev); @@ -85,6 +97,13 @@ static void unregister_nvb(void *_cxl_nvb) device_unregister(&cxl_nvb->dev); } +/** + * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology + * @host: platform firmware root device + * @port: CXL port at the root of a CXL topology + * + * Return: bridge device that can host cxl_nvdimm objects + */ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, struct cxl_port *port) { @@ -173,6 +192,13 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) return cxl_nvd; } +/** + * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm + * @host: same host as @cxlmd + * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations + * + * Return: 0 on success negative error code on failure. + */ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd) { struct cxl_nvdimm *cxl_nvd; -- cgit v1.2.3 From 2b922a9d064f8e86b53b04f5819917b7a04142ed Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 3 Sep 2021 19:21:06 -0700 Subject: cxl/registers: Fix Documentation warning Commit 0f06157e0135 ("cxl/core: Move register mapping infrastructure") neglected to add a DOC header for the new drivers/core/regs.c file. Reported-by: Ben Widawsky Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/163072206675.2250120.3527179192933919995.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams --- Documentation/driver-api/cxl/memory-devices.rst | 2 +- drivers/cxl/core/regs.c | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst index df799cdf1c3f..50ebcda17ad0 100644 --- a/Documentation/driver-api/cxl/memory-devices.rst +++ b/Documentation/driver-api/cxl/memory-devices.rst @@ -43,7 +43,7 @@ CXL Core :doc: cxl pmem .. kernel-doc:: drivers/cxl/core/regs.c - :internal: + :doc: cxl registers External Interfaces =================== diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 8535a7b94f28..41de4a136ecd 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -1,12 +1,25 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. */ - #include #include #include #include #include +/** + * DOC: cxl registers + * + * CXL device capabilities are enumerated by PCI DVSEC (Designated + * Vendor-specific) and / or descriptors provided by platform firmware. + * They can be defined as a set like the device and component registers + * mandated by CXL Section 8.1.12.2 Memory Device PCIe Capabilities and + * Extended Capabilities, or they can be individual capabilities + * appended to bridged and endpoint devices. + * + * Provide common infrastructure for enumerating and mapping these + * discrete capabilities. + */ + /** * cxl_probe_component_regs() - Detect CXL Component register blocks * @dev: Host device of the @base mapping -- cgit v1.2.3
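The enumeration model in the "cxl registers" DOC comment above reduces to a simple walk: read an array of capability headers, and for each recognized capability id record where its register block lives. A simplified, self-contained sketch of that pattern with made-up field layouts (the real layouts are defined by CXL Section 8.1.12.2 and decoded in drivers/cxl/core/regs.c):

#include <stdint.h>
#include <stdio.h>

/* Made-up, simplified capability header: an id plus the block's offset. */
struct cap_header {
	uint16_t id;
	uint16_t reserved;
	uint32_t offset;
};

/* Walk an array of capability headers and record known offsets. */
static void probe_caps(const struct cap_header *caps, int nr_caps,
		       uint32_t *status_off, uint32_t *mbox_off)
{
	for (int i = 0; i < nr_caps; i++) {
		switch (caps[i].id) {
		case 1:
			*status_off = caps[i].offset;
			break;
		case 2:
			*mbox_off = caps[i].offset;
			break;
		default:
			/* unknown capability: skip it, don't fail the probe */
			break;
		}
	}
}

int main(void)
{
	const struct cap_header caps[] = {
		{ .id = 1, .offset = 0x100 },
		{ .id = 2, .offset = 0x200 },
		{ .id = 9, .offset = 0x300 },	/* unrecognized, ignored */
	};
	uint32_t status = 0, mbox = 0;

	probe_caps(caps, 3, &status, &mbox);
	printf("status @%#x mbox @%#x\n", status, mbox);
	return 0;
}

Tolerating unknown ids is the important design choice: it is what lets one probe routine serve both the mandated register sets and vendor-appended capabilities.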