author     Dan Williams <dan.j.williams@intel.com>  2022-05-18 16:34:48 -0700
committer  Dan Williams <dan.j.williams@intel.com>  2022-05-19 08:50:41 -0700
commit     14d78874077442d1d0f08129f5a0ea5070984b4b (patch)
tree       4c80ac1f7f8fb934b3a430fdcdacee9d40e47f05 /drivers/cxl
parent     2e4ba0ec978335b4b550bbed95cb198ac3a00745 (diff)
download   linux-14d78874077442d1d0f08129f5a0ea5070984b4b.tar.bz2
cxl/mem: Consolidate CXL DVSEC Range enumeration in the core
In preparation for fixing the setting of the 'mem_enabled' bit in CXL
DVSEC Control register, move all CXL DVSEC range enumeration into the
same source file.

Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165291688886.1426646.15046138604010482084.stgit@dwillia2-xfh
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
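For orientation, a minimal sketch of the endpoint probe flow this
consolidation produces (hypothetical function name; the types and helpers
are the ones visible in the diff below): the cxl_mem driver owns a
stack-allocated struct cxl_endpoint_dvsec_info, the core helper
cxl_dvsec_ranges() fills it, and cxl_hdm_decode_init() consumes the same
structure, so cxl_dev_state no longer caches the enumeration result.

/*
 * Sketch only (not part of this patch): intended call order in the
 * cxl_mem endpoint driver after DVSEC Range enumeration moves to
 * drivers/cxl/core/pci.c.
 */
static int cxl_mem_probe_flow_sketch(struct device *dev,
				     struct cxl_dev_state *cxlds)
{
	struct cxl_endpoint_dvsec_info info = { 0 };
	int rc;

	/* DVSEC Range enumeration is now performed by the core. */
	rc = cxl_dvsec_ranges(cxlds, &info);
	if (rc)
		return rc;

	rc = cxl_await_media_ready(cxlds);
	if (rc) {
		dev_err(dev, "Media not active (%d)\n", rc);
		return rc;
	}

	/*
	 * If DVSEC ranges are in use instead of HDM decoder registers,
	 * HDM operation cannot be enabled.
	 */
	if (!cxl_hdm_decode_init(cxlds, &info))
		return -EBUSY;

	return 0;
}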
Diffstat (limited to 'drivers/cxl')
-rw-r--r--  drivers/cxl/core/pci.c  | 129
-rw-r--r--  drivers/cxl/cxlmem.h    |   1
-rw-r--r--  drivers/cxl/cxlpci.h    |   4
-rw-r--r--  drivers/cxl/mem.c       |  14
-rw-r--r--  drivers/cxl/pci.c       | 135
5 files changed, 141 insertions, 142 deletions
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 603945f49174..ea6711721901 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -142,3 +142,132 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
+
+static int wait_for_valid(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec, rc;
+ u32 val;
+
+ /*
+ * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
+ * and Size Low registers are valid. Must be set within 1 second of
+ * deassertion of reset to CXL device. Likely it is already set by the
+ * time this runs, but otherwise give a 1.5 second timeout in case of
+ * clock skew.
+ */
+ rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
+ if (rc)
+ return rc;
+
+ if (val & CXL_DVSEC_MEM_INFO_VALID)
+ return 0;
+
+ msleep(1500);
+
+ rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
+ if (rc)
+ return rc;
+
+ if (val & CXL_DVSEC_MEM_INFO_VALID)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Returns 0 on success and a negative error code on failure. The number
+ * of non-zero ranges found is written to info->ranges; the cxl_mem
+ * driver depends on ranges == 0 to init HDM operation.
+ */
+int cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int hdm_count, rc, i, ranges = 0;
+ struct device *dev = &pdev->dev;
+ int d = cxlds->cxl_dvsec;
+ u16 cap, ctrl;
+
+ if (!d) {
+ dev_dbg(dev, "No DVSEC Capability\n");
+ return -ENXIO;
+ }
+
+ rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
+ if (rc)
+ return rc;
+
+ rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
+ if (rc)
+ return rc;
+
+ if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
+ dev_dbg(dev, "Not MEM Capable\n");
+ return -ENXIO;
+ }
+
+ /*
+ * It is not allowed by spec for MEM.capable to be set and have 0 legacy
+ * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
+ * driver is for a spec defined class code which must be CXL.mem
+ * capable, there is no point in continuing to enable CXL.mem.
+ */
+ hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
+ if (!hdm_count || hdm_count > 2)
+ return -EINVAL;
+
+ rc = wait_for_valid(cxlds);
+ if (rc) {
+ dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
+ return rc;
+ }
+
+ info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
+
+ for (i = 0; i < hdm_count; i++) {
+ u64 base, size;
+ u32 temp;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
+ if (rc)
+ return rc;
+
+ size = (u64)temp << 32;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
+ if (rc)
+ return rc;
+
+ size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
+ if (rc)
+ return rc;
+
+ base = (u64)temp << 32;
+
+ rc = pci_read_config_dword(
+ pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
+ if (rc)
+ return rc;
+
+ base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
+
+ info->dvsec_range[i] = (struct range) {
+ .start = base,
+ .end = base + size - 1
+ };
+
+ if (size)
+ ranges++;
+ }
+
+ info->ranges = ranges;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dvsec_ranges, CXL);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 843916c1dab6..60d10ee1e7fc 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -222,7 +222,6 @@ struct cxl_dev_state {
u64 next_persistent_bytes;
resource_size_t component_reg_phys;
- struct cxl_endpoint_dvsec_info info;
u64 serial;
int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 329e7ea3f36a..ad1b62843195 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -72,4 +72,8 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev,
}
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
+struct cxl_dev_state;
+struct cxl_endpoint_dvsec_info;
+int cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_dvsec_info *info);
#endif /* __CXL_PCI_H__ */
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 8c3a1c85a7ae..0cfbde134fc7 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -58,18 +58,15 @@ static int create_endpoint(struct cxl_memdev *cxlmd,
* decoders, or if it can not be determined if DVSEC Ranges are in use.
* Otherwise, returns true.
*/
-__mock bool cxl_hdm_decode_init(struct cxl_dev_state *cxlds)
+__mock bool cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_dvsec_info *info)
{
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
struct cxl_register_map map;
struct cxl_component_reg_map *cmap = &map.component_map;
bool global_enable, retval = false;
void __iomem *crb;
u32 global_ctrl;
- if (info->ranges < 0)
- return false;
-
/* map hdm decoder */
crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
if (!crb) {
@@ -125,6 +122,7 @@ static void enable_suspend(void *data)
static int cxl_mem_probe(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_endpoint_dvsec_info info = { 0 };
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *parent_port;
int rc;
@@ -165,6 +163,10 @@ unlock:
if (rc)
return rc;
+ rc = cxl_dvsec_ranges(cxlds, &info);
+ if (rc)
+ return rc;
+
rc = cxl_await_media_ready(cxlds);
if (rc) {
dev_err(dev, "Media not active (%d)\n", rc);
@@ -175,7 +177,7 @@ unlock:
* If DVSEC ranges are being used instead of HDM decoder registers there
* is no use in trying to manage those.
*/
- if (!cxl_hdm_decode_init(cxlds)) {
+ if (!cxl_hdm_decode_init(cxlds, &info)) {
dev_err(dev,
"Legacy range registers configuration prevents HDM operation.\n");
return -EBUSY;
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 1bf880fa1fb8..5a0ae46d4989 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -386,139 +386,6 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
return rc;
}
-static int wait_for_valid(struct cxl_dev_state *cxlds)
-{
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- int d = cxlds->cxl_dvsec, rc;
- u32 val;
-
- /*
- * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
- * and Size Low registers are valid. Must be set within 1 second of
- * deassertion of reset to CXL device. Likely it is already set by the
- * time this runs, but otherwise give a 1.5 second timeout in case of
- * clock skew.
- */
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- msleep(1500);
-
- rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
- if (rc)
- return rc;
-
- if (val & CXL_DVSEC_MEM_INFO_VALID)
- return 0;
-
- return -ETIMEDOUT;
-}
-
-/*
- * Return positive number of non-zero ranges on success and a negative
- * error code on failure. The cxl_mem driver depends on ranges == 0 to
- * init HDM operation.
- */
-static int __cxl_dvsec_ranges(struct cxl_dev_state *cxlds,
- struct cxl_endpoint_dvsec_info *info)
-{
- struct pci_dev *pdev = to_pci_dev(cxlds->dev);
- int hdm_count, rc, i, ranges = 0;
- struct device *dev = &pdev->dev;
- int d = cxlds->cxl_dvsec;
- u16 cap, ctrl;
-
- if (!d) {
- dev_dbg(dev, "No DVSEC Capability\n");
- return -ENXIO;
- }
-
- rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
- if (rc)
- return rc;
-
- rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
- if (rc)
- return rc;
-
- if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
- dev_dbg(dev, "Not MEM Capable\n");
- return -ENXIO;
- }
-
- /*
- * It is not allowed by spec for MEM.capable to be set and have 0 legacy
- * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
- * driver is for a spec defined class code which must be CXL.mem
- * capable, there is no point in continuing to enable CXL.mem.
- */
- hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
- if (!hdm_count || hdm_count > 2)
- return -EINVAL;
-
- rc = wait_for_valid(cxlds);
- if (rc) {
- dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
- return rc;
- }
-
- info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
-
- for (i = 0; i < hdm_count; i++) {
- u64 base, size;
- u32 temp;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
- if (rc)
- return rc;
-
- size = (u64)temp << 32;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
- if (rc)
- return rc;
-
- size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
- if (rc)
- return rc;
-
- base = (u64)temp << 32;
-
- rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
- if (rc)
- return rc;
-
- base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
-
- info->dvsec_range[i] = (struct range) {
- .start = base,
- .end = base + size - 1
- };
-
- if (size)
- ranges++;
- }
-
- return ranges;
-}
-
-static void cxl_dvsec_ranges(struct cxl_dev_state *cxlds)
-{
- struct cxl_endpoint_dvsec_info *info = &cxlds->info;
-
- info->ranges = __cxl_dvsec_ranges(cxlds, info);
-}
-
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
@@ -583,8 +450,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxl_dvsec_ranges(cxlds);
-
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);