From 6d382616ac2283ed65c7a6a52d05b064488aa8f8 Mon Sep 17 00:00:00 2001 From: Frederic Barrat Date: Tue, 24 May 2016 03:39:18 +1000 Subject: cxl: Abstract the differences between the PSL and XSL The XSL (Translation Service Layer) is a stripped down version of the PSL (Power Service Layer) used in some cards such as the Mellanox CX4. Like the PSL, it implements the CAIA architecture, but has a number of differences, mostly in it's implementation dependent registers. This adds an ops structure to abstract these differences to bring initial support for XSL CAPI devices. The XSL does not implement the optional architected SERR register, however while it treats it as a reserved register and should work with no special treatment, attempting to access it will cause the XSL_FEC (First Error Capture) register to be filled out, preventing it from capturing any subsequent errors. Therefore, this patch also prevents the kernel from trying to set up the SERR register so that the FEC register may still be useful, and to save one interrupt. The XSL also uses a special DMA cxl mode, which uses a slightly different init sequence for the CAPP and PHB. The kernel support for this will be in a future patch once the corresponding support has been merged into skiboot. Co-authored-by: Ian Munsie Signed-off-by: Ian Munsie Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 152 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 128 insertions(+), 24 deletions(-) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index a08fcc888a71..556718d7915f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -352,13 +352,10 @@ static u64 get_capp_unit_id(struct device_node *np) return 0; } -static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev) +static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id) { struct device_node *np; const __be32 *prop; - u64 psl_dsnctl; - u64 chipid; - u64 capp_unit_id; if (!(np = pnv_pci_get_phb_node(dev))) return -ENODEV; @@ -367,14 +364,28 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev np = of_get_next_parent(np); if (!np) return -ENODEV; - chipid = be32_to_cpup(prop); - capp_unit_id = get_capp_unit_id(np); + *chipid = be32_to_cpup(prop); + *capp_unit_id = get_capp_unit_id(np); of_node_put(np); - if (!capp_unit_id) { + if (!*capp_unit_id) { pr_err("cxl: invalid capp unit id\n"); return -ENODEV; } + return 0; +} + +static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev) +{ + u64 psl_dsnctl; + u64 chipid; + u64 capp_unit_id; + int rc; + + rc = calc_capp_routing(dev, &chipid, &capp_unit_id); + if (rc) + return rc; + psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */ psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */ /* Tell PSL where to route data to */ @@ -393,8 +404,61 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev return 0; } +static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev) +{ + u64 xsl_dsnctl; + u64 chipid; + u64 capp_unit_id; + int rc; + + rc = calc_capp_routing(dev, &chipid, &capp_unit_id); + if (rc) + return rc; + + /* Tell XSL where to route data to */ + xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5)); + xsl_dsnctl |= (capp_unit_id << (63-13)); + cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl); + + return 0; +} + +/* PSL & XSL */ +#define TBSYNC_CAL(n) (((u64)n & 0x7) 
<< (63-3)) #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6)) -#define _2048_250MHZ_CYCLES 1 +/* For the PSL this is a multiple for 0 < n <= 7: */ +#define PSL_2048_250MHZ_CYCLES 1 + +static void write_timebase_ctrl_psl(struct cxl *adapter) +{ + cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, + TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)); +} + +/* XSL */ +#define TBSYNC_ENA (1ULL << 63) +/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */ +#define XSL_2000_CLOCKS 1 +#define XSL_4000_CLOCKS 2 +#define XSL_8000_CLOCKS 3 + +static void write_timebase_ctrl_xsl(struct cxl *adapter) +{ + cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT, + TBSYNC_ENA | + TBSYNC_CAL(3) | + TBSYNC_CNT(XSL_4000_CLOCKS)); +} + +static u64 timebase_read_psl(struct cxl *adapter) +{ + return cxl_p1_read(adapter, CXL_PSL_Timebase); +} + +static u64 timebase_read_xsl(struct cxl *adapter) +{ + return cxl_p1_read(adapter, CXL_XSL_Timebase); +} static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) { @@ -421,8 +485,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) * Setup PSL Timebase Control and Status register * with the recommended Timebase Sync Count value */ - cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT, - TBSYNC_CNT(2 * _2048_250MHZ_CYCLES)); + adapter->native->sl_ops->write_timebase_ctrl(adapter); /* Enable PSL Timebase */ cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000); @@ -435,7 +498,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) dev_info(&dev->dev, "PSL timebase can't synchronize\n"); return; } - psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase); + psl_tb = adapter->native->sl_ops->timebase_read(adapter); delta = mftb() - psl_tb; if (delta < 0) delta = -delta; @@ -445,7 +508,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev) return; } -static int init_implementation_afu_regs(struct cxl_afu *afu) +static int init_implementation_afu_psl_regs(struct cxl_afu *afu) { /* read/write masks for this slice */ cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL); @@ -753,11 +816,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu) else cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); } - reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); - if (reg) { - if (reg & ~0xffff) - dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); - cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + if (afu->adapter->native->sl_ops->register_serr_irq) { + reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); + if (reg) { + if (reg & ~0xffff) + dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); + cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); + } } reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); if (reg) { @@ -835,11 +900,13 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc if ((rc = cxl_afu_descriptor_looks_ok(afu))) goto err1; - if ((rc = init_implementation_afu_regs(afu))) - goto err1; + if (adapter->native->sl_ops->afu_regs_init) + if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) + goto err1; - if ((rc = cxl_native_register_serr_irq(afu))) - goto err1; + if (adapter->native->sl_ops->register_serr_irq) + if ((rc = adapter->native->sl_ops->register_serr_irq(afu))) + goto err1; if ((rc = cxl_native_register_psl_irq(afu))) goto err2; @@ -847,7 +914,8 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc return 0; err2: - cxl_native_release_serr_irq(afu); + if (adapter->native->sl_ops->release_serr_irq) + 
adapter->native->sl_ops->release_serr_irq(afu); err1: pci_unmap_slice_regs(afu); return rc; @@ -856,7 +924,8 @@ err1: static void pci_deconfigure_afu(struct cxl_afu *afu) { cxl_native_release_psl_irq(afu); - cxl_native_release_serr_irq(afu); + if (afu->adapter->native->sl_ops->release_serr_irq) + afu->adapter->native->sl_ops->release_serr_irq(afu); pci_unmap_slice_regs(afu); } @@ -1177,7 +1246,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = sanitise_adapter_regs(adapter))) goto err; - if ((rc = init_implementation_adapter_regs(adapter, dev))) + if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) goto err; if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI))) @@ -1212,6 +1281,39 @@ static void cxl_deconfigure_adapter(struct cxl *adapter) pci_disable_device(pdev); } +static const struct cxl_service_layer_ops psl_ops = { + .adapter_regs_init = init_implementation_adapter_psl_regs, + .afu_regs_init = init_implementation_afu_psl_regs, + .register_serr_irq = cxl_native_register_serr_irq, + .release_serr_irq = cxl_native_release_serr_irq, + .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs, + .debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs, + .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs, + .err_irq_dump_registers = cxl_native_err_irq_dump_regs, + .debugfs_stop_trace = cxl_stop_trace, + .write_timebase_ctrl = write_timebase_ctrl_psl, + .timebase_read = timebase_read_psl, +}; + +static const struct cxl_service_layer_ops xsl_ops = { + .adapter_regs_init = init_implementation_adapter_xsl_regs, + .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs, + .write_timebase_ctrl = write_timebase_ctrl_xsl, + .timebase_read = timebase_read_xsl, +}; + +static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) +{ + if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { + dev_info(&adapter->dev, "Device uses an XSL\n"); + adapter->native->sl_ops = &xsl_ops; + } else { + dev_info(&adapter->dev, "Device uses a PSL\n"); + adapter->native->sl_ops = &psl_ops; + } +} + + static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) { struct cxl *adapter; @@ -1227,6 +1329,8 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) goto err_release; } + set_sl_ops(adapter, dev); + /* Set defaults for parameters which need to persist over * configure/reconfigure */ -- cgit v1.2.3 From b385c9e971468eb8816b26742449d6d1e49f55f1 Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Wed, 8 Jun 2016 15:09:54 +1000 Subject: cxl: Add support for CAPP DMA mode This adds support for using CAPP DMA mode, which is required for XSL based cards such as the Mellanox CX4 to function. This is currently an RFC as it depends on the corresponding support to be merged into skiboot first, which was submitted here: http://patchwork.ozlabs.org/patch/625582/ In the event that the skiboot on the system does not have the above support, it will indicate as such in the kernel log and abort the init process. 
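To make the firmware dependency concrete, here is a minimal user-space sketch of the pattern this series introduces: each service layer ops table carries the CAPI mode it needs, and adapter configuration hands that mode to firmware and gives up if the call fails. The fake_set_phb_cxl_mode() stub and the enum values below are illustrative stand-ins for opal_pci_set_phb_cxl_mode() and the OPAL_PHB_CAPI_MODE_* constants, not the real skiboot interface.

#include <stdio.h>

/* Illustrative stand-ins for the OPAL_PHB_CAPI_MODE_* values added here */
enum capi_mode { CAPI_MODE_CAPI = 1, CAPI_MODE_DMA = 4 };

struct sl_ops {
        const char *name;
        int capi_mode;          /* mode requested from firmware at configure time */
};

static const struct sl_ops psl_ops = { .name = "PSL", .capi_mode = CAPI_MODE_CAPI };
static const struct sl_ops xsl_ops = { .name = "XSL", .capi_mode = CAPI_MODE_DMA };

/* Stand-in for opal_pci_set_phb_cxl_mode(): pretend old firmware lacks DMA mode */
static int fake_set_phb_cxl_mode(int mode)
{
        return (mode == CAPI_MODE_DMA) ? -1 : 0;
}

static int configure_adapter(const struct sl_ops *ops)
{
        int rc = fake_set_phb_cxl_mode(ops->capi_mode);

        if (rc) {
                fprintf(stderr, "%s: required cxl mode not supported - update skiboot\n",
                        ops->name);
                return rc;      /* adapter init aborts here */
        }
        printf("%s: PHB switched to CAPI mode %d\n", ops->name, ops->capi_mode);
        return 0;
}

int main(void)
{
        configure_adapter(&psl_ops);    /* plain CAPI mode succeeds */
        configure_adapter(&xsl_ops);    /* DMA mode on old firmware fails and aborts */
        return 0;
}

On a PSL card the plain CAPI mode request succeeds; on an XSL card running an older skiboot the DMA mode request fails and init stops, which is the behaviour described above.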
Signed-off-by: Ian Munsie Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/opal-api.h | 1 + arch/powerpc/platforms/powernv/pci-ioda.c | 4 +++- drivers/misc/cxl/cxl.h | 1 + drivers/misc/cxl/pci.c | 4 +++- 4 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/misc/cxl/pci.c') diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 70b5cbc0a49c..cd9371b33f1a 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -825,6 +825,7 @@ enum { OPAL_PHB_CAPI_MODE_CAPI = 1, OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2, OPAL_PHB_CAPI_MODE_SNOOP_ON = 3, + OPAL_PHB_CAPI_MODE_DMA = 4, }; /* OPAL I2C request */ diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1fc53e015d29..2c0e09f2fc50 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2793,7 +2793,9 @@ int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode) pe_info(pe, "Switching PHB to CXL\n"); rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number); - if (rc) + if (rc == OPAL_UNSUPPORTED) + dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n"); + else if (rc) dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc); return rc; diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 790faebf44dc..ce2b9d513069 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -543,6 +543,7 @@ struct cxl_service_layer_ops { void (*debugfs_stop_trace)(struct cxl *adapter); void (*write_timebase_ctrl)(struct cxl *adapter); u64 (*timebase_read)(struct cxl *adapter); + int capi_mode; }; struct cxl_native { diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 556718d7915f..648817a2e219 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1249,7 +1249,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) goto err; - if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI))) + if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) goto err; /* If recovery happened, the last step is to turn on snooping. @@ -1293,6 +1293,7 @@ static const struct cxl_service_layer_ops psl_ops = { .debugfs_stop_trace = cxl_stop_trace, .write_timebase_ctrl = write_timebase_ctrl_psl, .timebase_read = timebase_read_psl, + .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, }; static const struct cxl_service_layer_ops xsl_ops = { @@ -1300,6 +1301,7 @@ static const struct cxl_service_layer_ops xsl_ops = { .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs, .write_timebase_ctrl = write_timebase_ctrl_xsl, .timebase_read = timebase_read_xsl, + .capi_mode = OPAL_PHB_CAPI_MODE_DMA, }; static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) -- cgit v1.2.3 From 49e9c99f47fc43abc9598f9fcf5ba3336d0c09a6 Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Wed, 29 Jun 2016 22:16:25 +1000 Subject: cxl: Fix allowing bogus AFU descriptors with 0 maximum processes If the AFU descriptor of an AFU directed AFU indicates that it supports 0 maximum processes, we will accept that value and attempt to use it. The SPA will still be allocated (with 2 pages due to another minor bug and room for 958 processes), and when a context is allocated we will pass the value of 0 to idr_alloc as the maximum. 
However, idr_alloc will treat that as meaning no maximum and will allocate a context number and we return a valid context. Conceivably, this could lead to a buffer overflow of the SPA if more than 958 contexts were allocated, however this is mitigated by the fact that there are no known AFUs in the wild with a bogus AFU descriptor like this, and that only the root user is allowed to flash an AFU image to a card. Add a check when validating the AFU descriptor to reject any with 0 maximum processes. We do still allow a dedicated process only AFU to indicate that it supports 0 contexts even though that is forbidden in the architecture, as in that case we ignore the value and use 1 instead. This is just on the off-chance that such a dedicated process AFU may exist (not that I am aware of any), since their developers are less likely to have cared about this value at all. Signed-off-by: Ian Munsie Reviewed-by: Frederic Barrat Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 648817a2e219..58d7d8215964 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -775,6 +775,21 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) } } + if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { + /* + * We could also check this for the dedicated process model + * since the architecture indicates it should be set to 1, but + * in that case we ignore the value and I'd rather not risk + * breaking any existing dedicated process AFUs that left it as + * 0 (not that I'm aware of any). It is clearly an error for an + * AFU directed AFU to set this to 0, and would have previously + * triggered a bug resulting in the maximum not being enforced + * at all since idr_alloc treats 0 as no maximum. + */ + dev_err(&afu->dev, "AFU does not support any processes\n"); + return -EINVAL; + } + return 0; } -- cgit v1.2.3 From 5e7823c9bc44965c2e7d1d755b382109830c4916 Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Fri, 1 Jul 2016 02:50:40 +1000 Subject: cxl: Fix bug where AFU disable operation had no effect The AFU disable operation has a bug where it will not clear the enable bit and therefore will have no effect. To date this has likely been masked by fact that we perform an AFU reset before the disable, which also has the effect of clearing the enable bit, making the following disable operation effectively a noop on most hardware. This patch modifies the afu_control function to take a parameter to clear from the AFU control register so that the disable operation can clear the appropriate bit. This bug was uncovered on the Mellanox CX4, which uses an XSL rather than a PSL. On the XSL the reset operation will not complete while the AFU is enabled, meaning the enable bit was still set at the start of the disable and as a result this bug was hit and the disable also timed out. Because of this difference in behaviour between the PSL and XSL, this patch now makes the reset dependent on the card using a PSL to avoid waiting for a timeout on the XSL. It is entirely possible that we may be able to drop the reset altogether if it turns out we only ever needed it due to this bug - however I am not willing to drop it without further regression testing and have added comments to the code explaining the background. 
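The heart of the fix is easiest to see in isolation: afu_control() now does a read-modify-write that can clear bits as well as set them, so the disable path can actually drop the enable bit. The model below is a user-space sketch of that update; the register and bit names are simplified stand-ins, not the real CXL_AFU_Cntl_An layout.

#include <stdio.h>
#include <stdint.h>

#define AFU_CNTL_ENABLE (1ULL << 0)     /* stand-in for the AFU enable bit */

static uint64_t afu_cntl;               /* models the AFU control register */

/* Mirrors the new afu_control() signature: bits to set and bits to clear */
static void afu_control(uint64_t command, uint64_t clear)
{
        afu_cntl = (afu_cntl & ~clear) | command;
}

int main(void)
{
        afu_control(AFU_CNTL_ENABLE, 0);        /* enable the AFU */

        afu_control(0, 0);                      /* old disable: clears nothing */
        printf("after old-style disable: %#llx\n", (unsigned long long)afu_cntl);

        afu_control(0, AFU_CNTL_ENABLE);        /* fixed disable clears the enable bit */
        printf("after fixed disable:     %#llx\n", (unsigned long long)afu_cntl);
        return 0;
}

With clear == 0 the disable request leaves the enable bit set, which is the bug described above; passing the enable bit in clear is what makes the disable take effect.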
This also fixes a small issue where the AFU_Cntl register was read outside of the lock that protects it. Signed-off-by: Ian Munsie Reviewed-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/cxl.h | 1 + drivers/misc/cxl/native.c | 58 +++++++++++++++++++++++++++++++++++++++++------ drivers/misc/cxl/pci.c | 1 + 3 files changed, 53 insertions(+), 7 deletions(-) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 27578fceda8a..aafffa8554e2 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -552,6 +552,7 @@ struct cxl_service_layer_ops { void (*write_timebase_ctrl)(struct cxl *adapter); u64 (*timebase_read)(struct cxl *adapter); int capi_mode; + bool needs_reset_before_disable; }; struct cxl_native { diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 120c468ee9e0..e77450567f69 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -21,10 +21,10 @@ #include "cxl.h" #include "trace.h" -static int afu_control(struct cxl_afu *afu, u64 command, +static int afu_control(struct cxl_afu *afu, u64 command, u64 clear, u64 result, u64 mask, bool enabled) { - u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + u64 AFU_Cntl; unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); int rc = 0; @@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command, trace_cxl_afu_ctrl(afu, command); - cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command); + AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); + cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); while ((AFU_Cntl & mask) != result) { @@ -67,7 +68,7 @@ static int afu_enable(struct cxl_afu *afu) { pr_devel("AFU enable request\n"); - return afu_control(afu, CXL_AFU_Cntl_An_E, + return afu_control(afu, CXL_AFU_Cntl_An_E, 0, CXL_AFU_Cntl_An_ES_Enabled, CXL_AFU_Cntl_An_ES_MASK, true); } @@ -76,7 +77,8 @@ int cxl_afu_disable(struct cxl_afu *afu) { pr_devel("AFU disable request\n"); - return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled, + return afu_control(afu, 0, CXL_AFU_Cntl_An_E, + CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_ES_MASK, false); } @@ -85,7 +87,7 @@ static int native_afu_reset(struct cxl_afu *afu) { pr_devel("AFU reset request\n"); - return afu_control(afu, CXL_AFU_Cntl_An_RA, + return afu_control(afu, CXL_AFU_Cntl_An_RA, 0, CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, false); @@ -595,7 +597,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu) cxl_sysfs_afu_m_remove(afu); cxl_chardev_afu_remove(afu); - cxl_ops->afu_reset(afu); + /* + * The CAIA section 2.2.1 indicates that the procedure for starting and + * stopping an AFU in AFU directed mode is AFU specific, which is not + * ideal since this code is generic and with one exception has no + * knowledge of the AFU. This is in contrast to the procedure for + * disabling a dedicated process AFU, which is documented to just + * require a reset. The architecture does indicate that both an AFU + * reset and an AFU disable should result in the AFU being disabled and + * we do both followed by a PSL purge for safety. 
+ * + * Notably we used to have some issues with the disable sequence on PSL + * cards, which is why we ended up using this heavy weight procedure in + * the first place, however a bug was discovered that had rendered the + * disable operation ineffective, so it is conceivable that was the + * sole explanation for those difficulties. Careful regression testing + * is recommended if anyone attempts to remove or reorder these + * operations. + * + * The XSL on the Mellanox CX4 behaves a little differently from the + * PSL based cards and will time out an AFU reset if the AFU is still + * enabled. That card is special in that we do have a means to identify + * it from this code, so in that case we skip the reset and just use a + * disable/purge to avoid the timeout and corresponding noise in the + * kernel log. + */ + if (afu->adapter->native->sl_ops->needs_reset_before_disable) + cxl_ops->afu_reset(afu); cxl_afu_disable(afu); cxl_psl_purge(afu); @@ -735,6 +763,22 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel, static inline int detach_process_native_dedicated(struct cxl_context *ctx) { + /* + * The CAIA section 2.1.1 indicates that we need to do an AFU reset to + * stop the AFU in dedicated mode (we therefore do not make that + * optional like we do in the afu directed path). It does not indicate + * that we need to do an explicit disable (which should occur + * implicitly as part of the reset) or purge, but we do these as well + * to be on the safe side. + * + * Notably we used to have some issues with the disable sequence + * (before the sequence was spelled out in the architecture) which is + * why we were so heavy weight in the first place, however a bug was + * discovered that had rendered the disable operation ineffective, so + * it is conceivable that was the sole explanation for those + * difficulties. Point is, we should be careful and do some regression + * testing if we ever attempt to remove any part of this procedure. + */ cxl_ops->afu_reset(ctx->afu); cxl_afu_disable(ctx->afu); cxl_psl_purge(ctx->afu); diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 58d7d8215964..b7f2e96c1528 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1309,6 +1309,7 @@ static const struct cxl_service_layer_ops psl_ops = { .write_timebase_ctrl = write_timebase_ctrl_psl, .timebase_read = timebase_read_psl, .capi_mode = OPAL_PHB_CAPI_MODE_CAPI, + .needs_reset_before_disable = true, }; static const struct cxl_service_layer_ops xsl_ops = { -- cgit v1.2.3 From 3b3dcd61fa4e3604d8f1bdfd8471fca7b7c012e4 Mon Sep 17 00:00:00 2001 From: Philippe Bergheaud Date: Fri, 1 Jul 2016 13:32:52 +0200 Subject: cxl: Ignore CAPI adapters misplaced in switched slots One should not attempt to switch a PHB into CAPI mode if there is a switch between the PHB and the adapter. This patch modifies the cxl driver to ignore CAPI adapters misplaced in switched slots. 
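The detection itself is a short walk up the device tree: count how many ancestors report a device_type of "pciex" and reject the card when the chain is deeper than a directly attached slot would produce. The sketch below is a self-contained model of that depth test; the node structure and sample topology are simplified stand-ins for the of_* device tree walk in the patch.

#include <stdio.h>
#include <string.h>

#define CXL_MAX_PCIEX_PARENT 2  /* PHB plus root port, as in the patch */

struct node {
        const char *device_type;
        const struct node *parent;
};

/* Returns 1 if the device sits behind more "pciex" nodes than a direct slot */
static int slot_is_switched(const struct node *np)
{
        int depth = 0;

        for (np = np->parent; np; np = np->parent) {
                if (!np->device_type || strcmp(np->device_type, "pciex"))
                        break;
                depth++;
        }
        return depth > CXL_MAX_PCIEX_PARENT;
}

int main(void)
{
        struct node phb    = { "pciex", NULL };
        struct node port   = { "pciex", &phb };
        struct node swtch  = { "pciex", &port };
        struct node direct = { "pci",   &port };        /* card straight under the root port */
        struct node behind = { "pci",   &swtch };       /* card behind a switch */

        printf("direct slot switched: %d\n", slot_is_switched(&direct));
        printf("behind a switch:      %d\n", slot_is_switched(&behind));
        return 0;
}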
Signed-off-by: Philippe Bergheaud Reviewed-by: Frederic Barrat Acked-by: Ian Munsie Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index b7f2e96c1528..3a5f9801640d 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1402,6 +1402,30 @@ static void cxl_pci_remove_adapter(struct cxl *adapter) device_unregister(&adapter->dev); } +#define CXL_MAX_PCIEX_PARENT 2 + +static int cxl_slot_is_switched(struct pci_dev *dev) +{ + struct device_node *np; + int depth = 0; + const __be32 *prop; + + if (!(np = pci_device_to_OF_node(dev))) { + pr_err("cxl: np = NULL\n"); + return -ENODEV; + } + of_node_get(np); + while (np) { + np = of_get_next_parent(np); + prop = of_get_property(np, "device_type", NULL); + if (!prop || strcmp((char *)prop, "pciex")) + break; + depth++; + } + of_node_put(np); + return (depth > CXL_MAX_PCIEX_PARENT); +} + static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct cxl *adapter; @@ -1413,6 +1437,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } + if (cxl_slot_is_switched(dev)) { + dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); + return -ENODEV; + } + if (cxl_verbose) dump_cxl_config_space(dev); -- cgit v1.2.3 From 4e56f858bdde5cbfb70f61baddfaa56a8ed851bf Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Thu, 14 Jul 2016 07:17:01 +1000 Subject: cxl: Add cxl_slot_is_supported API This extends the check that the adapter is in a CAPI capable slot so that it may be called by external users in the kernel API. This will be used by the upcoming Mellanox CX4 support, which needs to know ahead of time if the card can be switched to cxl mode so that it can leave it in PCI mode if it is not. This API takes a parameter to check if CAPP DMA mode is supported, which it currently only allows on P8NVL systems, since that mode currently has issues accessing memory < 4GB on P8, and we cannot realistically avoid that. This API does not currently check if a CAPP unit is available (i.e. not already assigned to another PHB) on P8. Doing so would be racy since it is assigned on a first come first serve basis, and so long as CAPP DMA mode is not supported on P8 we don't need this, since the only anticipated user of this API requires CAPP DMA mode. Cc: Philippe Bergheaud Signed-off-by: Ian Munsie Reviewed-by: Andrew Donnellan Reviewed-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 37 +++++++++++++++++++++++++++++++++++++ include/misc/cxl.h | 15 +++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 3a5f9801640d..6ac6b05f41a4 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1426,6 +1426,43 @@ static int cxl_slot_is_switched(struct pci_dev *dev) return (depth > CXL_MAX_PCIEX_PARENT); } +bool cxl_slot_is_supported(struct pci_dev *dev, int flags) +{ + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return false; + + if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) { + /* + * CAPP DMA mode is technically supported on regular P8, but + * will EEH if the card attempts to access memory < 4GB, which + * we cannot realistically avoid. 
We might be able to work + * around the issue, but until then return unsupported: + */ + return false; + } + + if (cxl_slot_is_switched(dev)) + return false; + + /* + * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since + * the CAPP can be connected to PHB 0, 1 or 2 on a first come first + * served basis, which is racy to check from here. If we need to + * support this in future we might need to consider having this + * function effectively reserve it ahead of time. + * + * Currently, the only user of this API is the Mellanox CX4, which is + * only supported on P8NVL due to the above mentioned limitation of + * CAPP DMA mode and therefore does not need to worry about this. If the + * issue with CAPP DMA mode is later worked around on P8 we might need + * to revisit this. + */ + + return true; +} +EXPORT_SYMBOL_GPL(cxl_slot_is_supported); + + static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct cxl *adapter; diff --git a/include/misc/cxl.h b/include/misc/cxl.h index b6d040f31f76..dd9eebba3bb6 100644 --- a/include/misc/cxl.h +++ b/include/misc/cxl.h @@ -24,6 +24,21 @@ * generic PCI API. This API is agnostic to the actual AFU. */ +#define CXL_SLOT_FLAG_DMA 0x1 + +/* + * Checks if the given card is in a cxl capable slot. Pass CXL_SLOT_FLAG_DMA if + * the card requires CAPP DMA mode to also check if the system supports it. + * This is intended to be used by bi-modal devices to determine if they can use + * cxl mode or if they should continue running in PCI mode. + * + * Note that this only checks if the slot is cxl capable - it does not + * currently check if the CAPP is currently available for chips where it can be + * assigned to different PHBs on a first come first serve basis (i.e. P8) + */ +bool cxl_slot_is_supported(struct pci_dev *dev, int flags); + + /* Get the AFU associated with a pci_dev */ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev); -- cgit v1.2.3 From 48b3adf33459c1c42766d9c2068a592216fe7812 Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Thu, 14 Jul 2016 07:17:02 +1000 Subject: cxl: Enable bus mastering for devices using CAPP DMA mode Devices that use CAPP DMA mode (such as the Mellanox CX4) require bus master to be enabled in order for the CAPI traffic to flow. This should be harmless to enable for other cxl devices, so unconditionally enable it in the adapter init flow. Signed-off-by: Ian Munsie Reviewed-by: Andrew Donnellan Reviewed-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 6ac6b05f41a4..deef9c75e73a 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1264,6 +1264,9 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) goto err; + /* Required for devices using CAPP DMA mode, harmless for others */ + pci_set_master(dev); + if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) goto err; -- cgit v1.2.3 From e4f5fc001a6cb82bef910372457ca7754defa84d Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Thu, 14 Jul 2016 07:17:05 +1000 Subject: cxl: Do not create vPHB if there are no AFU configuration records The vPHB model of the cxl kernel API is a hierarchy where the AFU is represented by the vPHB, and it's AFU configuration records are exposed as functions under that vPHB. 
If there are no AFU configuration records we will create a vPHB with nothing under it, which is a waste of resources and will opt us into EEH handling despite not having anything special to handle. This also does not make sense for cards using the peer model of the cxl kernel API, where the other functions of the device are exposed via additional peer physical functions rather than AFU configuration records. This model will also not work with the existing EEH handling in the cxl driver, as that is designed around the vPHB model. Skip creating the vPHB for AFUs without any AFU configuration records, and opt out of EEH handling for them. Signed-off-by: Ian Munsie Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman --- drivers/misc/cxl/pci.c | 3 +++ drivers/misc/cxl/vphb.c | 11 +++++++++++ 2 files changed, 14 insertions(+) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index deef9c75e73a..dd7ff221288b 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1572,6 +1572,9 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, */ for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + /* Only participate in EEH if we are on a virtual PHB */ + if (afu->phb == NULL) + return PCI_ERS_RESULT_NONE; cxl_vphb_error_detected(afu, state); } return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index c8a759f4ccfa..8865e8d9b3c5 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -188,6 +188,17 @@ int cxl_pci_vphb_add(struct cxl_afu *afu) struct device_node *vphb_dn; struct device *parent; + /* + * If there are no AFU configuration records we won't have anything to + * expose under the vPHB, so skip creating one, returning success since + * this is still a valid case. This will also opt us out of EEH + * handling since we won't have anything special to do if there are no + * kernel drivers attached to the vPHB, and EEH handling is not yet + * supported in the peer model. + */ + if (!afu->crs_num) + return 0; + /* The parent device is the adapter. Reuse the device node of * the adapter. * We don't seem to care what device node is used for the vPHB, -- cgit v1.2.3 From 317f5ef1b363417b6f1e93b90dfd2ffd6be6e867 Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Thu, 14 Jul 2016 07:17:07 +1000 Subject: cxl: Add support for using the kernel API with a real PHB This hooks up support for using the kernel API with a real PHB. After the AFU initialisation has completed it calls into the PHB code to pass it the AFU that will be used by other peer physical functions on the adapter. The cxl_pci_to_afu API is extended to work with peer PCI devices, retrieving the peer AFU from the PHB. This API may also now return an error if it is called on a PCI device that is not associated with either a cxl vPHB or a peer PCI device to an AFU, and this error is propagated down. 
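The calling-convention change deserves emphasis: cxl_pci_to_afu() used to return an AFU pointer unconditionally, and it can now return an encoded error that callers must test with IS_ERR() before dereferencing. The sketch below is a user-space model of that error-pointer idiom; the ERR_PTR()/IS_ERR()/PTR_ERR() helpers are simplified reimplementations for illustration, not the kernel's own definitions.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Simplified stand-ins for the kernel's error-pointer helpers */
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr) { return (uintptr_t)ptr >= (uintptr_t)-4095; }

struct afu { const char *name; };

static struct afu peer_afu = { "afu0" };

/* Models cxl_pci_to_afu(): hand back the peer AFU if there is one, else an error */
static struct afu *pci_to_afu(int on_cxl_phb)
{
        return on_cxl_phb ? &peer_afu : (struct afu *)ERR_PTR(-ENODEV);
}

int main(void)
{
        struct afu *afu = pci_to_afu(0);

        if (IS_ERR(afu))        /* callers must now check and propagate the error */
                printf("no AFU: %ld\n", PTR_ERR(afu));

        afu = pci_to_afu(1);
        if (!IS_ERR(afu))
                printf("got %s\n", afu->name);
        return 0;
}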
Signed-off-by: Ian Munsie Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman --- drivers/misc/cxl/api.c | 5 +++++ drivers/misc/cxl/pci.c | 3 +++ drivers/misc/cxl/vphb.c | 16 ++++++++++++++-- 3 files changed, 22 insertions(+), 2 deletions(-) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index 7707055d33ab..6a030bf71655 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "cxl.h" @@ -24,6 +25,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev) int rc; afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return ERR_CAST(afu); ctx = cxl_context_alloc(); if (IS_ERR(ctx)) { @@ -438,6 +441,8 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image); ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count) { struct cxl_afu *afu = cxl_pci_to_afu(dev); + if (IS_ERR(afu)) + return -ENODEV; return cxl_ops->read_adapter_vpd(afu->adapter, buf, count); } diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index dd7ff221288b..cb5d172fec1b 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1502,6 +1502,9 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); } + if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1) + pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]); + return 0; } diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 8865e8d9b3c5..dee8def1c193 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -9,6 +9,7 @@ #include #include +#include #include "cxl.h" static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) @@ -258,13 +259,18 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu) pcibios_free_controller(phb); } +static bool _cxl_pci_is_vphb_device(struct pci_controller *phb) +{ + return (phb->ops == &cxl_pcie_pci_ops); +} + bool cxl_pci_is_vphb_device(struct pci_dev *dev) { struct pci_controller *phb; phb = pci_bus_to_host(dev->bus); - return (phb->ops == &cxl_pcie_pci_ops); + return _cxl_pci_is_vphb_device(phb); } struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) @@ -273,7 +279,13 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev) phb = pci_bus_to_host(dev->bus); - return (struct cxl_afu *)phb->private_data; + if (_cxl_pci_is_vphb_device(phb)) + return (struct cxl_afu *)phb->private_data; + + if (pnv_pci_on_cxl_phb(dev)) + return pnv_cxl_phb_to_afu(phb); + + return ERR_PTR(-ENODEV); } EXPORT_SYMBOL_GPL(cxl_pci_to_afu); -- cgit v1.2.3 From f67a6722d650b864b020b19b3926e7152b55f1ff Mon Sep 17 00:00:00 2001 From: Ian Munsie Date: Thu, 14 Jul 2016 07:17:11 +1000 Subject: cxl: Workaround PE=0 hardware limitation in Mellanox CX4 The CX4 card cannot cope with a context with PE=0 due to a hardware limitation, resulting in: [ 34.166577] command failed, status limits exceeded(0x8), syndrome 0x5a7939 [ 34.166580] mlx5_core 0000:01:00.1: Failed allocating uar, aborting Since the kernel API allocates a default context very early during device init that will almost certainly get Process Element ID 0 there is no easy way for us to extend the API to allow the Mellanox to inform us of this limitation ahead of time. Instead, work around the issue by extending the XSL structure to include a minimum PE to allocate. Although the bug is not in the XSL, it is the easiest place to work around this limitation given that the CX4 is currently the only card that uses an XSL. 
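Mechanically the workaround is just a lower bound on the context allocator: the XSL ops table sets min_pe to 1 and cxl_context_init() passes it to idr_alloc() as the start of the range, so an XSL adapter never hands out process element 0. The sketch below models that range with a plain bitmap standing in for the kernel's idr.

#include <stdio.h>
#include <string.h>

#define NUM_PROCS 8

static char used[NUM_PROCS];

/* Models idr_alloc(..., start, end, ...): first free id in [start, end) */
static int alloc_pe(int start, int end)
{
        for (int i = start; i < end; i++) {
                if (!used[i]) {
                        used[i] = 1;
                        return i;
                }
        }
        return -1;
}

int main(void)
{
        int psl_min_pe = 0;     /* PSL contexts may use PE 0 */
        int xsl_min_pe = 1;     /* XSL/CX4 workaround: never hand out PE 0 */

        printf("first PSL context: PE %d\n", alloc_pe(psl_min_pe, NUM_PROCS));
        memset(used, 0, sizeof(used));
        printf("first XSL context: PE %d\n", alloc_pe(xsl_min_pe, NUM_PROCS));
        return 0;
}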
Signed-off-by: Ian Munsie Reviewed-by: Andrew Donnellan Reviewed-by: Frederic Barrat Signed-off-by: Michael Ellerman --- drivers/misc/cxl/context.c | 3 ++- drivers/misc/cxl/cxl.h | 1 + drivers/misc/cxl/pci.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 2616cddbbb33..bdee9a01ef35 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -90,7 +90,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, */ mutex_lock(&afu->contexts_lock); idr_preload(GFP_KERNEL); - i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, + i = idr_alloc(&ctx->afu->contexts_idr, ctx, + ctx->afu->adapter->native->sl_ops->min_pe, ctx->afu->num_procs, GFP_NOWAIT); idr_preload_end(); mutex_unlock(&afu->contexts_lock); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index d50cdb137c43..de090533f18c 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -561,6 +561,7 @@ struct cxl_service_layer_ops { u64 (*timebase_read)(struct cxl *adapter); int capi_mode; bool needs_reset_before_disable; + int min_pe; }; struct cxl_native { diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index cb5d172fec1b..efe202f9cf72 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1321,6 +1321,7 @@ static const struct cxl_service_layer_ops xsl_ops = { .write_timebase_ctrl = write_timebase_ctrl_xsl, .timebase_read = timebase_read_xsl, .capi_mode = OPAL_PHB_CAPI_MODE_DMA, + .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */ }; static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) -- cgit v1.2.3 From b0b5e5918ad1babfd1d43d98c7281926a7b57b9f Mon Sep 17 00:00:00 2001 From: Andrew Donnellan Date: Thu, 14 Jul 2016 07:17:14 +1000 Subject: cxl: Add cxl_check_and_switch_mode() API to switch bi-modal cards Add a new API, cxl_check_and_switch_mode() to allow for switching of bi-modal CAPI cards, such as the Mellanox CX-4 network card. When a driver requests to switch a card to CAPI mode, use PCI hotplug infrastructure to remove all PCI devices underneath the slot. We then write an updated mode control register to the CAPI VSEC, hot reset the card, and reprobe the card. As the card may present a different set of PCI devices after the mode switch, use the infrastructure provided by the pnv_php driver and the OPAL PCI slot management facilities to ensure that: * the old devices are removed from both the OPAL and Linux device trees * the new devices are probed by OPAL and added to the OPAL device tree * the new devices are added to the Linux device tree and probed through the regular PCI device probe path As such, introduce a new option, CONFIG_CXL_BIMODAL, with a dependency on the pnv_php driver. Refactor existing code that touches the mode control register in the regular single mode case into a new function, setup_cxl_protocol_area(). 
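Most of the machinery above is hotplug plumbing; the part a bi-modal card driver interacts with is the decision made from the VSEC mode control byte. The sketch below models that decision and the protocol field rewrite. The mask value matches the CXL_VSEC_PROTOCOL_MASK definition already in pci.c, but the 256TB and enable bit encodings are assumed for illustration, and the config space access is reduced to a plain byte.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define VSEC_PROTOCOL_MASK   0xe0       /* as in pci.c */
#define VSEC_PROTOCOL_256TB  0x20       /* assumed encoding, for illustration */
#define VSEC_PROTOCOL_ENABLE 0x01       /* assumed encoding, for illustration */

enum { BIMODE_CXL = 1, BIMODE_PCI = 2 };

/* Models cxl_check_and_switch_mode(): 0 = already in the wanted mode,
 * -EBUSY = a switch was scheduled and the caller should fail its probe. */
static int check_and_switch_mode(uint8_t *mode_ctrl, int wanted)
{
        int in_cxl = *mode_ctrl & VSEC_PROTOCOL_ENABLE;

        if (wanted == BIMODE_CXL && in_cxl)
                return 0;
        if (wanted == BIMODE_PCI)
                return in_cxl ? -EIO : 0;       /* switching back is not yet supported */

        /* In PCI mode but CXL wanted: rewrite the protocol field, then (in the
         * real driver) unplug the devices, hot reset and reprobe the slot. */
        *mode_ctrl = (uint8_t)((*mode_ctrl & ~VSEC_PROTOCOL_MASK) |
                               VSEC_PROTOCOL_256TB | VSEC_PROTOCOL_ENABLE);
        return -EBUSY;
}

int main(void)
{
        uint8_t mode_ctrl = 0;  /* card starts out in PCI mode */

        if (check_and_switch_mode(&mode_ctrl, BIMODE_CXL) == -EBUSY)
                printf("switch scheduled, probe should fail for now (ctrl=%#x)\n", mode_ctrl);
        if (check_and_switch_mode(&mode_ctrl, BIMODE_CXL) == 0)
                printf("card now reports CXL mode\n");
        return 0;
}

A caller that gets -EBUSY is expected to fail its probe and let the re-probe after the link reset find the card already in cxl mode.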
Co-authored-by: Ian Munsie Cc: Gavin Shan Signed-off-by: Andrew Donnellan Signed-off-by: Ian Munsie Reviewed-by: Gavin Shan Signed-off-by: Michael Ellerman --- drivers/misc/cxl/Kconfig | 8 ++ drivers/misc/cxl/pci.c | 236 +++++++++++++++++++++++++++++++++++++++++++---- include/misc/cxl.h | 25 +++++ 3 files changed, 251 insertions(+), 18 deletions(-) (limited to 'drivers/misc/cxl/pci.c') diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig index 560412cd4c98..8d767705ec21 100644 --- a/drivers/misc/cxl/Kconfig +++ b/drivers/misc/cxl/Kconfig @@ -38,3 +38,11 @@ config CXL CAPI adapters are found in POWER8 based systems. If unsure, say N. + +config CXL_BIMODAL + bool "Support for bi-modal CAPI cards" + depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m + default y + help + Select this option to enable support for bi-modal CAPI cards, such as + the Mellanox CX-4. diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index efe202f9cf72..d152e2de8c93 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -55,6 +55,8 @@ pci_read_config_byte(dev, vsec + 0xa, dest) #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ pci_write_config_byte(dev, vsec + 0xa, val) +#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \ + pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val) #define CXL_VSEC_PROTOCOL_MASK 0xe0 #define CXL_VSEC_PROTOCOL_1024TB 0x80 #define CXL_VSEC_PROTOCOL_512TB 0x40 @@ -614,36 +616,234 @@ static int setup_cxl_bars(struct pci_dev *dev) return 0; } -/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ -static int switch_card_to_cxl(struct pci_dev *dev) -{ +#ifdef CONFIG_CXL_BIMODAL + +struct cxl_switch_work { + struct pci_dev *dev; + struct work_struct work; int vsec; + int mode; +}; + +static void switch_card_to_cxl(struct work_struct *work) +{ + struct cxl_switch_work *switch_work = + container_of(work, struct cxl_switch_work, work); + struct pci_dev *dev = switch_work->dev; + struct pci_bus *bus = dev->bus; + struct pci_controller *hose = pci_bus_to_host(bus); + struct pci_dev *bridge; + struct pnv_php_slot *php_slot; + unsigned int devfn; u8 val; int rc; - dev_info(&dev->dev, "switch card to CXL\n"); + dev_info(&bus->dev, "cxl: Preparing for mode switch...\n"); + bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev, + bus_list); + if (!bridge) { + dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n"); + goto err_dev_put; + } - if (!(vsec = find_cxl_vsec(dev))) { - dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); + php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge)); + if (!php_slot) { + dev_err(&bus->dev, "cxl: Failed to find slot hotplug " + "information. You may need to upgrade " + "skiboot. 
Aborting.\n"); + goto err_dev_put; + } + + rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc); + goto err_dev_put; + } + devfn = dev->devfn; + + /* Release the reference obtained in cxl_check_and_switch_mode() */ + pci_dev_put(dev); + + dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n"); + pci_lock_rescan_remove(); + pci_hp_remove_devices(bridge->subordinate); + pci_unlock_rescan_remove(); + + /* Switch the CXL protocol on the card */ + if (switch_work->mode == CXL_BIMODE_CXL) { + dev_info(&bus->dev, "cxl: Switching card to CXL mode\n"); + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; + rc = pnv_cxl_enable_phb_kernel_api(hose, true); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to enable kernel API" + " on real PHB, aborting\n"); + goto err_free_work; + } + } else { + dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n"); + goto err_free_work; + } + + rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val); + if (rc) { + dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc); + goto err_free_work; + } + + /* + * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states + * we must wait 100ms after this mode switch before touching PCIe config + * space. + */ + msleep(100); + + /* + * Hot reset to cause the card to come back in cxl mode. A + * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support + * in skiboot, so we use a hot reset instead. + * + * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is + * guaranteed to sit directly under the root port, and setting the reset + * state on a device directly under the root port is equivalent to doing + * it on the root port iself. 
+ */ + dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n"); + pci_set_pcie_reset_state(bridge, pcie_hot_reset); + pci_set_pcie_reset_state(bridge, pcie_deassert_reset); + + dev_dbg(&bus->dev, "cxl: Offlining slot\n"); + rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE); + if (rc) { + dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc); + goto err_free_work; + } + + dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n"); + rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE); + if (rc) { + dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc); + goto err_free_work; + } + + pci_lock_rescan_remove(); + pci_hp_add_devices(bridge->subordinate); + pci_unlock_rescan_remove(); + + dev_info(&bus->dev, "cxl: CAPI mode switch completed\n"); + kfree(switch_work); + return; + +err_dev_put: + /* Release the reference obtained in cxl_check_and_switch_mode() */ + pci_dev_put(dev); +err_free_work: + kfree(switch_work); +} + +int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec) +{ + struct cxl_switch_work *work; + u8 val; + int rc; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) return -ENODEV; + + if (!vsec) { + vsec = find_cxl_vsec(dev); + if (!vsec) { + dev_info(&dev->dev, "CXL VSEC not found\n"); + return -ENODEV; + } } - if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { - dev_err(&dev->dev, "failed to read current mode control: %i", rc); + rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); + if (rc) { + dev_err(&dev->dev, "Failed to read current mode control: %i", rc); return rc; } - val &= ~CXL_VSEC_PROTOCOL_MASK; - val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; - if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { - dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); - return rc; + + if (mode == CXL_BIMODE_PCI) { + if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { + dev_info(&dev->dev, "Card is already in PCI mode\n"); + return 0; + } + /* + * TODO: Before it's safe to switch the card back to PCI mode + * we need to disable the CAPP and make sure any cachelines the + * card holds have been flushed out. Needs skiboot support. + */ + dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n"); + return -EIO; } + + if (val & CXL_VSEC_PROTOCOL_ENABLE) { + dev_info(&dev->dev, "Card is already in CXL mode\n"); + return 0; + } + + dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread " + "to switch to CXL mode\n"); + + work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL); + if (!work) + return -ENOMEM; + + pci_dev_get(dev); + work->dev = dev; + work->vsec = vsec; + work->mode = mode; + INIT_WORK(&work->work, switch_card_to_cxl); + + schedule_work(&work->work); + /* - * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states - * we must wait 100ms after this mode switch before touching - * PCIe config space. + * We return a failure now to abort the driver init. Once the + * link has been cycled and the card is in cxl mode we will + * come back (possibly using the generic cxl driver), but + * return success as the card should then be in cxl mode. + * + * TODO: What if the card comes back in PCI mode even after + * the switch? Don't want to spin endlessly. 
*/ - msleep(100); + return -EBUSY; +} +EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode); + +#endif /* CONFIG_CXL_BIMODAL */ + +static int setup_cxl_protocol_area(struct pci_dev *dev) +{ + u8 val; + int rc; + int vsec = find_cxl_vsec(dev); + + if (!vsec) { + dev_info(&dev->dev, "CXL VSEC not found\n"); + return -ENODEV; + } + + rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val); + if (rc) { + dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc); + return rc; + } + + if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) { + dev_err(&dev->dev, "Card not in CAPI mode!\n"); + return -EIO; + } + + if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) { + val &= ~CXL_VSEC_PROTOCOL_MASK; + val |= CXL_VSEC_PROTOCOL_256TB; + rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val); + if (rc) { + dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc); + return rc; + } + } return 0; } @@ -1249,7 +1449,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) if ((rc = setup_cxl_bars(dev))) return rc; - if ((rc = switch_card_to_cxl(dev))) + if ((rc = setup_cxl_protocol_area(dev))) return rc; if ((rc = cxl_update_image_control(adapter))) diff --git a/include/misc/cxl.h b/include/misc/cxl.h index 6c52cbcdfd79..480d50a0b8ba 100644 --- a/include/misc/cxl.h +++ b/include/misc/cxl.h @@ -39,6 +39,31 @@ bool cxl_slot_is_supported(struct pci_dev *dev, int flags); +#define CXL_BIMODE_CXL 1 +#define CXL_BIMODE_PCI 2 + +/* + * Check the mode that the given bi-modal CXL adapter is currently in and + * change it if necessary. This does not apply to AFU drivers. + * + * If the mode matches the requested mode this function will return 0 - if the + * driver was expecting the generic CXL driver to have bound to the adapter and + * it gets this return value it should fail the probe function to give the CXL + * driver a chance to probe it. + * + * If the mode does not match it will start a background task to unplug the + * device from Linux and switch its mode, and will return -EBUSY. At this + * point the calling driver should make sure it has released the device and + * fail its probe function. + * + * The offset of the CXL VSEC can be provided to this function. If 0 is passed, + * this function will search for a CXL VSEC with ID 0x1280 and return -ENODEV + * if it is not found. + */ +#ifdef CONFIG_CXL_BIMODAL +int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec); +#endif + /* Get the AFU associated with a pci_dev */ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev); -- cgit v1.2.3