From 97c2b5cba2044f1c0bc3f14d7102176dbcf81af0 Mon Sep 17 00:00:00 2001
From: Richard Fitzgerald
Date: Mon, 21 May 2018 10:59:54 +0100
Subject: mfd: madera: Add register definitions for Cirrus Logic Madera codecs

This patch adds a header file of register definitions for Cirrus Logic
"Madera" class codecs. These codecs are all based off a common set of
hardware IP so have a common register map (with a few minor
device-to-device variations).

The registers.h file is tool-generated directly from the hardware design
but has been manually stripped down to reduce size (full register map is
>44000 lines). All names are kept the same as datasheet names so that they
can be cross-referenced between source and datasheet without confusion.

The register map layout is kept fully-defined rather than factored into
macros and/or block-indexing code. The major reasons for this are:

- #1 is that it makes the source highly greppable, which is important.
  "What does the driver do with register bits XYZ" or "Where does it use
  register bits XYZ" are common types of questions. These can be quickly
  answered by a grep. Squashing definitions into generator macros or
  block-indexing code is a way of defeating grep.
- most of the register definitions are used in tables, so a constant value
  is required. Using generator macros makes the table definition clunky
  and obscure.
- the code is clearer when the source states exactly which register and
  field it is using
- it is easier to diff the register map of a new (unsupported) codec
  against what is already supported and merge in differences
- it makes the register map available in source for maintenance/debugging
  instead of having to refer back to the datasheet for a register map

Signed-off-by: Richard Fitzgerald
Signed-off-by: Lee Jones
---
 MAINTAINERS | 10 ++++++++++
 1 file changed, 10 insertions(+)
(limited to 'MAINTAINERS')

diff --git a/MAINTAINERS b/MAINTAINERS
index 9c125f705f78..6e5bb62acea2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3479,6 +3479,16 @@ M: Dave Goodell
 S: Supported
 F: drivers/infiniband/hw/usnic/

+CIRRUS LOGIC MADERA CODEC DRIVERS
+M: Charles Keepax
+M: Richard Fitzgerald
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+L: patches@opensource.cirrus.com
+T: git https://github.com/CirrusLogic/linux-drivers.git
+W: https://github.com/CirrusLogic/linux-drivers/wiki
+S: Supported
+F: include/linux/mfd/madera/*
+
 CLEANCACHE API
 M: Konrad Rzeszutek Wilk
 L: linux-kernel@vger.kernel.org
--
cgit v1.2.3

From 16b27467f46c1e0dbf093f53971aeb5decbaff4e Mon Sep 17 00:00:00 2001
From: Richard Fitzgerald
Date: Mon, 21 May 2018 10:59:56 +0100
Subject: mfd: madera: Add common support for Cirrus Logic Madera codecs

This adds the generic core support for Cirrus Logic "Madera" class codecs.
These are complex audio codec SoCs with a variety of digital and analogue
I/O, onboard audio processing and DSPs, and other features.

These codecs are all based off a common set of hardware IP so can be
supported by a core of common code (with a few minor device-to-device
variations).
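As an aside illustrating the fully-defined register style argued for in the
previous patch: every register and field is a literal constant, so a plain
grep finds all of its users. The names below match ones used later in this
series, but the numeric values are placeholders, not values taken from
registers.h:

/* Fully-defined style: "git grep MADERA_BOOT_DONE_EINT1" finds every user.
 * (The numeric values here are made up for illustration.)
 */
#define MADERA_IRQ1_STATUS_1			0x1800
#define MADERA_BOOT_DONE_EINT1			0x0080

/* Generator-macro style that this series avoids: the register name never
 * appears literally in the source, so grep cannot find its users.
 */
#define MADERA_IRQ_STATUS(n)			(0x1800 + (n) - 1)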
Signed-off-by: Charles Keepax Signed-off-by: Nikesh Oswal Signed-off-by: Richard Fitzgerald Signed-off-by: Lee Jones --- MAINTAINERS | 3 + drivers/mfd/Kconfig | 29 ++ drivers/mfd/Makefile | 5 + drivers/mfd/madera-core.c | 609 +++++++++++++++++++++++++++++++++++++++ drivers/mfd/madera-i2c.c | 140 +++++++++ drivers/mfd/madera-spi.c | 139 +++++++++ drivers/mfd/madera.h | 44 +++ include/linux/mfd/madera/core.h | 187 ++++++++++++ include/linux/mfd/madera/pdata.h | 59 ++++ 9 files changed, 1215 insertions(+) create mode 100644 drivers/mfd/madera-core.c create mode 100644 drivers/mfd/madera-i2c.c create mode 100644 drivers/mfd/madera-spi.c create mode 100644 drivers/mfd/madera.h create mode 100644 include/linux/mfd/madera/core.h create mode 100644 include/linux/mfd/madera/pdata.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 6e5bb62acea2..7b8e857d6b33 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3487,7 +3487,10 @@ L: patches@opensource.cirrus.com T: git https://github.com/CirrusLogic/linux-drivers.git W: https://github.com/CirrusLogic/linux-drivers/wiki S: Supported +F: Documentation/devicetree/bindings/mfd/madera.txt F: include/linux/mfd/madera/* +F: drivers/mfd/madera* +F: drivers/mfd/cs47l* CLEANCACHE API M: Konrad Rzeszutek Wilk diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index b860eb5aa194..f5ca392f8bc2 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -232,6 +232,35 @@ config MFD_CROS_EC_CHARDEV If you have a supported Chromebook, choose Y or M here. The module will be called cros_ec_dev. +config MFD_MADERA + tristate "Cirrus Logic Madera codecs" + select MFD_CORE + select REGMAP + select REGMAP_IRQ + select MADERA_IRQ + select PINCTRL + select PINCTRL_MADERA + help + Support for the Cirrus Logic Madera platform audio codecs + +config MFD_MADERA_I2C + tristate "Cirrus Logic Madera codecs with I2C" + depends on MFD_MADERA + depends on I2C + select REGMAP_I2C + help + Support for the Cirrus Logic Madera platform audio SoC + core functionality controlled via I2C. + +config MFD_MADERA_SPI + tristate "Cirrus Logic Madera codecs with SPI" + depends on MFD_MADERA + depends on SPI_MASTER + select REGMAP_SPI + help + Support for the Cirrus Logic Madera platform audio SoC + core functionality controlled via SPI. + config MFD_ASIC3 bool "Compaq ASIC3" depends on GPIOLIB && ARM diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index d9d2cf0d32ef..0a89a6a6d793 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -73,6 +73,11 @@ wm8994-objs := wm8994-core.o wm8994-irq.o wm8994-regmap.o obj-$(CONFIG_MFD_WM8994) += wm8994.o obj-$(CONFIG_MFD_WM97xx) += wm97xx-core.o +madera-objs := madera-core.o +obj-$(CONFIG_MFD_MADERA) += madera.o +obj-$(CONFIG_MFD_MADERA_I2C) += madera-i2c.o +obj-$(CONFIG_MFD_MADERA_SPI) += madera-spi.o + obj-$(CONFIG_TPS6105X) += tps6105x.o obj-$(CONFIG_TPS65010) += tps65010.o obj-$(CONFIG_TPS6507X) += tps6507x.o diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c new file mode 100644 index 000000000000..8cfea969b060 --- /dev/null +++ b/drivers/mfd/madera-core.c @@ -0,0 +1,609 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core MFD support for Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "madera.h" + +#define CS47L35_SILICON_ID 0x6360 +#define CS47L85_SILICON_ID 0x6338 +#define CS47L90_SILICON_ID 0x6364 + +#define MADERA_32KZ_MCLK2 1 + +static const char * const madera_core_supplies[] = { + "AVDD", + "DBVDD1", +}; + +static const struct mfd_cell madera_ldo1_devs[] = { + { .name = "madera-ldo1" }, +}; + +static const char * const cs47l35_supplies[] = { + "MICVDD", + "DBVDD2", + "CPVDD1", + "CPVDD2", + "SPKVDD", +}; + +static const struct mfd_cell cs47l35_devs[] = { + { .name = "madera-pinctrl", }, + { .name = "madera-irq", }, + { .name = "madera-micsupp", }, + { .name = "madera-gpio", }, + { .name = "madera-extcon", }, + { + .name = "cs47l35-codec", + .parent_supplies = cs47l35_supplies, + .num_parent_supplies = ARRAY_SIZE(cs47l35_supplies), + }, +}; + +static const char * const cs47l85_supplies[] = { + "MICVDD", + "DBVDD2", + "DBVDD3", + "DBVDD4", + "CPVDD1", + "CPVDD2", + "SPKVDDL", + "SPKVDDR", +}; + +static const struct mfd_cell cs47l85_devs[] = { + { .name = "madera-pinctrl", }, + { .name = "madera-irq", }, + { .name = "madera-micsupp" }, + { .name = "madera-gpio", }, + { .name = "madera-extcon", }, + { + .name = "cs47l85-codec", + .parent_supplies = cs47l85_supplies, + .num_parent_supplies = ARRAY_SIZE(cs47l85_supplies), + }, +}; + +static const char * const cs47l90_supplies[] = { + "MICVDD", + "DBVDD2", + "DBVDD3", + "DBVDD4", + "CPVDD1", + "CPVDD2", +}; + +static const struct mfd_cell cs47l90_devs[] = { + { .name = "madera-pinctrl", }, + { .name = "madera-irq", }, + { .name = "madera-micsupp", }, + { .name = "madera-gpio", }, + { .name = "madera-extcon", }, + { + .name = "cs47l90-codec", + .parent_supplies = cs47l90_supplies, + .num_parent_supplies = ARRAY_SIZE(cs47l90_supplies), + }, +}; + +/* Used by madera-i2c and madera-spi drivers */ +const char *madera_name_from_type(enum madera_type type) +{ + switch (type) { + case CS47L35: + return "CS47L35"; + case CS47L85: + return "CS47L85"; + case CS47L90: + return "CS47L90"; + case CS47L91: + return "CS47L91"; + case WM1840: + return "WM1840"; + default: + return "Unknown"; + } +} +EXPORT_SYMBOL_GPL(madera_name_from_type); + +#define MADERA_BOOT_POLL_MAX_INTERVAL_US 5000 +#define MADERA_BOOT_POLL_TIMEOUT_US 25000 + +static int madera_wait_for_boot(struct madera *madera) +{ + unsigned int val; + int ret; + + /* + * We can't use an interrupt as we need to runtime resume to do so, + * so we poll the status bit. This won't race with the interrupt + * handler because it will be blocked on runtime resume. + */ + ret = regmap_read_poll_timeout(madera->regmap, + MADERA_IRQ1_RAW_STATUS_1, + val, + (val & MADERA_BOOT_DONE_STS1), + MADERA_BOOT_POLL_MAX_INTERVAL_US, + MADERA_BOOT_POLL_TIMEOUT_US); + + if (ret) + dev_err(madera->dev, "Polling BOOT_DONE_STS failed: %d\n", ret); + + /* + * BOOT_DONE defaults to unmasked on boot so we must ack it. + * Do this unconditionally to avoid interrupt storms. 
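+ * Acking a bit that is already clear has no effect, so this write is
+ * also safe if the polling above timed out.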
+ */ + regmap_write(madera->regmap, MADERA_IRQ1_STATUS_1, + MADERA_BOOT_DONE_EINT1); + + pm_runtime_mark_last_busy(madera->dev); + + return ret; +} + +static int madera_soft_reset(struct madera *madera) +{ + int ret; + + ret = regmap_write(madera->regmap, MADERA_SOFTWARE_RESET, 0); + if (ret != 0) { + dev_err(madera->dev, "Failed to soft reset device: %d\n", ret); + return ret; + } + + /* Allow time for internal clocks to startup after reset */ + usleep_range(1000, 2000); + + return 0; +} + +static void madera_enable_hard_reset(struct madera *madera) +{ + if (!madera->pdata.reset) + return; + + /* + * There are many existing out-of-tree users of these codecs that we + * can't break so preserve the expected behaviour of setting the line + * low to assert reset. + */ + gpiod_set_raw_value_cansleep(madera->pdata.reset, 0); +} + +static void madera_disable_hard_reset(struct madera *madera) +{ + if (!madera->pdata.reset) + return; + + gpiod_set_raw_value_cansleep(madera->pdata.reset, 1); + usleep_range(1000, 2000); +} + +static int __maybe_unused madera_runtime_resume(struct device *dev) +{ + struct madera *madera = dev_get_drvdata(dev); + int ret; + + dev_dbg(dev, "Leaving sleep mode\n"); + + ret = regulator_enable(madera->dcvdd); + if (ret) { + dev_err(dev, "Failed to enable DCVDD: %d\n", ret); + return ret; + } + + regcache_cache_only(madera->regmap, false); + regcache_cache_only(madera->regmap_32bit, false); + + ret = madera_wait_for_boot(madera); + if (ret) + goto err; + + ret = regcache_sync(madera->regmap); + if (ret) { + dev_err(dev, "Failed to restore 16-bit register cache\n"); + goto err; + } + + ret = regcache_sync(madera->regmap_32bit); + if (ret) { + dev_err(dev, "Failed to restore 32-bit register cache\n"); + goto err; + } + + return 0; + +err: + regcache_cache_only(madera->regmap_32bit, true); + regcache_cache_only(madera->regmap, true); + regulator_disable(madera->dcvdd); + + return ret; +} + +static int __maybe_unused madera_runtime_suspend(struct device *dev) +{ + struct madera *madera = dev_get_drvdata(dev); + + dev_dbg(madera->dev, "Entering sleep mode\n"); + + regcache_cache_only(madera->regmap, true); + regcache_mark_dirty(madera->regmap); + regcache_cache_only(madera->regmap_32bit, true); + regcache_mark_dirty(madera->regmap_32bit); + + regulator_disable(madera->dcvdd); + + return 0; +} + +const struct dev_pm_ops madera_pm_ops = { + SET_RUNTIME_PM_OPS(madera_runtime_suspend, + madera_runtime_resume, + NULL) +}; +EXPORT_SYMBOL_GPL(madera_pm_ops); + +const struct of_device_id madera_of_match[] = { + { .compatible = "cirrus,cs47l35", .data = (void *)CS47L35 }, + { .compatible = "cirrus,cs47l85", .data = (void *)CS47L85 }, + { .compatible = "cirrus,cs47l90", .data = (void *)CS47L90 }, + { .compatible = "cirrus,cs47l91", .data = (void *)CS47L91 }, + { .compatible = "cirrus,wm1840", .data = (void *)WM1840 }, + {} +}; +EXPORT_SYMBOL_GPL(madera_of_match); + +static int madera_get_reset_gpio(struct madera *madera) +{ + struct gpio_desc *reset; + int ret; + + if (madera->pdata.reset) + return 0; + + reset = devm_gpiod_get_optional(madera->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(reset)) { + ret = PTR_ERR(reset); + if (ret != -EPROBE_DEFER) + dev_err(madera->dev, "Failed to request /RESET: %d\n", + ret); + return ret; + } + + /* + * A hard reset is needed for full reset of the chip. We allow running + * without hard reset only because it can be useful for early + * prototyping and some debugging, but we need to warn it's not ideal. 
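+ * If there is no reset GPIO, madera_dev_init() falls back to a soft
+ * reset instead.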
+ */ + if (!reset) + dev_warn(madera->dev, + "Running without reset GPIO is not recommended\n"); + + madera->pdata.reset = reset; + + return 0; +} + +static void madera_set_micbias_info(struct madera *madera) +{ + /* + * num_childbias is an array because future codecs can have different + * childbiases for each micbias. Unspecified values default to 0. + */ + switch (madera->type) { + case CS47L35: + madera->num_micbias = 2; + madera->num_childbias[0] = 2; + madera->num_childbias[1] = 2; + return; + case CS47L85: + case WM1840: + madera->num_micbias = 4; + /* no child biases */ + return; + case CS47L90: + case CS47L91: + madera->num_micbias = 2; + madera->num_childbias[0] = 4; + madera->num_childbias[1] = 4; + return; + default: + return; + } +} + +int madera_dev_init(struct madera *madera) +{ + struct device *dev = madera->dev; + unsigned int hwid; + int (*patch_fn)(struct madera *) = NULL; + const struct mfd_cell *mfd_devs; + int n_devs = 0; + int i, ret; + + dev_set_drvdata(madera->dev, madera); + BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); + madera_set_micbias_info(madera); + + /* + * We need writable hw config info that all children can share. + * Simplest to take one shared copy of pdata struct. + */ + if (dev_get_platdata(madera->dev)) { + memcpy(&madera->pdata, dev_get_platdata(madera->dev), + sizeof(madera->pdata)); + } + + ret = madera_get_reset_gpio(madera); + if (ret) + return ret; + + regcache_cache_only(madera->regmap, true); + regcache_cache_only(madera->regmap_32bit, true); + + for (i = 0; i < ARRAY_SIZE(madera_core_supplies); i++) + madera->core_supplies[i].supply = madera_core_supplies[i]; + + madera->num_core_supplies = ARRAY_SIZE(madera_core_supplies); + + /* + * On some codecs DCVDD could be supplied by the internal LDO1. + * For those we must add the LDO1 driver before requesting DCVDD + * No devm_ because we need to control shutdown order of children. + */ + switch (madera->type) { + case CS47L35: + case CS47L90: + case CS47L91: + break; + case CS47L85: + case WM1840: + ret = mfd_add_devices(madera->dev, PLATFORM_DEVID_NONE, + madera_ldo1_devs, + ARRAY_SIZE(madera_ldo1_devs), + NULL, 0, NULL); + if (ret) { + dev_err(dev, "Failed to add LDO1 child: %d\n", ret); + return ret; + } + break; + default: + /* No point continuing if the type is unknown */ + dev_err(madera->dev, "Unknown device type %d\n", madera->type); + return -ENODEV; + } + + ret = devm_regulator_bulk_get(dev, madera->num_core_supplies, + madera->core_supplies); + if (ret) { + dev_err(dev, "Failed to request core supplies: %d\n", ret); + goto err_devs; + } + + /* + * Don't use devres here. If the regulator is one of our children it + * will already have been removed before devres cleanup on this mfd + * driver tries to call put() on it. We need control of shutdown order. 
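+ * The matching regulator_put() is called by hand in the error path
+ * and in madera_dev_exit().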
+ */ + madera->dcvdd = regulator_get(madera->dev, "DCVDD"); + if (IS_ERR(madera->dcvdd)) { + ret = PTR_ERR(madera->dcvdd); + dev_err(dev, "Failed to request DCVDD: %d\n", ret); + goto err_devs; + } + + ret = regulator_bulk_enable(madera->num_core_supplies, + madera->core_supplies); + if (ret) { + dev_err(dev, "Failed to enable core supplies: %d\n", ret); + goto err_dcvdd; + } + + ret = regulator_enable(madera->dcvdd); + if (ret) { + dev_err(dev, "Failed to enable DCVDD: %d\n", ret); + goto err_enable; + } + + madera_disable_hard_reset(madera); + + regcache_cache_only(madera->regmap, false); + regcache_cache_only(madera->regmap_32bit, false); + + /* + * Now we can power up and verify that this is a chip we know about + * before we start doing any writes to its registers. + */ + ret = regmap_read(madera->regmap, MADERA_SOFTWARE_RESET, &hwid); + if (ret) { + dev_err(dev, "Failed to read ID register: %d\n", ret); + goto err_reset; + } + + switch (hwid) { + case CS47L35_SILICON_ID: + if (IS_ENABLED(CONFIG_MFD_CS47L35)) { + switch (madera->type) { + case CS47L35: + patch_fn = cs47l35_patch; + mfd_devs = cs47l35_devs; + n_devs = ARRAY_SIZE(cs47l35_devs); + break; + default: + break; + } + } + break; + case CS47L85_SILICON_ID: + if (IS_ENABLED(CONFIG_MFD_CS47L85)) { + switch (madera->type) { + case CS47L85: + case WM1840: + patch_fn = cs47l85_patch; + mfd_devs = cs47l85_devs; + n_devs = ARRAY_SIZE(cs47l85_devs); + break; + default: + break; + } + } + break; + case CS47L90_SILICON_ID: + if (IS_ENABLED(CONFIG_MFD_CS47L90)) { + switch (madera->type) { + case CS47L90: + case CS47L91: + patch_fn = cs47l90_patch; + mfd_devs = cs47l90_devs; + n_devs = ARRAY_SIZE(cs47l90_devs); + break; + default: + break; + } + } + break; + default: + dev_err(madera->dev, "Unknown device ID: %x\n", hwid); + ret = -EINVAL; + goto err_reset; + } + + if (!n_devs) { + dev_err(madera->dev, "Device ID 0x%x not a %s\n", hwid, + madera->type_name); + ret = -ENODEV; + goto err_reset; + } + + /* + * It looks like a device we support. If we don't have a hard reset + * we can now attempt a soft reset. 
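+ * (When a reset GPIO is present, the hard reset released above has
+ * already put the chip into its reset state.)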
+ */ + if (!madera->pdata.reset) { + ret = madera_soft_reset(madera); + if (ret) + goto err_reset; + } + + ret = madera_wait_for_boot(madera); + if (ret) { + dev_err(madera->dev, "Device failed initial boot: %d\n", ret); + goto err_reset; + } + + ret = regmap_read(madera->regmap, MADERA_HARDWARE_REVISION, + &madera->rev); + if (ret) { + dev_err(dev, "Failed to read revision register: %d\n", ret); + goto err_reset; + } + madera->rev &= MADERA_HW_REVISION_MASK; + + dev_info(dev, "%s silicon revision %d\n", madera->type_name, + madera->rev); + + /* Apply hardware patch */ + if (patch_fn) { + ret = patch_fn(madera); + if (ret) { + dev_err(madera->dev, "Failed to apply patch %d\n", ret); + goto err_reset; + } + } + + /* Init 32k clock sourced from MCLK2 */ + ret = regmap_update_bits(madera->regmap, + MADERA_CLOCK_32K_1, + MADERA_CLK_32K_ENA_MASK | MADERA_CLK_32K_SRC_MASK, + MADERA_CLK_32K_ENA | MADERA_32KZ_MCLK2); + if (ret) { + dev_err(madera->dev, "Failed to init 32k clock: %d\n", ret); + goto err_reset; + } + + pm_runtime_set_active(madera->dev); + pm_runtime_enable(madera->dev); + pm_runtime_set_autosuspend_delay(madera->dev, 100); + pm_runtime_use_autosuspend(madera->dev); + + /* No devm_ because we need to control shutdown order of children */ + ret = mfd_add_devices(madera->dev, PLATFORM_DEVID_NONE, + mfd_devs, n_devs, + NULL, 0, NULL); + if (ret) { + dev_err(madera->dev, "Failed to add subdevices: %d\n", ret); + goto err_pm_runtime; + } + + return 0; + +err_pm_runtime: + pm_runtime_disable(madera->dev); +err_reset: + madera_enable_hard_reset(madera); + regulator_disable(madera->dcvdd); +err_enable: + regulator_bulk_disable(madera->num_core_supplies, + madera->core_supplies); +err_dcvdd: + regulator_put(madera->dcvdd); +err_devs: + mfd_remove_devices(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(madera_dev_init); + +int madera_dev_exit(struct madera *madera) +{ + /* Prevent any IRQs being serviced while we clean up */ + disable_irq(madera->irq); + + /* + * DCVDD could be supplied by a child node, we must disable it before + * removing the children, and prevent PM runtime from turning it back on + */ + pm_runtime_disable(madera->dev); + + regulator_disable(madera->dcvdd); + regulator_put(madera->dcvdd); + + mfd_remove_devices(madera->dev); + madera_enable_hard_reset(madera); + + regulator_bulk_disable(madera->num_core_supplies, + madera->core_supplies); + return 0; +} +EXPORT_SYMBOL_GPL(madera_dev_exit); + +MODULE_DESCRIPTION("Madera core MFD driver"); +MODULE_AUTHOR("Richard Fitzgerald "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c new file mode 100644 index 000000000000..05ae94be01d8 --- /dev/null +++ b/drivers/mfd/madera-i2c.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * I2C bus interface to Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "madera.h" + +static int madera_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct madera *madera; + const struct regmap_config *regmap_16bit_config = NULL; + const struct regmap_config *regmap_32bit_config = NULL; + const void *of_data; + unsigned long type; + const char *name; + int ret; + + of_data = of_device_get_match_data(&i2c->dev); + if (of_data) + type = (unsigned long)of_data; + else + type = id->driver_data; + + switch (type) { + case CS47L35: + if (IS_ENABLED(CONFIG_MFD_CS47L35)) { + regmap_16bit_config = &cs47l35_16bit_i2c_regmap; + regmap_32bit_config = &cs47l35_32bit_i2c_regmap; + } + break; + case CS47L85: + case WM1840: + if (IS_ENABLED(CONFIG_MFD_CS47L85)) { + regmap_16bit_config = &cs47l85_16bit_i2c_regmap; + regmap_32bit_config = &cs47l85_32bit_i2c_regmap; + } + break; + case CS47L90: + case CS47L91: + if (IS_ENABLED(CONFIG_MFD_CS47L90)) { + regmap_16bit_config = &cs47l90_16bit_i2c_regmap; + regmap_32bit_config = &cs47l90_32bit_i2c_regmap; + } + break; + default: + dev_err(&i2c->dev, + "Unknown Madera I2C device type %ld\n", type); + return -EINVAL; + } + + name = madera_name_from_type(type); + + if (!regmap_16bit_config) { + /* it's polite to say which codec isn't built into the kernel */ + dev_err(&i2c->dev, + "Kernel does not include support for %s\n", name); + return -EINVAL; + } + + madera = devm_kzalloc(&i2c->dev, sizeof(*madera), GFP_KERNEL); + if (!madera) + return -ENOMEM; + + + madera->regmap = devm_regmap_init_i2c(i2c, regmap_16bit_config); + if (IS_ERR(madera->regmap)) { + ret = PTR_ERR(madera->regmap); + dev_err(&i2c->dev, + "Failed to allocate 16-bit register map: %d\n", ret); + return ret; + } + + madera->regmap_32bit = devm_regmap_init_i2c(i2c, regmap_32bit_config); + if (IS_ERR(madera->regmap_32bit)) { + ret = PTR_ERR(madera->regmap_32bit); + dev_err(&i2c->dev, + "Failed to allocate 32-bit register map: %d\n", ret); + return ret; + } + + madera->type = type; + madera->type_name = name; + madera->dev = &i2c->dev; + madera->irq = i2c->irq; + + return madera_dev_init(madera); +} + +static int madera_i2c_remove(struct i2c_client *i2c) +{ + struct madera *madera = dev_get_drvdata(&i2c->dev); + + madera_dev_exit(madera); + + return 0; +} + +static const struct i2c_device_id madera_i2c_id[] = { + { "cs47l35", CS47L35 }, + { "cs47l85", CS47L85 }, + { "cs47l90", CS47L90 }, + { "cs47l91", CS47L91 }, + { "wm1840", WM1840 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, madera_i2c_id); + +static struct i2c_driver madera_i2c_driver = { + .driver = { + .name = "madera", + .pm = &madera_pm_ops, + .of_match_table = of_match_ptr(madera_of_match), + }, + .probe = madera_i2c_probe, + .remove = madera_i2c_remove, + .id_table = madera_i2c_id, +}; + +module_i2c_driver(madera_i2c_driver); + +MODULE_DESCRIPTION("Madera I2C bus interface"); +MODULE_AUTHOR("Richard Fitzgerald "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c new file mode 100644 index 000000000000..4c398b278bba --- /dev/null +++ b/drivers/mfd/madera-spi.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SPI bus interface to Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "madera.h" + +static int madera_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + struct madera *madera; + const struct regmap_config *regmap_16bit_config = NULL; + const struct regmap_config *regmap_32bit_config = NULL; + const void *of_data; + unsigned long type; + const char *name; + int ret; + + of_data = of_device_get_match_data(&spi->dev); + if (of_data) + type = (unsigned long)of_data; + else + type = id->driver_data; + + switch (type) { + case CS47L35: + if (IS_ENABLED(CONFIG_MFD_CS47L35)) { + regmap_16bit_config = &cs47l35_16bit_spi_regmap; + regmap_32bit_config = &cs47l35_32bit_spi_regmap; + } + break; + case CS47L85: + case WM1840: + if (IS_ENABLED(CONFIG_MFD_CS47L85)) { + regmap_16bit_config = &cs47l85_16bit_spi_regmap; + regmap_32bit_config = &cs47l85_32bit_spi_regmap; + } + break; + case CS47L90: + case CS47L91: + if (IS_ENABLED(CONFIG_MFD_CS47L90)) { + regmap_16bit_config = &cs47l90_16bit_spi_regmap; + regmap_32bit_config = &cs47l90_32bit_spi_regmap; + } + break; + default: + dev_err(&spi->dev, + "Unknown Madera SPI device type %ld\n", type); + return -EINVAL; + } + + name = madera_name_from_type(type); + + if (!regmap_16bit_config) { + /* it's polite to say which codec isn't built into the kernel */ + dev_err(&spi->dev, + "Kernel does not include support for %s\n", name); + return -EINVAL; + } + + madera = devm_kzalloc(&spi->dev, sizeof(*madera), GFP_KERNEL); + if (!madera) + return -ENOMEM; + + madera->regmap = devm_regmap_init_spi(spi, regmap_16bit_config); + if (IS_ERR(madera->regmap)) { + ret = PTR_ERR(madera->regmap); + dev_err(&spi->dev, + "Failed to allocate 16-bit register map: %d\n", ret); + return ret; + } + + madera->regmap_32bit = devm_regmap_init_spi(spi, regmap_32bit_config); + if (IS_ERR(madera->regmap_32bit)) { + ret = PTR_ERR(madera->regmap_32bit); + dev_err(&spi->dev, + "Failed to allocate 32-bit register map: %d\n", ret); + return ret; + } + + madera->type = type; + madera->type_name = name; + madera->dev = &spi->dev; + madera->irq = spi->irq; + + return madera_dev_init(madera); +} + +static int madera_spi_remove(struct spi_device *spi) +{ + struct madera *madera = spi_get_drvdata(spi); + + madera_dev_exit(madera); + + return 0; +} + +static const struct spi_device_id madera_spi_ids[] = { + { "cs47l35", CS47L35 }, + { "cs47l85", CS47L85 }, + { "cs47l90", CS47L90 }, + { "cs47l91", CS47L91 }, + { "wm1840", WM1840 }, + { } +}; +MODULE_DEVICE_TABLE(spi, madera_spi_ids); + +static struct spi_driver madera_spi_driver = { + .driver = { + .name = "madera", + .pm = &madera_pm_ops, + .of_match_table = of_match_ptr(madera_of_match), + }, + .probe = madera_spi_probe, + .remove = madera_spi_remove, + .id_table = madera_spi_ids, +}; + +module_spi_driver(madera_spi_driver); + +MODULE_DESCRIPTION("Madera SPI bus interface"); +MODULE_AUTHOR("Richard Fitzgerald "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/madera.h b/drivers/mfd/madera.h new file mode 100644 index 000000000000..891b84efb9a7 --- /dev/null +++ b/drivers/mfd/madera.h @@ -0,0 +1,44 @@ +/* + * MFD internals for Cirrus Logic Madera codecs + * + * Copyright 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef MADERA_MFD_H +#define MADERA_MFD_H + +#include +#include + +struct madera; + +extern const struct dev_pm_ops madera_pm_ops; +extern const struct of_device_id madera_of_match[]; + +int madera_dev_init(struct madera *madera); +int madera_dev_exit(struct madera *madera); + +const char *madera_name_from_type(enum madera_type type); + +extern const struct regmap_config cs47l35_16bit_spi_regmap; +extern const struct regmap_config cs47l35_32bit_spi_regmap; +extern const struct regmap_config cs47l35_16bit_i2c_regmap; +extern const struct regmap_config cs47l35_32bit_i2c_regmap; +int cs47l35_patch(struct madera *madera); + +extern const struct regmap_config cs47l85_16bit_spi_regmap; +extern const struct regmap_config cs47l85_32bit_spi_regmap; +extern const struct regmap_config cs47l85_16bit_i2c_regmap; +extern const struct regmap_config cs47l85_32bit_i2c_regmap; +int cs47l85_patch(struct madera *madera); + +extern const struct regmap_config cs47l90_16bit_spi_regmap; +extern const struct regmap_config cs47l90_32bit_spi_regmap; +extern const struct regmap_config cs47l90_16bit_i2c_regmap; +extern const struct regmap_config cs47l90_32bit_i2c_regmap; +int cs47l90_patch(struct madera *madera); +#endif diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h new file mode 100644 index 000000000000..c332681848ef --- /dev/null +++ b/include/linux/mfd/madera/core.h @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MFD internals for Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#ifndef MADERA_CORE_H +#define MADERA_CORE_H + +#include +#include +#include +#include +#include +#include + +enum madera_type { + /* 0 is reserved for indicating failure to identify */ + CS47L35 = 1, + CS47L85 = 2, + CS47L90 = 3, + CS47L91 = 4, + WM1840 = 7, +}; + +#define MADERA_MAX_CORE_SUPPLIES 2 +#define MADERA_MAX_GPIOS 40 + +#define CS47L35_NUM_GPIOS 16 +#define CS47L85_NUM_GPIOS 40 +#define CS47L90_NUM_GPIOS 38 + +#define MADERA_MAX_MICBIAS 4 + +/* Notifier events */ +#define MADERA_NOTIFY_VOICE_TRIGGER 0x1 +#define MADERA_NOTIFY_HPDET 0x2 +#define MADERA_NOTIFY_MICDET 0x4 + +/* GPIO Function Definitions */ +#define MADERA_GP_FN_ALTERNATE 0x00 +#define MADERA_GP_FN_GPIO 0x01 +#define MADERA_GP_FN_DSP_GPIO 0x02 +#define MADERA_GP_FN_IRQ1 0x03 +#define MADERA_GP_FN_IRQ2 0x04 +#define MADERA_GP_FN_FLL1_CLOCK 0x10 +#define MADERA_GP_FN_FLL2_CLOCK 0x11 +#define MADERA_GP_FN_FLL3_CLOCK 0x12 +#define MADERA_GP_FN_FLLAO_CLOCK 0x13 +#define MADERA_GP_FN_FLL1_LOCK 0x18 +#define MADERA_GP_FN_FLL2_LOCK 0x19 +#define MADERA_GP_FN_FLL3_LOCK 0x1A +#define MADERA_GP_FN_FLLAO_LOCK 0x1B +#define MADERA_GP_FN_OPCLK_OUT 0x40 +#define MADERA_GP_FN_OPCLK_ASYNC_OUT 0x41 +#define MADERA_GP_FN_PWM1 0x48 +#define MADERA_GP_FN_PWM2 0x49 +#define MADERA_GP_FN_SPDIF_OUT 0x4C +#define MADERA_GP_FN_HEADPHONE_DET 0x50 +#define MADERA_GP_FN_MIC_DET 0x58 +#define MADERA_GP_FN_DRC1_SIGNAL_DETECT 0x80 +#define MADERA_GP_FN_DRC2_SIGNAL_DETECT 0x81 +#define MADERA_GP_FN_ASRC1_IN1_LOCK 0x88 +#define MADERA_GP_FN_ASRC1_IN2_LOCK 0x89 +#define MADERA_GP_FN_ASRC2_IN1_LOCK 0x8A +#define MADERA_GP_FN_ASRC2_IN2_LOCK 0x8B +#define MADERA_GP_FN_DSP_IRQ1 0xA0 +#define MADERA_GP_FN_DSP_IRQ2 0xA1 +#define MADERA_GP_FN_DSP_IRQ3 0xA2 +#define MADERA_GP_FN_DSP_IRQ4 0xA3 +#define MADERA_GP_FN_DSP_IRQ5 0xA4 
+#define MADERA_GP_FN_DSP_IRQ6 0xA5 +#define MADERA_GP_FN_DSP_IRQ7 0xA6 +#define MADERA_GP_FN_DSP_IRQ8 0xA7 +#define MADERA_GP_FN_DSP_IRQ9 0xA8 +#define MADERA_GP_FN_DSP_IRQ10 0xA9 +#define MADERA_GP_FN_DSP_IRQ11 0xAA +#define MADERA_GP_FN_DSP_IRQ12 0xAB +#define MADERA_GP_FN_DSP_IRQ13 0xAC +#define MADERA_GP_FN_DSP_IRQ14 0xAD +#define MADERA_GP_FN_DSP_IRQ15 0xAE +#define MADERA_GP_FN_DSP_IRQ16 0xAF +#define MADERA_GP_FN_HPOUT1L_SC 0xB0 +#define MADERA_GP_FN_HPOUT1R_SC 0xB1 +#define MADERA_GP_FN_HPOUT2L_SC 0xB2 +#define MADERA_GP_FN_HPOUT2R_SC 0xB3 +#define MADERA_GP_FN_HPOUT3L_SC 0xB4 +#define MADERA_GP_FN_HPOUT4R_SC 0xB5 +#define MADERA_GP_FN_SPKOUTL_SC 0xB6 +#define MADERA_GP_FN_SPKOUTR_SC 0xB7 +#define MADERA_GP_FN_HPOUT1L_ENA 0xC0 +#define MADERA_GP_FN_HPOUT1R_ENA 0xC1 +#define MADERA_GP_FN_HPOUT2L_ENA 0xC2 +#define MADERA_GP_FN_HPOUT2R_ENA 0xC3 +#define MADERA_GP_FN_HPOUT3L_ENA 0xC4 +#define MADERA_GP_FN_HPOUT4R_ENA 0xC5 +#define MADERA_GP_FN_SPKOUTL_ENA 0xC6 +#define MADERA_GP_FN_SPKOUTR_ENA 0xC7 +#define MADERA_GP_FN_HPOUT1L_DIS 0xD0 +#define MADERA_GP_FN_HPOUT1R_DIS 0xD1 +#define MADERA_GP_FN_HPOUT2L_DIS 0xD2 +#define MADERA_GP_FN_HPOUT2R_DIS 0xD3 +#define MADERA_GP_FN_HPOUT3L_DIS 0xD4 +#define MADERA_GP_FN_HPOUT4R_DIS 0xD5 +#define MADERA_GP_FN_SPKOUTL_DIS 0xD6 +#define MADERA_GP_FN_SPKOUTR_DIS 0xD7 +#define MADERA_GP_FN_SPK_SHUTDOWN 0xE0 +#define MADERA_GP_FN_SPK_OVH_SHUTDOWN 0xE1 +#define MADERA_GP_FN_SPK_OVH_WARN 0xE2 +#define MADERA_GP_FN_TIMER1_STATUS 0x140 +#define MADERA_GP_FN_TIMER2_STATUS 0x141 +#define MADERA_GP_FN_TIMER3_STATUS 0x142 +#define MADERA_GP_FN_TIMER4_STATUS 0x143 +#define MADERA_GP_FN_TIMER5_STATUS 0x144 +#define MADERA_GP_FN_TIMER6_STATUS 0x145 +#define MADERA_GP_FN_TIMER7_STATUS 0x146 +#define MADERA_GP_FN_TIMER8_STATUS 0x147 +#define MADERA_GP_FN_EVENTLOG1_FIFO_STS 0x150 +#define MADERA_GP_FN_EVENTLOG2_FIFO_STS 0x151 +#define MADERA_GP_FN_EVENTLOG3_FIFO_STS 0x152 +#define MADERA_GP_FN_EVENTLOG4_FIFO_STS 0x153 +#define MADERA_GP_FN_EVENTLOG5_FIFO_STS 0x154 +#define MADERA_GP_FN_EVENTLOG6_FIFO_STS 0x155 +#define MADERA_GP_FN_EVENTLOG7_FIFO_STS 0x156 +#define MADERA_GP_FN_EVENTLOG8_FIFO_STS 0x157 + +struct snd_soc_dapm_context; + +/* + * struct madera - internal data shared by the set of Madera drivers + * + * This should not be used by anything except child drivers of the Madera MFD + * + * @regmap: pointer to the regmap instance for 16-bit registers + * @regmap_32bit: pointer to the regmap instance for 32-bit registers + * @dev: pointer to the MFD device + * @type: type of codec + * @rev: silicon revision + * @type_name: display name of this codec + * @num_core_supplies: number of core supply regulators + * @core_supplies: list of core supplies that are always required + * @dcvdd: pointer to DCVDD regulator + * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1 + * @pdata: our pdata + * @irq_dev: the irqchip child driver device + * @irq: host irq number from SPI or I2C configuration + * @out_clamp: indicates output clamp state for each analogue output + * @out_shorted: indicates short circuit state for each analogue output + * @hp_ena: bitflags of enable state for the headphone outputs + * @num_micbias: number of MICBIAS outputs + * @num_childbias: number of child biases for each MICBIAS + * @dapm: pointer to codec driver DAPM context + * @notifier: notifier for signalling events to ASoC machine driver + */ +struct madera { + struct regmap *regmap; + struct regmap *regmap_32bit; + + struct device *dev; + + enum madera_type type; + unsigned 
int rev; + const char *type_name; + + int num_core_supplies; + struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES]; + struct regulator *dcvdd; + bool internal_dcvdd; + + struct madera_pdata pdata; + + struct device *irq_dev; + int irq; + + unsigned int num_micbias; + unsigned int num_childbias[MADERA_MAX_MICBIAS]; + + struct snd_soc_dapm_context *dapm; + + struct blocking_notifier_head notifier; +}; +#endif diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h new file mode 100644 index 000000000000..0b311f39c8f4 --- /dev/null +++ b/include/linux/mfd/madera/pdata.h @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Platform data for Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#ifndef MADERA_PDATA_H +#define MADERA_PDATA_H + +#include +#include +#include +#include + +#define MADERA_MAX_MICBIAS 4 +#define MADERA_MAX_CHILD_MICBIAS 4 + +#define MADERA_MAX_GPSW 2 + +struct gpio_desc; +struct pinctrl_map; +struct madera_irqchip_pdata; +struct madera_codec_pdata; + +/** + * struct madera_pdata - Configuration data for Madera devices + * + * @reset: GPIO controlling /RESET (NULL = none) + * @ldo1: Substruct of pdata for the LDO1 regulator + * @micvdd: Substruct of pdata for the MICVDD regulator + * @irq_flags: Mode for primary IRQ (defaults to active low) + * @gpio_base: Base GPIO number + * @gpio_configs: Array of GPIO configurations (See Documentation/pinctrl.txt) + * @n_gpio_configs: Number of entries in gpio_configs + * @gpsw: General purpose switch mode setting. Depends on the external + * hardware connected to the switch. (See the SW1_MODE field + * in the datasheet for the available values for your codec) + */ +struct madera_pdata { + struct gpio_desc *reset; + + struct arizona_ldo1_pdata ldo1; + struct arizona_micsupp_pdata micvdd; + + unsigned int irq_flags; + int gpio_base; + + const struct pinctrl_map *gpio_configs; + int n_gpio_configs; + + u32 gpsw[MADERA_MAX_GPSW]; +}; + +#endif -- cgit v1.2.3 From 218d72a77b0bc203649c28f03cad6f90af88a787 Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Mon, 21 May 2018 11:00:01 +0100 Subject: pinctrl: madera: Add driver for Cirrus Logic Madera codecs These codecs have a variable number of I/O lines each of which is individually selectable to a wide range of possible functions. The functionality is slightly different from the traditional muxed GPIO since most of the functions can be mapped to any pin (and even the same function to multiple pins). Most pins have a dedicated "alternate" function that is only available on that pin. The alternate functions are usually a group of signals, though it is not always necessary to enable the full group, depending on the alternate function and how it is to be used. The mapping between alternate functions and GPIO pins varies between codecs depending on the number of alternate functions and available pins. 
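To illustrate that model, a board-level pinmux mapping could tie the
dedicated "aif1" alternate function to its fixed pin group while routing a
single-pin function such as "pwm1" to an arbitrary GPIO. This sketch is
hypothetical: the function and group names come from the driver added
below, and "madera-pinctrl" is the MFD child device name, but the consumer
device name is made up.

#include <linux/pinctrl/machine.h>

/* Hypothetical board mapping ("some-codec-user" is a made-up consumer).
 * "aif1" is an alternate function, so it only applies to its dedicated
 * "aif1" group; "pwm1" is a single-pin function, so it may be mapped to
 * any GPIO's own one-pin group (here "gpio5").
 */
static const struct pinctrl_map madera_board_map[] = {
	PIN_MAP_MUX_GROUP("some-codec-user", PINCTRL_STATE_DEFAULT,
			  "madera-pinctrl", "aif1", "aif1"),
	PIN_MAP_MUX_GROUP("some-codec-user", PINCTRL_STATE_DEFAULT,
			  "madera-pinctrl", "gpio5", "pwm1"),
};

A board file would register such a table with pinctrl_register_mappings();
devicetree users express the same thing through the
cirrus,madera-pinctrl binding instead.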
Signed-off-by: Richard Fitzgerald Reviewed-by: Linus Walleij Signed-off-by: Lee Jones --- MAINTAINERS | 2 + drivers/pinctrl/Kconfig | 1 + drivers/pinctrl/Makefile | 1 + drivers/pinctrl/cirrus/Kconfig | 14 + drivers/pinctrl/cirrus/Makefile | 13 + drivers/pinctrl/cirrus/pinctrl-cs47l35.c | 45 ++ drivers/pinctrl/cirrus/pinctrl-cs47l85.c | 59 ++ drivers/pinctrl/cirrus/pinctrl-cs47l90.c | 57 ++ drivers/pinctrl/cirrus/pinctrl-madera-core.c | 1076 ++++++++++++++++++++++++++ drivers/pinctrl/cirrus/pinctrl-madera.h | 41 + 10 files changed, 1309 insertions(+) create mode 100644 drivers/pinctrl/cirrus/Kconfig create mode 100644 drivers/pinctrl/cirrus/Makefile create mode 100644 drivers/pinctrl/cirrus/pinctrl-cs47l35.c create mode 100644 drivers/pinctrl/cirrus/pinctrl-cs47l85.c create mode 100644 drivers/pinctrl/cirrus/pinctrl-cs47l90.c create mode 100644 drivers/pinctrl/cirrus/pinctrl-madera-core.c create mode 100644 drivers/pinctrl/cirrus/pinctrl-madera.h (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 7b8e857d6b33..f72ef5292fc9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3488,9 +3488,11 @@ T: git https://github.com/CirrusLogic/linux-drivers.git W: https://github.com/CirrusLogic/linux-drivers/wiki S: Supported F: Documentation/devicetree/bindings/mfd/madera.txt +F: Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt F: include/linux/mfd/madera/* F: drivers/mfd/madera* F: drivers/mfd/cs47l* +F: drivers/pinctrl/cirrus/* CLEANCACHE API M: Konrad Rzeszutek Wilk diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 01fe8e0455a0..bc3bd2075ed0 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -359,6 +359,7 @@ source "drivers/pinctrl/vt8500/Kconfig" source "drivers/pinctrl/mediatek/Kconfig" source "drivers/pinctrl/zte/Kconfig" source "drivers/pinctrl/meson/Kconfig" +source "drivers/pinctrl/cirrus/Kconfig" config PINCTRL_XWAY bool diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 657332b121fb..e9aa4f913376 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -63,3 +63,4 @@ obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/ obj-$(CONFIG_ARCH_VT8500) += vt8500/ obj-y += mediatek/ obj-$(CONFIG_PINCTRL_ZX) += zte/ +obj-y += cirrus/ diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig new file mode 100644 index 000000000000..27013e5949bc --- /dev/null +++ b/drivers/pinctrl/cirrus/Kconfig @@ -0,0 +1,14 @@ +# This is all selected by the Madera MFD driver Kconfig options +config PINCTRL_MADERA + tristate + select PINMUX + select GENERIC_PINCONF + +config PINCTRL_CS47L35 + bool + +config PINCTRL_CS47L85 + bool + +config PINCTRL_CS47L90 + bool diff --git a/drivers/pinctrl/cirrus/Makefile b/drivers/pinctrl/cirrus/Makefile new file mode 100644 index 000000000000..6e4938cde9e3 --- /dev/null +++ b/drivers/pinctrl/cirrus/Makefile @@ -0,0 +1,13 @@ +# Cirrus Logic pinctrl drivers +pinctrl-madera-objs := pinctrl-madera-core.o +ifeq ($(CONFIG_PINCTRL_CS47L35),y) +pinctrl-madera-objs += pinctrl-cs47l35.o +endif +ifeq ($(CONFIG_PINCTRL_CS47L85),y) +pinctrl-madera-objs += pinctrl-cs47l85.o +endif +ifeq ($(CONFIG_PINCTRL_CS47L90),y) +pinctrl-madera-objs += pinctrl-cs47l90.o +endif + +obj-$(CONFIG_PINCTRL_MADERA) += pinctrl-madera.o diff --git a/drivers/pinctrl/cirrus/pinctrl-cs47l35.c b/drivers/pinctrl/cirrus/pinctrl-cs47l35.c new file mode 100644 index 000000000000..06b59160783d --- /dev/null +++ b/drivers/pinctrl/cirrus/pinctrl-cs47l35.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + 
* Pinctrl for Cirrus Logic CS47L35 + * + * Copyright (C) 2016-2017 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#include +#include + +#include "pinctrl-madera.h" + +/* + * The alt func groups are the most commonly used functions we place these at + * the lower function indexes for convenience, and the less commonly used gpio + * functions at higher indexes. + * + * To stay consistent with the datasheet the function names are the same as + * the group names for that function's pins + * + * Note - all 1 less than in datasheet because these are zero-indexed + */ +static const unsigned int cs47l35_aif3_pins[] = { 0, 1, 2, 3 }; +static const unsigned int cs47l35_spk_pins[] = { 4, 5 }; +static const unsigned int cs47l35_aif1_pins[] = { 7, 8, 9, 10 }; +static const unsigned int cs47l35_aif2_pins[] = { 11, 12, 13, 14 }; +static const unsigned int cs47l35_mif1_pins[] = { 6, 15 }; + +static const struct madera_pin_groups cs47l35_pin_groups[] = { + { "aif1", cs47l35_aif1_pins, ARRAY_SIZE(cs47l35_aif1_pins) }, + { "aif2", cs47l35_aif2_pins, ARRAY_SIZE(cs47l35_aif2_pins) }, + { "aif3", cs47l35_aif3_pins, ARRAY_SIZE(cs47l35_aif3_pins) }, + { "mif1", cs47l35_mif1_pins, ARRAY_SIZE(cs47l35_mif1_pins) }, + { "pdmspk1", cs47l35_spk_pins, ARRAY_SIZE(cs47l35_spk_pins) }, +}; + +const struct madera_pin_chip cs47l35_pin_chip = { + .n_pins = CS47L35_NUM_GPIOS, + .pin_groups = cs47l35_pin_groups, + .n_pin_groups = ARRAY_SIZE(cs47l35_pin_groups), +}; diff --git a/drivers/pinctrl/cirrus/pinctrl-cs47l85.c b/drivers/pinctrl/cirrus/pinctrl-cs47l85.c new file mode 100644 index 000000000000..0a322e2a0fde --- /dev/null +++ b/drivers/pinctrl/cirrus/pinctrl-cs47l85.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Pinctrl for Cirrus Logic CS47L85 + * + * Copyright (C) 2016-2017 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#include +#include + +#include "pinctrl-madera.h" + +/* + * The alt func groups are the most commonly used functions we place these at + * the lower function indexes for convenience, and the less commonly used gpio + * functions at higher indexes. 
+ * + * To stay consistent with the datasheet the function names are the same as + * the group names for that function's pins + * + * Note - all 1 less than in datasheet because these are zero-indexed + */ +static const unsigned int cs47l85_mif1_pins[] = { 8, 9 }; +static const unsigned int cs47l85_mif2_pins[] = { 10, 11 }; +static const unsigned int cs47l85_mif3_pins[] = { 12, 13 }; +static const unsigned int cs47l85_aif1_pins[] = { 14, 15, 16, 17 }; +static const unsigned int cs47l85_aif2_pins[] = { 18, 19, 20, 21 }; +static const unsigned int cs47l85_aif3_pins[] = { 22, 23, 24, 25 }; +static const unsigned int cs47l85_aif4_pins[] = { 26, 27, 28, 29 }; +static const unsigned int cs47l85_dmic4_pins[] = { 30, 31 }; +static const unsigned int cs47l85_dmic5_pins[] = { 32, 33 }; +static const unsigned int cs47l85_dmic6_pins[] = { 34, 35 }; +static const unsigned int cs47l85_spk1_pins[] = { 36, 38 }; +static const unsigned int cs47l85_spk2_pins[] = { 37, 39 }; + +static const struct madera_pin_groups cs47l85_pin_groups[] = { + { "aif1", cs47l85_aif1_pins, ARRAY_SIZE(cs47l85_aif1_pins) }, + { "aif2", cs47l85_aif2_pins, ARRAY_SIZE(cs47l85_aif2_pins) }, + { "aif3", cs47l85_aif3_pins, ARRAY_SIZE(cs47l85_aif3_pins) }, + { "aif4", cs47l85_aif4_pins, ARRAY_SIZE(cs47l85_aif4_pins) }, + { "mif1", cs47l85_mif1_pins, ARRAY_SIZE(cs47l85_mif1_pins) }, + { "mif2", cs47l85_mif2_pins, ARRAY_SIZE(cs47l85_mif2_pins) }, + { "mif3", cs47l85_mif3_pins, ARRAY_SIZE(cs47l85_mif3_pins) }, + { "dmic4", cs47l85_dmic4_pins, ARRAY_SIZE(cs47l85_dmic4_pins) }, + { "dmic5", cs47l85_dmic5_pins, ARRAY_SIZE(cs47l85_dmic5_pins) }, + { "dmic6", cs47l85_dmic6_pins, ARRAY_SIZE(cs47l85_dmic6_pins) }, + { "pdmspk1", cs47l85_spk1_pins, ARRAY_SIZE(cs47l85_spk1_pins) }, + { "pdmspk2", cs47l85_spk2_pins, ARRAY_SIZE(cs47l85_spk2_pins) }, +}; + +const struct madera_pin_chip cs47l85_pin_chip = { + .n_pins = CS47L85_NUM_GPIOS, + .pin_groups = cs47l85_pin_groups, + .n_pin_groups = ARRAY_SIZE(cs47l85_pin_groups), +}; diff --git a/drivers/pinctrl/cirrus/pinctrl-cs47l90.c b/drivers/pinctrl/cirrus/pinctrl-cs47l90.c new file mode 100644 index 000000000000..fc38f579f492 --- /dev/null +++ b/drivers/pinctrl/cirrus/pinctrl-cs47l90.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Pinctrl for Cirrus Logic CS47L90 + * + * Copyright (C) 2016-2017 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#include +#include + +#include "pinctrl-madera.h" + +/* + * The alt func groups are the most commonly used functions we place these at + * the lower function indexes for convenience, and the less commonly used gpio + * functions at higher indexes. 
+ * + * To stay consistent with the datasheet the function names are the same as + * the group names for that function's pins + * + * Note - all 1 less than in datasheet because these are zero-indexed + */ +static const unsigned int cs47l90_mif1_pins[] = { 8, 9 }; +static const unsigned int cs47l90_mif2_pins[] = { 10, 11 }; +static const unsigned int cs47l90_mif3_pins[] = { 12, 13 }; +static const unsigned int cs47l90_aif1_pins[] = { 14, 15, 16, 17 }; +static const unsigned int cs47l90_aif2_pins[] = { 18, 19, 20, 21 }; +static const unsigned int cs47l90_aif3_pins[] = { 22, 23, 24, 25 }; +static const unsigned int cs47l90_aif4_pins[] = { 26, 27, 28, 29 }; +static const unsigned int cs47l90_dmic4_pins[] = { 30, 31 }; +static const unsigned int cs47l90_dmic5_pins[] = { 32, 33 }; +static const unsigned int cs47l90_dmic3_pins[] = { 34, 35 }; +static const unsigned int cs47l90_spk1_pins[] = { 36, 37 }; + +static const struct madera_pin_groups cs47l90_pin_groups[] = { + { "aif1", cs47l90_aif1_pins, ARRAY_SIZE(cs47l90_aif1_pins) }, + { "aif2", cs47l90_aif2_pins, ARRAY_SIZE(cs47l90_aif2_pins) }, + { "aif3", cs47l90_aif3_pins, ARRAY_SIZE(cs47l90_aif3_pins) }, + { "aif4", cs47l90_aif4_pins, ARRAY_SIZE(cs47l90_aif4_pins) }, + { "mif1", cs47l90_mif1_pins, ARRAY_SIZE(cs47l90_mif1_pins) }, + { "mif2", cs47l90_mif2_pins, ARRAY_SIZE(cs47l90_mif2_pins) }, + { "mif3", cs47l90_mif3_pins, ARRAY_SIZE(cs47l90_mif3_pins) }, + { "dmic3", cs47l90_dmic3_pins, ARRAY_SIZE(cs47l90_dmic3_pins) }, + { "dmic4", cs47l90_dmic4_pins, ARRAY_SIZE(cs47l90_dmic4_pins) }, + { "dmic5", cs47l90_dmic5_pins, ARRAY_SIZE(cs47l90_dmic5_pins) }, + { "pdmspk1", cs47l90_spk1_pins, ARRAY_SIZE(cs47l90_spk1_pins) }, +}; + +const struct madera_pin_chip cs47l90_pin_chip = { + .n_pins = CS47L90_NUM_GPIOS, + .pin_groups = cs47l90_pin_groups, + .n_pin_groups = ARRAY_SIZE(cs47l90_pin_groups), +}; diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c new file mode 100644 index 000000000000..ece41fb2848f --- /dev/null +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -0,0 +1,1076 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Pinctrl for Cirrus Logic Madera codecs + * + * Copyright (C) 2016-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "../pinctrl-utils.h" + +#include "pinctrl-madera.h" + +/* + * Use pin GPIO names for consistency + * NOTE: IDs are zero-indexed for coding convenience + */ +static const struct pinctrl_pin_desc madera_pins[] = { + PINCTRL_PIN(0, "gpio1"), + PINCTRL_PIN(1, "gpio2"), + PINCTRL_PIN(2, "gpio3"), + PINCTRL_PIN(3, "gpio4"), + PINCTRL_PIN(4, "gpio5"), + PINCTRL_PIN(5, "gpio6"), + PINCTRL_PIN(6, "gpio7"), + PINCTRL_PIN(7, "gpio8"), + PINCTRL_PIN(8, "gpio9"), + PINCTRL_PIN(9, "gpio10"), + PINCTRL_PIN(10, "gpio11"), + PINCTRL_PIN(11, "gpio12"), + PINCTRL_PIN(12, "gpio13"), + PINCTRL_PIN(13, "gpio14"), + PINCTRL_PIN(14, "gpio15"), + PINCTRL_PIN(15, "gpio16"), + PINCTRL_PIN(16, "gpio17"), + PINCTRL_PIN(17, "gpio18"), + PINCTRL_PIN(18, "gpio19"), + PINCTRL_PIN(19, "gpio20"), + PINCTRL_PIN(20, "gpio21"), + PINCTRL_PIN(21, "gpio22"), + PINCTRL_PIN(22, "gpio23"), + PINCTRL_PIN(23, "gpio24"), + PINCTRL_PIN(24, "gpio25"), + PINCTRL_PIN(25, "gpio26"), + PINCTRL_PIN(26, "gpio27"), + PINCTRL_PIN(27, "gpio28"), + PINCTRL_PIN(28, "gpio29"), + PINCTRL_PIN(29, "gpio30"), + PINCTRL_PIN(30, "gpio31"), + PINCTRL_PIN(31, "gpio32"), + PINCTRL_PIN(32, "gpio33"), + PINCTRL_PIN(33, "gpio34"), + PINCTRL_PIN(34, "gpio35"), + PINCTRL_PIN(35, "gpio36"), + PINCTRL_PIN(36, "gpio37"), + PINCTRL_PIN(37, "gpio38"), + PINCTRL_PIN(38, "gpio39"), + PINCTRL_PIN(39, "gpio40"), +}; + +/* + * All single-pin functions can be mapped to any GPIO, however pinmux applies + * functions to pin groups and only those groups declared as supporting that + * function. To make this work we must put each pin in its own dummy group so + * that the functions can be described as applying to all pins. 
+ * Since these do not correspond to anything in the actual hardware - they are + * merely an adaptation to pinctrl's view of the world - we use the same name + * as the pin to avoid confusion when comparing with datasheet instructions + */ +static const char * const madera_pin_single_group_names[] = { + "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", + "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", + "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", + "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", + "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", + "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", +}; + +/* set of pin numbers for single-pin groups, zero-indexed */ +static const unsigned int madera_pin_single_group_pins[] = { + 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, +}; + +static const char * const madera_aif1_group_names[] = { "aif1" }; +static const char * const madera_aif2_group_names[] = { "aif2" }; +static const char * const madera_aif3_group_names[] = { "aif3" }; +static const char * const madera_aif4_group_names[] = { "aif4" }; +static const char * const madera_mif1_group_names[] = { "mif1" }; +static const char * const madera_mif2_group_names[] = { "mif2" }; +static const char * const madera_mif3_group_names[] = { "mif3" }; +static const char * const madera_dmic3_group_names[] = { "dmic3" }; +static const char * const madera_dmic4_group_names[] = { "dmic4" }; +static const char * const madera_dmic5_group_names[] = { "dmic5" }; +static const char * const madera_dmic6_group_names[] = { "dmic6" }; +static const char * const madera_spk1_group_names[] = { "pdmspk1" }; +static const char * const madera_spk2_group_names[] = { "pdmspk2" }; + +/* + * alt-functions always apply to a single pin group, other functions always + * apply to all pins + */ +static const struct { + const char *name; + const char * const *group_names; + u32 func; +} madera_mux_funcs[] = { + { + .name = "aif1", + .group_names = madera_aif1_group_names, + .func = 0x000 + }, + { + .name = "aif2", + .group_names = madera_aif2_group_names, + .func = 0x000 + }, + { + .name = "aif3", + .group_names = madera_aif3_group_names, + .func = 0x000 + }, + { + .name = "aif4", + .group_names = madera_aif4_group_names, + .func = 0x000 + }, + { + .name = "mif1", + .group_names = madera_mif1_group_names, + .func = 0x000 + }, + { + .name = "mif2", + .group_names = madera_mif2_group_names, + .func = 0x000 + }, + { + .name = "mif3", + .group_names = madera_mif3_group_names, + .func = 0x000 + }, + { + .name = "dmic3", + .group_names = madera_dmic3_group_names, + .func = 0x000 + }, + { + .name = "dmic4", + .group_names = madera_dmic4_group_names, + .func = 0x000 + }, + { + .name = "dmic5", + .group_names = madera_dmic5_group_names, + .func = 0x000 + }, + { + .name = "dmic6", + .group_names = madera_dmic6_group_names, + .func = 0x000 + }, + { + .name = "pdmspk1", + .group_names = madera_spk1_group_names, + .func = 0x000 + }, + { + .name = "pdmspk2", + .group_names = madera_spk2_group_names, + .func = 0x000 + }, + { + .name = "io", + .group_names = madera_pin_single_group_names, + .func = 0x001 + }, + { + .name = "dsp-gpio", + .group_names = madera_pin_single_group_names, + .func = 0x002 + }, + { + .name = "irq1", + .group_names = madera_pin_single_group_names, + .func = 0x003 + }, + { + .name = "irq2", + .group_names = 
madera_pin_single_group_names, + .func = 0x004 + }, + { + .name = "fll1-clk", + .group_names = madera_pin_single_group_names, + .func = 0x010 + }, + { + .name = "fll2-clk", + .group_names = madera_pin_single_group_names, + .func = 0x011 + }, + { + .name = "fll3-clk", + .group_names = madera_pin_single_group_names, + .func = 0x012 + }, + { + .name = "fllao-clk", + .group_names = madera_pin_single_group_names, + .func = 0x013 + }, + { + .name = "fll1-lock", + .group_names = madera_pin_single_group_names, + .func = 0x018 + }, + { + .name = "fll2-lock", + .group_names = madera_pin_single_group_names, + .func = 0x019 + }, + { + .name = "fll3-lock", + .group_names = madera_pin_single_group_names, + .func = 0x01a + }, + { + .name = "fllao-lock", + .group_names = madera_pin_single_group_names, + .func = 0x01b + }, + { + .name = "opclk", + .group_names = madera_pin_single_group_names, + .func = 0x040 + }, + { + .name = "opclk-async", + .group_names = madera_pin_single_group_names, + .func = 0x041 + }, + { + .name = "pwm1", + .group_names = madera_pin_single_group_names, + .func = 0x048 + }, + { + .name = "pwm2", + .group_names = madera_pin_single_group_names, + .func = 0x049 + }, + { + .name = "spdif", + .group_names = madera_pin_single_group_names, + .func = 0x04c + }, + { + .name = "asrc1-in1-lock", + .group_names = madera_pin_single_group_names, + .func = 0x088 + }, + { + .name = "asrc1-in2-lock", + .group_names = madera_pin_single_group_names, + .func = 0x089 + }, + { + .name = "asrc2-in1-lock", + .group_names = madera_pin_single_group_names, + .func = 0x08a + }, + { + .name = "asrc2-in2-lock", + .group_names = madera_pin_single_group_names, + .func = 0x08b + }, + { + .name = "spkl-short-circuit", + .group_names = madera_pin_single_group_names, + .func = 0x0b6 + }, + { + .name = "spkr-short-circuit", + .group_names = madera_pin_single_group_names, + .func = 0x0b7 + }, + { + .name = "spk-shutdown", + .group_names = madera_pin_single_group_names, + .func = 0x0e0 + }, + { + .name = "spk-overheat-shutdown", + .group_names = madera_pin_single_group_names, + .func = 0x0e1 + }, + { + .name = "spk-overheat-warn", + .group_names = madera_pin_single_group_names, + .func = 0x0e2 + }, + { + .name = "timer1-sts", + .group_names = madera_pin_single_group_names, + .func = 0x140 + }, + { + .name = "timer2-sts", + .group_names = madera_pin_single_group_names, + .func = 0x141 + }, + { + .name = "timer3-sts", + .group_names = madera_pin_single_group_names, + .func = 0x142 + }, + { + .name = "timer4-sts", + .group_names = madera_pin_single_group_names, + .func = 0x143 + }, + { + .name = "timer5-sts", + .group_names = madera_pin_single_group_names, + .func = 0x144 + }, + { + .name = "timer6-sts", + .group_names = madera_pin_single_group_names, + .func = 0x145 + }, + { + .name = "timer7-sts", + .group_names = madera_pin_single_group_names, + .func = 0x146 + }, + { + .name = "timer8-sts", + .group_names = madera_pin_single_group_names, + .func = 0x147 + }, + { + .name = "log1-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x150 + }, + { + .name = "log2-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x151 + }, + { + .name = "log3-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x152 + }, + { + .name = "log4-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x153 + }, + { + .name = "log5-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x154 + }, + { + .name = "log6-fifo-ne", + .group_names = 
madera_pin_single_group_names, + .func = 0x155 + }, + { + .name = "log7-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x156 + }, + { + .name = "log8-fifo-ne", + .group_names = madera_pin_single_group_names, + .func = 0x157 + }, +}; + +static u16 madera_pin_make_drv_str(struct madera_pin_private *priv, + unsigned int milliamps) +{ + switch (milliamps) { + case 4: + return 0; + case 8: + return 2 << MADERA_GP1_DRV_STR_SHIFT; + default: + break; + } + + dev_warn(priv->dev, "%u mA not a valid drive strength", milliamps); + + return 0; +} + +static unsigned int madera_pin_unmake_drv_str(struct madera_pin_private *priv, + u16 regval) +{ + regval = (regval & MADERA_GP1_DRV_STR_MASK) >> MADERA_GP1_DRV_STR_SHIFT; + + switch (regval) { + case 0: + return 4; + case 2: + return 8; + default: + return 0; + } +} + +static int madera_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + + /* Number of alt function groups plus number of single-pin groups */ + return priv->chip->n_pin_groups + priv->chip->n_pins; +} + +static const char *madera_get_group_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + + if (selector < priv->chip->n_pin_groups) + return priv->chip->pin_groups[selector].name; + + selector -= priv->chip->n_pin_groups; + return madera_pin_single_group_names[selector]; +} + +static int madera_get_group_pins(struct pinctrl_dev *pctldev, + unsigned int selector, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + + if (selector < priv->chip->n_pin_groups) { + *pins = priv->chip->pin_groups[selector].pins; + *num_pins = priv->chip->pin_groups[selector].n_pins; + } else { + /* return the dummy group for a single pin */ + selector -= priv->chip->n_pin_groups; + *pins = &madera_pin_single_group_pins[selector]; + *num_pins = 1; + } + return 0; +} + +static void madera_pin_dbg_show_fn(struct madera_pin_private *priv, + struct seq_file *s, + unsigned int pin, unsigned int fn) +{ + const struct madera_pin_chip *chip = priv->chip; + int i, g_pin; + + if (fn != 0) { + for (i = 0; i < ARRAY_SIZE(madera_mux_funcs); ++i) { + if (madera_mux_funcs[i].func == fn) { + seq_printf(s, " FN=%s", + madera_mux_funcs[i].name); + return; + } + } + return; /* ignore unknown function values */ + } + + /* alt function */ + for (i = 0; i < chip->n_pin_groups; ++i) { + for (g_pin = 0; g_pin < chip->pin_groups[i].n_pins; ++g_pin) { + if (chip->pin_groups[i].pins[g_pin] == pin) { + seq_printf(s, " FN=%s", + chip->pin_groups[i].name); + return; + } + } + } +} + +static void __maybe_unused madera_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned int pin) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + unsigned int conf[2]; + unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * pin); + unsigned int fn; + int ret; + + ret = regmap_read(priv->madera->regmap, reg, &conf[0]); + if (ret) + return; + + ret = regmap_read(priv->madera->regmap, reg + 1, &conf[1]); + if (ret) + return; + + seq_printf(s, "%04x:%04x", conf[0], conf[1]); + + fn = (conf[0] & MADERA_GP1_FN_MASK) >> MADERA_GP1_FN_SHIFT; + madera_pin_dbg_show_fn(priv, s, pin, fn); + + /* State of direction bit is only relevant if function==1 */ + if (fn == 1) { + if (conf[1] & MADERA_GP1_DIR_MASK) + seq_puts(s, " IN"); + else + seq_puts(s, " OUT"); + } + + if (conf[1] & 
MADERA_GP1_PU_MASK) + seq_puts(s, " PU"); + + if (conf[1] & MADERA_GP1_PD_MASK) + seq_puts(s, " PD"); + + if (conf[0] & MADERA_GP1_DB_MASK) + seq_puts(s, " DB"); + + if (conf[0] & MADERA_GP1_OP_CFG_MASK) + seq_puts(s, " OD"); + else + seq_puts(s, " CMOS"); + + seq_printf(s, " DRV=%umA", madera_pin_unmake_drv_str(priv, conf[1])); + + if (conf[0] & MADERA_GP1_IP_CFG_MASK) + seq_puts(s, " SCHMITT"); +} + + +static const struct pinctrl_ops madera_pin_group_ops = { + .get_groups_count = madera_get_groups_count, + .get_group_name = madera_get_group_name, + .get_group_pins = madera_get_group_pins, +#if IS_ENABLED(CONFIG_OF) + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinctrl_utils_free_map, +#endif +#if IS_ENABLED(CONFIG_DEBUG_FS) + .pin_dbg_show = madera_pin_dbg_show, +#endif +}; + +static int madera_mux_get_funcs_count(struct pinctrl_dev *pctldev) +{ + return ARRAY_SIZE(madera_mux_funcs); +} + +static const char *madera_mux_get_func_name(struct pinctrl_dev *pctldev, + unsigned int selector) +{ + return madera_mux_funcs[selector].name; +} + +static int madera_mux_get_groups(struct pinctrl_dev *pctldev, + unsigned int selector, + const char * const **groups, + unsigned int * const num_groups) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + + *groups = madera_mux_funcs[selector].group_names; + + if (madera_mux_funcs[selector].func == 0) { + /* alt func always maps to a single group */ + *num_groups = 1; + } else { + /* other funcs map to all available gpio pins */ + *num_groups = priv->chip->n_pins; + } + + return 0; +} + +static int madera_mux_set_mux(struct pinctrl_dev *pctldev, + unsigned int selector, + unsigned int group) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + struct madera *madera = priv->madera; + const struct madera_pin_groups *pin_group = priv->chip->pin_groups; + unsigned int n_chip_groups = priv->chip->n_pin_groups; + const char *func_name = madera_mux_funcs[selector].name; + unsigned int reg; + int i, ret; + + dev_dbg(priv->dev, "%s selecting %u (%s) for group %u (%s)\n", + __func__, selector, func_name, group, + madera_get_group_name(pctldev, group)); + + if (madera_mux_funcs[selector].func == 0) { + /* alt func pin assignments are codec-specific */ + for (i = 0; i < n_chip_groups; ++i) { + if (strcmp(func_name, pin_group->name) == 0) + break; + + ++pin_group; + } + + if (i == n_chip_groups) + return -EINVAL; + + for (i = 0; i < pin_group->n_pins; ++i) { + reg = MADERA_GPIO1_CTRL_1 + (2 * pin_group->pins[i]); + + dev_dbg(priv->dev, "%s setting 0x%x func bits to 0\n", + __func__, reg); + + ret = regmap_update_bits(madera->regmap, reg, + MADERA_GP1_FN_MASK, 0); + if (ret) + break; + + } + } else { + /* + * for other funcs the group will be the gpio number and will + * be offset by the number of chip-specific functions at the + * start of the group list + */ + group -= n_chip_groups; + reg = MADERA_GPIO1_CTRL_1 + (2 * group); + + dev_dbg(priv->dev, "%s setting 0x%x func bits to 0x%x\n", + __func__, reg, madera_mux_funcs[selector].func); + + ret = regmap_update_bits(madera->regmap, + reg, + MADERA_GP1_FN_MASK, + madera_mux_funcs[selector].func); + } + + if (ret) + dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret); + + return ret; +} + +static int madera_gpio_set_direction(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset, + bool input) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + struct madera *madera = priv->madera; +
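/* + * Each pin owns a pair of consecutive control registers: GPIOn_CTRL_1 + * holds the function, output level, debounce and output-drive + * configuration, while GPIOn_CTRL_2 holds the direction, pull and + * drive-strength bits. That pairing is why register addresses in this + * file advance in strides of two per pin. + */ +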
unsigned int reg = MADERA_GPIO1_CTRL_2 + (2 * offset); + unsigned int val; + int ret; + + if (input) + val = MADERA_GP1_DIR; + else + val = 0; + + ret = regmap_update_bits(madera->regmap, reg, MADERA_GP1_DIR_MASK, val); + if (ret) + dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret); + + return ret; +} + +static int madera_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + struct madera *madera = priv->madera; + unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * offset); + int ret; + + /* put the pin into GPIO mode */ + ret = regmap_update_bits(madera->regmap, reg, MADERA_GP1_FN_MASK, 1); + if (ret) + dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret); + + return ret; +} + +static void madera_gpio_disable_free(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + struct madera *madera = priv->madera; + unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * offset); + int ret; + + /* disable GPIO by setting to GPIO IN */ + madera_gpio_set_direction(pctldev, range, offset, true); + + ret = regmap_update_bits(madera->regmap, reg, MADERA_GP1_FN_MASK, 1); + if (ret) + dev_err(priv->dev, "Failed to write to 0x%x (%d)\n", reg, ret); +} + +static const struct pinmux_ops madera_pin_mux_ops = { + .get_functions_count = madera_mux_get_funcs_count, + .get_function_name = madera_mux_get_func_name, + .get_function_groups = madera_mux_get_groups, + .set_mux = madera_mux_set_mux, + .gpio_request_enable = madera_gpio_request_enable, + .gpio_disable_free = madera_gpio_disable_free, + .gpio_set_direction = madera_gpio_set_direction, + .strict = true, /* GPIO and other functions are exclusive */ +}; + +static int madera_pin_conf_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + unsigned int param = pinconf_to_config_param(*config); + unsigned int result = 0; + unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * pin); + unsigned int conf[2]; + int ret; + + ret = regmap_read(priv->madera->regmap, reg, &conf[0]); + if (!ret) + ret = regmap_read(priv->madera->regmap, reg + 1, &conf[1]); + + if (ret) { + dev_err(priv->dev, "Failed to read GP%d conf (%d)\n", + pin + 1, ret); + return ret; + } + + switch (param) { + case PIN_CONFIG_BIAS_BUS_HOLD: + conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + if (conf[1] == (MADERA_GP1_PU | MADERA_GP1_PD)) + result = 1; + break; + case PIN_CONFIG_BIAS_DISABLE: + conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + if (!conf[1]) + result = 1; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + if (conf[1] == MADERA_GP1_PD_MASK) + result = 1; + break; + case PIN_CONFIG_BIAS_PULL_UP: + conf[1] &= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + if (conf[1] == MADERA_GP1_PU_MASK) + result = 1; + break; + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + if (conf[0] & MADERA_GP1_OP_CFG_MASK) + result = 1; + break; + case PIN_CONFIG_DRIVE_PUSH_PULL: + if (!(conf[0] & MADERA_GP1_OP_CFG_MASK)) + result = 1; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + result = madera_pin_unmake_drv_str(priv, conf[1]); + break; + case PIN_CONFIG_INPUT_DEBOUNCE: + if (conf[0] & MADERA_GP1_DB_MASK) + result = 1; + break; + case PIN_CONFIG_INPUT_ENABLE: + if (conf[0] & MADERA_GP1_DIR_MASK) + result = 1; + break; + case 
PIN_CONFIG_INPUT_SCHMITT: + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (conf[0] & MADERA_GP1_IP_CFG_MASK) + result = 1; + break; + case PIN_CONFIG_OUTPUT: + if ((conf[1] & MADERA_GP1_DIR_MASK) && + (conf[0] & MADERA_GP1_LVL_MASK)) + result = 1; + break; + default: + break; + } + + *config = pinconf_to_config_packed(param, result); + + return 0; +} + +static int madera_pin_conf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + u16 conf[2] = {0, 0}; + u16 mask[2] = {0, 0}; + unsigned int reg = MADERA_GPIO1_CTRL_1 + (2 * pin); + unsigned int val; + int ret; + + while (num_configs) { + dev_dbg(priv->dev, "%s config 0x%lx\n", __func__, *configs); + + switch (pinconf_to_config_param(*configs)) { + case PIN_CONFIG_BIAS_BUS_HOLD: + mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + conf[1] |= MADERA_GP1_PU | MADERA_GP1_PD; + break; + case PIN_CONFIG_BIAS_DISABLE: + mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + conf[1] &= ~(MADERA_GP1_PU | MADERA_GP1_PD); + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + conf[1] |= MADERA_GP1_PD; + conf[1] &= ~MADERA_GP1_PU; + break; + case PIN_CONFIG_BIAS_PULL_UP: + mask[1] |= MADERA_GP1_PU_MASK | MADERA_GP1_PD_MASK; + conf[1] |= MADERA_GP1_PU; + conf[1] &= ~MADERA_GP1_PD; + break; + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + mask[0] |= MADERA_GP1_OP_CFG_MASK; + conf[0] |= MADERA_GP1_OP_CFG; + break; + case PIN_CONFIG_DRIVE_PUSH_PULL: + mask[0] |= MADERA_GP1_OP_CFG_MASK; + conf[0] &= ~MADERA_GP1_OP_CFG; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + val = pinconf_to_config_argument(*configs); + mask[1] |= MADERA_GP1_DRV_STR_MASK; + conf[1] &= ~MADERA_GP1_DRV_STR_MASK; + conf[1] |= madera_pin_make_drv_str(priv, val); + break; + case PIN_CONFIG_INPUT_DEBOUNCE: + mask[0] |= MADERA_GP1_DB_MASK; + + /* + * we can't configure debounce time per-pin so value + * is just a flag + */ + val = pinconf_to_config_argument(*configs); + if (val) + conf[0] |= MADERA_GP1_DB; + else + conf[0] &= ~MADERA_GP1_DB; + break; + case PIN_CONFIG_INPUT_ENABLE: + val = pinconf_to_config_argument(*configs); + mask[1] |= MADERA_GP1_DIR_MASK; + if (val) + conf[1] |= MADERA_GP1_DIR; + else + conf[1] &= ~MADERA_GP1_DIR; + break; + case PIN_CONFIG_INPUT_SCHMITT: + val = pinconf_to_config_argument(*configs); + mask[0] |= MADERA_GP1_IP_CFG; + if (val) + conf[0] |= MADERA_GP1_IP_CFG; + else + conf[0] &= ~MADERA_GP1_IP_CFG; + + mask[1] |= MADERA_GP1_DIR_MASK; + conf[1] |= MADERA_GP1_DIR; + break; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + mask[0] |= MADERA_GP1_IP_CFG; + conf[0] |= MADERA_GP1_IP_CFG; + mask[1] |= MADERA_GP1_DIR_MASK; + conf[1] |= MADERA_GP1_DIR; + break; + case PIN_CONFIG_OUTPUT: + val = pinconf_to_config_argument(*configs); + mask[0] |= MADERA_GP1_LVL_MASK; + if (val) + conf[0] |= MADERA_GP1_LVL; + else + conf[0] &= ~MADERA_GP1_LVL; + + mask[1] |= MADERA_GP1_DIR_MASK; + conf[1] &= ~MADERA_GP1_DIR; + break; + default: + break; + } + + ++configs; + --num_configs; + } + + dev_dbg(priv->dev, + "%s gpio%d 0x%x:0x%x 0x%x:0x%x\n", + __func__, pin + 1, reg, conf[0], reg + 1, conf[1]); + + ret = regmap_update_bits(priv->madera->regmap, reg, mask[0], conf[0]); + if (ret) + goto err; + + ++reg; + ret = regmap_update_bits(priv->madera->regmap, reg, mask[1], conf[1]); + if (ret) + goto err; + + return 0; + +err: + dev_err(priv->dev, + "Failed to write GPIO%d conf (%d) reg 0x%x\n", + pin + 1, ret, reg); + + return ret; +} + 
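+/* + * The packed values decoded above are the generic pinconf format from + * <linux/pinctrl/pinconf-generic.h>, so a board that cannot describe the + * codec in devicetree can build its pin setup with the standard machine + * API. A minimal sketch, in which the consumer device name and the + * pin/group choices are illustrative assumptions rather than anything + * taken from this patch: + * + *	static const unsigned long board_gpio5_cfgs[] = { + *		PIN_CONF_PACKED(PIN_CONFIG_DRIVE_STRENGTH, 8), + *		PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1), + *	}; + * + *	static const struct pinctrl_map board_madera_map[] = { + *		PIN_MAP_MUX_GROUP("cs47l85-codec", PINCTRL_STATE_DEFAULT, + *				  "madera-pinctrl", "aif1", "aif1"), + *		PIN_MAP_CONFIGS_PIN("cs47l85-codec", PINCTRL_STATE_DEFAULT, + *				    "madera-pinctrl", "gpio5", + *				    board_gpio5_cfgs), + *	}; + * + * Registering such a table with pinctrl_register_mappings() is exactly + * what the pdata path in madera_pin_probe() below does with + * pdata->gpio_configs. + */ +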
+static int madera_pin_conf_group_set(struct pinctrl_dev *pctldev, + unsigned int selector, + unsigned long *configs, + unsigned int num_configs) +{ + struct madera_pin_private *priv = pinctrl_dev_get_drvdata(pctldev); + const struct madera_pin_groups *pin_group; + unsigned int n_groups = priv->chip->n_pin_groups; + int i, ret; + + dev_dbg(priv->dev, "%s setting group %s\n", __func__, + madera_get_group_name(pctldev, selector)); + + if (selector >= n_groups) { + /* group is a single pin, convert to pin number and set */ + return madera_pin_conf_set(pctldev, + selector - n_groups, + configs, + num_configs); + } else { + pin_group = &priv->chip->pin_groups[selector]; + + for (i = 0; i < pin_group->n_pins; ++i) { + ret = madera_pin_conf_set(pctldev, + pin_group->pins[i], + configs, + num_configs); + if (ret) + return ret; + } + } + + return 0; +} + +static const struct pinconf_ops madera_pin_conf_ops = { + .pin_config_get = madera_pin_conf_get, + .pin_config_set = madera_pin_conf_set, + .pin_config_group_set = madera_pin_conf_group_set, + +}; + +static struct pinctrl_desc madera_pin_desc = { + .name = "madera-pinctrl", + .pins = madera_pins, + .pctlops = &madera_pin_group_ops, + .pmxops = &madera_pin_mux_ops, + .confops = &madera_pin_conf_ops, + .owner = THIS_MODULE, +}; + +static int madera_pin_probe(struct platform_device *pdev) +{ + struct madera *madera = dev_get_drvdata(pdev->dev.parent); + const struct madera_pdata *pdata = dev_get_platdata(madera->dev); + struct madera_pin_private *priv; + int ret; + + BUILD_BUG_ON(ARRAY_SIZE(madera_pin_single_group_names) != + ARRAY_SIZE(madera_pin_single_group_pins)); + + dev_dbg(&pdev->dev, "%s\n", __func__); + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + priv->madera = madera; + pdev->dev.of_node = madera->dev->of_node; + + switch (madera->type) { + case CS47L35: + if (IS_ENABLED(CONFIG_PINCTRL_CS47L35)) + priv->chip = &cs47l35_pin_chip; + break; + case CS47L85: + case WM1840: + if (IS_ENABLED(CONFIG_PINCTRL_CS47L85)) + priv->chip = &cs47l85_pin_chip; + break; + case CS47L90: + case CS47L91: + if (IS_ENABLED(CONFIG_PINCTRL_CS47L90)) + priv->chip = &cs47l90_pin_chip; + break; + default: + break; + } + + if (!priv->chip) + return -ENODEV; + + madera_pin_desc.npins = priv->chip->n_pins; + + ret = devm_pinctrl_register_and_init(&pdev->dev, + &madera_pin_desc, + priv, + &priv->pctl); + if (ret) { + dev_err(priv->dev, "Failed pinctrl register (%d)\n", ret); + return ret; + } + + /* if the configuration is provided through pdata, apply it */ + if (pdata) { + ret = pinctrl_register_mappings(pdata->gpio_configs, + pdata->n_gpio_configs); + if (ret) { + dev_err(priv->dev, + "Failed to register pdata mappings (%d)\n", + ret); + return ret; + } + } + + ret = pinctrl_enable(priv->pctl); + if (ret) { + dev_err(priv->dev, "Failed to enable pinctrl (%d)\n", ret); + return ret; + } + + dev_dbg(priv->dev, "pinctrl probed ok\n"); + + return 0; +} + +static struct platform_driver madera_pin_driver = { + .probe = madera_pin_probe, + .driver = { + .name = "madera-pinctrl", + }, +}; + +module_platform_driver(madera_pin_driver); + +MODULE_DESCRIPTION("Madera pinctrl driver"); +MODULE_AUTHOR("Richard Fitzgerald "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/cirrus/pinctrl-madera.h b/drivers/pinctrl/cirrus/pinctrl-madera.h new file mode 100644 index 000000000000..8000f4f832a1 --- /dev/null +++ b/drivers/pinctrl/cirrus/pinctrl-madera.h @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* + * Pinctrl for Cirrus Logic Madera codecs + * + * Copyright (C) 2016-2017 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#ifndef PINCTRL_MADERA_H +#define PINCTRL_MADERA_H + +struct madera_pin_groups { + const char *name; + const unsigned int *pins; + unsigned int n_pins; +}; + +struct madera_pin_chip { + unsigned int n_pins; + + const struct madera_pin_groups *pin_groups; + unsigned int n_pin_groups; +}; + +struct madera_pin_private { + struct madera *madera; + + const struct madera_pin_chip *chip; /* chip-specific groups */ + + struct device *dev; + struct pinctrl_dev *pctl; +}; + +extern const struct madera_pin_chip cs47l35_pin_chip; +extern const struct madera_pin_chip cs47l85_pin_chip; +extern const struct madera_pin_chip cs47l90_pin_chip; + +#endif -- cgit v1.2.3 From aca429ff9d14f0f55f6d319d6bb1dfc2bbee09fe Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Mon, 21 May 2018 11:00:02 +0100 Subject: gpio: madera: Support Cirrus Logic Madera class codecs This adds support for the GPIOs on Cirrus Logic Madera class codecs. Any pins not used for special functions (see the pinctrl driver) can be used as general single-bit input or output lines. The number of available GPIOs varies between codecs. Note that this is part of a composite MFD for these codecs and can only be used with the corresponding MFD and other child drivers on that silicon. The GPIO block on these codecs does not exist independently of the rest of the MFD. Signed-off-by: Nariman Poushin Signed-off-by: Richard Fitzgerald Signed-off-by: Charles Keepax Acked-by: Linus Walleij Signed-off-by: Lee Jones --- MAINTAINERS | 1 + drivers/gpio/Kconfig | 6 ++ drivers/gpio/Makefile | 1 + drivers/gpio/gpio-madera.c | 206 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 214 insertions(+) create mode 100644 drivers/gpio/gpio-madera.c (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f72ef5292fc9..fecfbd9e45df 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3490,6 +3490,7 @@ S: Supported F: Documentation/devicetree/bindings/mfd/madera.txt F: Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt F: include/linux/mfd/madera/* +F: drivers/gpio/gpio-madera* F: drivers/mfd/madera* F: drivers/mfd/cs47l* F: drivers/pinctrl/cirrus/* diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b960f6f35abd..08c2c121a6dc 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1027,6 +1027,12 @@ config GPIO_LP87565 This driver can also be built as a module. If so, the module will be called gpio-lp87565. +config GPIO_MADERA + tristate "Cirrus Logic Madera class codecs" + depends on PINCTRL_MADERA + help + Support for GPIOs on Cirrus Logic Madera class codecs.
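The pinctrl-madera.h interface above leaves the per-codec pin tables to the cs47l35/cs47l85/cs47l90 pinctrl sources, which this log does not include. A minimal sketch of what one such table plausibly looks like, with the pin numbers and group membership invented purely for illustration:

	static const unsigned int cs47l35_aif1_pins[] = { 2, 3, 4, 5 };

	static const struct madera_pin_groups cs47l35_pin_groups[] = {
		{ "aif1", cs47l35_aif1_pins, ARRAY_SIZE(cs47l35_aif1_pins) },
	};

	const struct madera_pin_chip cs47l35_pin_chip = {
		.n_pins = CS47L35_NUM_GPIOS,
		.pin_groups = cs47l35_pin_groups,
		.n_pin_groups = ARRAY_SIZE(cs47l35_pin_groups),
	};

madera_pin_probe() selects one of these tables from madera->type, and madera_get_groups_count() exposes the chip-specific groups ahead of the single-pin groups.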
+ config GPIO_MAX77620 tristate "GPIO support for PMIC MAX77620 and MAX20024" depends on MFD_MAX77620 diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 1324c8f966a7..22bef2e7c162 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o +obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c new file mode 100644 index 000000000000..7ba68d1a0932 --- /dev/null +++ b/drivers/gpio/gpio-madera.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPIO support for Cirrus Logic Madera codecs + * + * Copyright (C) 2015-2018 Cirrus Logic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2. + */ + +#include +#include +#include +#include + +#include +#include +#include + +struct madera_gpio { + struct madera *madera; + /* storage space for the gpio_chip we're using */ + struct gpio_chip gpio_chip; +}; + +static int madera_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + struct madera_gpio *madera_gpio = gpiochip_get_data(chip); + struct madera *madera = madera_gpio->madera; + unsigned int reg_offset = 2 * offset; + unsigned int val; + int ret; + + ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_2 + reg_offset, + &val); + if (ret < 0) + return ret; + + return !!(val & MADERA_GP1_DIR_MASK); +} + +static int madera_gpio_direction_in(struct gpio_chip *chip, unsigned int offset) +{ + struct madera_gpio *madera_gpio = gpiochip_get_data(chip); + struct madera *madera = madera_gpio->madera; + unsigned int reg_offset = 2 * offset; + + return regmap_update_bits(madera->regmap, + MADERA_GPIO1_CTRL_2 + reg_offset, + MADERA_GP1_DIR_MASK, MADERA_GP1_DIR); +} + +static int madera_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct madera_gpio *madera_gpio = gpiochip_get_data(chip); + struct madera *madera = madera_gpio->madera; + unsigned int reg_offset = 2 * offset; + unsigned int val; + int ret; + + ret = regmap_read(madera->regmap, MADERA_GPIO1_CTRL_1 + reg_offset, + &val); + if (ret < 0) + return ret; + + return !!(val & MADERA_GP1_LVL_MASK); +} + +static int madera_gpio_direction_out(struct gpio_chip *chip, + unsigned int offset, int value) +{ + struct madera_gpio *madera_gpio = gpiochip_get_data(chip); + struct madera *madera = madera_gpio->madera; + unsigned int reg_offset = 2 * offset; + unsigned int reg_val = value ? MADERA_GP1_LVL : 0; + int ret; + + ret = regmap_update_bits(madera->regmap, + MADERA_GPIO1_CTRL_2 + reg_offset, + MADERA_GP1_DIR_MASK, 0); + if (ret < 0) + return ret; + + return regmap_update_bits(madera->regmap, + MADERA_GPIO1_CTRL_1 + reg_offset, + MADERA_GP1_LVL_MASK, reg_val); +} + +static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct madera_gpio *madera_gpio = gpiochip_get_data(chip); + struct madera *madera = madera_gpio->madera; + unsigned int reg_offset = 2 * offset; + unsigned int reg_val = value ? 
MADERA_GP1_LVL : 0; + int ret; + + ret = regmap_update_bits(madera->regmap, + MADERA_GPIO1_CTRL_1 + reg_offset, + MADERA_GP1_LVL_MASK, reg_val); + + /* set() doesn't return an error so log a warning */ + if (ret) + dev_warn(madera->dev, "Failed to write to 0x%x (%d)\n", + MADERA_GPIO1_CTRL_1 + reg_offset, ret); +} + +static struct gpio_chip madera_gpio_chip = { + .label = "madera", + .owner = THIS_MODULE, + .request = gpiochip_generic_request, + .free = gpiochip_generic_free, + .get_direction = madera_gpio_get_direction, + .direction_input = madera_gpio_direction_in, + .get = madera_gpio_get, + .direction_output = madera_gpio_direction_out, + .set = madera_gpio_set, + .set_config = gpiochip_generic_config, + .can_sleep = true, +}; + +static int madera_gpio_probe(struct platform_device *pdev) +{ + struct madera *madera = dev_get_drvdata(pdev->dev.parent); + struct madera_pdata *pdata = dev_get_platdata(madera->dev); + struct madera_gpio *madera_gpio; + int ret; + + madera_gpio = devm_kzalloc(&pdev->dev, sizeof(*madera_gpio), + GFP_KERNEL); + if (!madera_gpio) + return -ENOMEM; + + madera_gpio->madera = madera; + + /* Construct suitable gpio_chip from the template in madera_gpio_chip */ + madera_gpio->gpio_chip = madera_gpio_chip; + madera_gpio->gpio_chip.parent = pdev->dev.parent; + + switch (madera->type) { + case CS47L35: + madera_gpio->gpio_chip.ngpio = CS47L35_NUM_GPIOS; + break; + case CS47L85: + case WM1840: + madera_gpio->gpio_chip.ngpio = CS47L85_NUM_GPIOS; + break; + case CS47L90: + case CS47L91: + madera_gpio->gpio_chip.ngpio = CS47L90_NUM_GPIOS; + break; + default: + dev_err(&pdev->dev, "Unknown chip variant %d\n", madera->type); + return -EINVAL; + } + + /* We want to be usable on systems that don't use devicetree or acpi */ + if (pdata && pdata->gpio_base) + madera_gpio->gpio_chip.base = pdata->gpio_base; + else + madera_gpio->gpio_chip.base = -1; + + ret = devm_gpiochip_add_data(&pdev->dev, + &madera_gpio->gpio_chip, + madera_gpio); + if (ret < 0) { + dev_dbg(&pdev->dev, "Could not register gpiochip, %d\n", ret); + return ret; + } + + /* + * This is part of a composite MFD device which can only be used with + * the corresponding pinctrl driver. On all supported silicon the GPIO + * to pinctrl mapping is fixed in the silicon, so we register it + * explicitly instead of requiring a redundant gpio-ranges in the + * devicetree. + * In any case we also want to work on systems that don't use devicetree + * or acpi. + */ + ret = gpiochip_add_pin_range(&madera_gpio->gpio_chip, "madera-pinctrl", + 0, 0, madera_gpio->gpio_chip.ngpio); + if (ret) { + dev_dbg(&pdev->dev, "Failed to add pin range (%d)\n", ret); + return ret; + } + + return 0; +} + +static struct platform_driver madera_gpio_driver = { + .driver = { + .name = "madera-gpio", + }, + .probe = madera_gpio_probe, +}; + +module_platform_driver(madera_gpio_driver); + +MODULE_SOFTDEP("pre: pinctrl-madera"); +MODULE_DESCRIPTION("GPIO interface for Madera codecs"); +MODULE_AUTHOR("Nariman Poushin "); +MODULE_AUTHOR("Richard Fitzgerald "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:madera-gpio"); -- cgit v1.2.3 From cf65a0f6f6ff7631ba0ac0513a14ca5b65320d80 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Jun 2018 19:01:45 +0200 Subject: dma-mapping: move all DMA mapping code to kernel/dma Currently the code is split over various files with dma- prefixes in the lib/ and drivers/base directories, and the number of files keeps growing.
Move them into a single directory to keep the code together and remove the file name prefixes. To match the irq infrastructure this directory is placed under the kernel/ directory. Signed-off-by: Christoph Hellwig --- Documentation/driver-api/infrastructure.rst | 4 +- MAINTAINERS | 9 +- drivers/base/Makefile | 3 - drivers/base/dma-coherent.c | 434 ------- drivers/base/dma-contiguous.c | 278 ----- drivers/base/dma-mapping.c | 345 ------ include/linux/dma-contiguous.h | 2 +- init/Kconfig | 4 - kernel/Makefile | 1 + kernel/dma/Kconfig | 50 + kernel/dma/Makefile | 11 + kernel/dma/coherent.c | 434 +++++++ kernel/dma/contiguous.c | 278 +++++ kernel/dma/debug.c | 1773 +++++++++++++++++++++++++++ kernel/dma/direct.c | 204 +++ kernel/dma/mapping.c | 345 ++++++ kernel/dma/noncoherent.c | 102 ++ kernel/dma/swiotlb.c | 1087 ++++++++++++++++ kernel/dma/virt.c | 59 + lib/Kconfig | 47 +- lib/Makefile | 6 - lib/dma-debug.c | 1773 --------------------------- lib/dma-direct.c | 204 --- lib/dma-noncoherent.c | 102 -- lib/dma-virt.c | 61 - lib/swiotlb.c | 1087 ---------------- 26 files changed, 4350 insertions(+), 4353 deletions(-) delete mode 100644 drivers/base/dma-coherent.c delete mode 100644 drivers/base/dma-contiguous.c delete mode 100644 drivers/base/dma-mapping.c create mode 100644 kernel/dma/Kconfig create mode 100644 kernel/dma/Makefile create mode 100644 kernel/dma/coherent.c create mode 100644 kernel/dma/contiguous.c create mode 100644 kernel/dma/debug.c create mode 100644 kernel/dma/direct.c create mode 100644 kernel/dma/mapping.c create mode 100644 kernel/dma/noncoherent.c create mode 100644 kernel/dma/swiotlb.c create mode 100644 kernel/dma/virt.c delete mode 100644 lib/dma-debug.c delete mode 100644 lib/dma-direct.c delete mode 100644 lib/dma-noncoherent.c delete mode 100644 lib/dma-virt.c delete mode 100644 lib/swiotlb.c (limited to 'MAINTAINERS') diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst index bee1b9a1702f..6172f3cc3d0b 100644 --- a/Documentation/driver-api/infrastructure.rst +++ b/Documentation/driver-api/infrastructure.rst @@ -49,10 +49,10 @@ Device Drivers Base Device Drivers DMA Management ----------------------------- -.. kernel-doc:: drivers/base/dma-coherent.c +.. kernel-doc:: kernel/dma/coherent.c :export: -.. kernel-doc:: drivers/base/dma-mapping.c +.. 
kernel-doc:: kernel/dma/mapping.c :export: Device drivers PnP support diff --git a/MAINTAINERS b/MAINTAINERS index c13b9fb3be0b..a6844a9e2f64 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4359,12 +4359,7 @@ L: iommu@lists.linux-foundation.org T: git git://git.infradead.org/users/hch/dma-mapping.git W: http://git.infradead.org/users/hch/dma-mapping.git S: Supported -F: lib/dma-debug.c -F: lib/dma-direct.c -F: lib/dma-noncoherent.c -F: lib/dma-virt.c -F: drivers/base/dma-mapping.c -F: drivers/base/dma-coherent.c +F: kernel/dma/ F: include/asm-generic/dma-mapping.h F: include/linux/dma-direct.h F: include/linux/dma-mapping.h @@ -13642,7 +13637,7 @@ M: Konrad Rzeszutek Wilk L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git S: Supported -F: lib/swiotlb.c +F: kernel/dma/swiotlb.c F: arch/*/kernel/pci-swiotlb.c F: include/linux/swiotlb.h diff --git a/drivers/base/Makefile b/drivers/base/Makefile index b074f242a435..704f44295810 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -8,10 +8,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ topology.o container.o property.o cacheinfo.o \ devcon.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o -obj-$(CONFIG_DMA_CMA) += dma-contiguous.o obj-y += power/ -obj-$(CONFIG_HAS_DMA) += dma-mapping.o -obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o obj-$(CONFIG_ISA_BUS_API) += isa.o obj-y += firmware_loader/ obj-$(CONFIG_NUMA) += node.o diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c deleted file mode 100644 index 597d40893862..000000000000 --- a/drivers/base/dma-coherent.c +++ /dev/null @@ -1,434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Coherent per-device memory handling. - * Borrowed from i386 - */ -#include -#include -#include -#include -#include - -struct dma_coherent_mem { - void *virt_base; - dma_addr_t device_base; - unsigned long pfn_base; - int size; - int flags; - unsigned long *bitmap; - spinlock_t spinlock; - bool use_dev_dma_pfn_offset; -}; - -static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; - -static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) -{ - if (dev && dev->dma_mem) - return dev->dma_mem; - return NULL; -} - -static inline dma_addr_t dma_get_device_base(struct device *dev, - struct dma_coherent_mem * mem) -{ - if (mem->use_dev_dma_pfn_offset) - return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT; - else - return mem->device_base; -} - -static int dma_init_coherent_memory( - phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, - struct dma_coherent_mem **mem) -{ - struct dma_coherent_mem *dma_mem = NULL; - void __iomem *mem_base = NULL; - int pages = size >> PAGE_SHIFT; - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); - int ret; - - if (!size) { - ret = -EINVAL; - goto out; - } - - mem_base = memremap(phys_addr, size, MEMREMAP_WC); - if (!mem_base) { - ret = -EINVAL; - goto out; - } - dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); - if (!dma_mem) { - ret = -ENOMEM; - goto out; - } - dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); - if (!dma_mem->bitmap) { - ret = -ENOMEM; - goto out; - } - - dma_mem->virt_base = mem_base; - dma_mem->device_base = device_addr; - dma_mem->pfn_base = PFN_DOWN(phys_addr); - dma_mem->size = pages; - dma_mem->flags = flags; - spin_lock_init(&dma_mem->spinlock); - - *mem = dma_mem; - return 0; - -out: - kfree(dma_mem); - if (mem_base) - memunmap(mem_base); - return ret; -} - -static void 
dma_release_coherent_memory(struct dma_coherent_mem *mem) -{ - if (!mem) - return; - - memunmap(mem->virt_base); - kfree(mem->bitmap); - kfree(mem); -} - -static int dma_assign_coherent_memory(struct device *dev, - struct dma_coherent_mem *mem) -{ - if (!dev) - return -ENODEV; - - if (dev->dma_mem) - return -EBUSY; - - dev->dma_mem = mem; - return 0; -} - -int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, - dma_addr_t device_addr, size_t size, int flags) -{ - struct dma_coherent_mem *mem; - int ret; - - ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem); - if (ret) - return ret; - - ret = dma_assign_coherent_memory(dev, mem); - if (ret) - dma_release_coherent_memory(mem); - return ret; -} -EXPORT_SYMBOL(dma_declare_coherent_memory); - -void dma_release_declared_memory(struct device *dev) -{ - struct dma_coherent_mem *mem = dev->dma_mem; - - if (!mem) - return; - dma_release_coherent_memory(mem); - dev->dma_mem = NULL; -} -EXPORT_SYMBOL(dma_release_declared_memory); - -void *dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size) -{ - struct dma_coherent_mem *mem = dev->dma_mem; - unsigned long flags; - int pos, err; - - size += device_addr & ~PAGE_MASK; - - if (!mem) - return ERR_PTR(-EINVAL); - - spin_lock_irqsave(&mem->spinlock, flags); - pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem)); - err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); - spin_unlock_irqrestore(&mem->spinlock, flags); - - if (err != 0) - return ERR_PTR(err); - return mem->virt_base + (pos << PAGE_SHIFT); -} -EXPORT_SYMBOL(dma_mark_declared_memory_occupied); - -static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, - ssize_t size, dma_addr_t *dma_handle) -{ - int order = get_order(size); - unsigned long flags; - int pageno; - void *ret; - - spin_lock_irqsave(&mem->spinlock, flags); - - if (unlikely(size > (mem->size << PAGE_SHIFT))) - goto err; - - pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); - if (unlikely(pageno < 0)) - goto err; - - /* - * Memory was found in the coherent area. - */ - *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); - ret = mem->virt_base + (pageno << PAGE_SHIFT); - spin_unlock_irqrestore(&mem->spinlock, flags); - memset(ret, 0, size); - return ret; -err: - spin_unlock_irqrestore(&mem->spinlock, flags); - return NULL; -} - -/** - * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool - * @dev: device from which we allocate memory - * @size: size of requested memory area - * @dma_handle: This will be filled with the correct dma handle - * @ret: This pointer will be filled with the virtual address - * to allocated area. - * - * This function should be only called from per-arch dma_alloc_coherent() - * to support allocation from per-device coherent memory pools. - * - * Returns 0 if dma_alloc_coherent should continue with allocating from - * generic memory areas, or !0 if dma_alloc_coherent should return @ret. - */ -int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) -{ - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); - - if (!mem) - return 0; - - *ret = __dma_alloc_from_coherent(mem, size, dma_handle); - if (*ret) - return 1; - - /* - * In the case where the allocation can not be satisfied from the - * per-device area, try to fall back to generic memory if the - * constraints allow it. 
- */ - return mem->flags & DMA_MEMORY_EXCLUSIVE; -} -EXPORT_SYMBOL(dma_alloc_from_dev_coherent); - -void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) -{ - if (!dma_coherent_default_memory) - return NULL; - - return __dma_alloc_from_coherent(dma_coherent_default_memory, size, - dma_handle); -} - -static int __dma_release_from_coherent(struct dma_coherent_mem *mem, - int order, void *vaddr) -{ - if (mem && vaddr >= mem->virt_base && vaddr < - (mem->virt_base + (mem->size << PAGE_SHIFT))) { - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; - unsigned long flags; - - spin_lock_irqsave(&mem->spinlock, flags); - bitmap_release_region(mem->bitmap, page, order); - spin_unlock_irqrestore(&mem->spinlock, flags); - return 1; - } - return 0; -} - -/** - * dma_release_from_dev_coherent() - free memory to device coherent memory pool - * @dev: device from which the memory was allocated - * @order: the order of pages allocated - * @vaddr: virtual address of allocated pages - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, releases that memory. - * - * Returns 1 if we correctly released the memory, or 0 if the caller should - * proceed with releasing memory from generic pools. - */ -int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) -{ - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); - - return __dma_release_from_coherent(mem, order, vaddr); -} -EXPORT_SYMBOL(dma_release_from_dev_coherent); - -int dma_release_from_global_coherent(int order, void *vaddr) -{ - if (!dma_coherent_default_memory) - return 0; - - return __dma_release_from_coherent(dma_coherent_default_memory, order, - vaddr); -} - -static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, - struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) -{ - if (mem && vaddr >= mem->virt_base && vaddr + size <= - (mem->virt_base + (mem->size << PAGE_SHIFT))) { - unsigned long off = vma->vm_pgoff; - int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; - int user_count = vma_pages(vma); - int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - - *ret = -ENXIO; - if (off < count && user_count <= count - off) { - unsigned long pfn = mem->pfn_base + start + off; - *ret = remap_pfn_range(vma, vma->vm_start, pfn, - user_count << PAGE_SHIFT, - vma->vm_page_prot); - } - return 1; - } - return 0; -} - -/** - * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool - * @dev: device from which the memory was allocated - * @vma: vm_area for the userspace memory - * @vaddr: cpu address returned by dma_alloc_from_dev_coherent - * @size: size of the memory buffer allocated - * @ret: result from remap_pfn_range() - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, maps that memory to the provided vma. - * - * Returns 1 if @vaddr belongs to the device coherent pool and the caller - * should return @ret, or 0 if they should proceed with mapping memory from - * generic areas. 
- */ -int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, - void *vaddr, size_t size, int *ret) -{ - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); - - return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); -} -EXPORT_SYMBOL(dma_mmap_from_dev_coherent); - -int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, - size_t size, int *ret) -{ - if (!dma_coherent_default_memory) - return 0; - - return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, - vaddr, size, ret); -} - -/* - * Support for reserved memory regions defined in device tree - */ -#ifdef CONFIG_OF_RESERVED_MEM -#include -#include -#include - -static struct reserved_mem *dma_reserved_default_memory __initdata; - -static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) -{ - struct dma_coherent_mem *mem = rmem->priv; - int ret; - - if (!mem) { - ret = dma_init_coherent_memory(rmem->base, rmem->base, - rmem->size, - DMA_MEMORY_EXCLUSIVE, &mem); - if (ret) { - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return ret; - } - } - mem->use_dev_dma_pfn_offset = true; - rmem->priv = mem; - dma_assign_coherent_memory(dev, mem); - return 0; -} - -static void rmem_dma_device_release(struct reserved_mem *rmem, - struct device *dev) -{ - if (dev) - dev->dma_mem = NULL; -} - -static const struct reserved_mem_ops rmem_dma_ops = { - .device_init = rmem_dma_device_init, - .device_release = rmem_dma_device_release, -}; - -static int __init rmem_dma_setup(struct reserved_mem *rmem) -{ - unsigned long node = rmem->fdt_node; - - if (of_get_flat_dt_prop(node, "reusable", NULL)) - return -EINVAL; - -#ifdef CONFIG_ARM - if (!of_get_flat_dt_prop(node, "no-map", NULL)) { - pr_err("Reserved memory: regions without no-map are not yet supported\n"); - return -EINVAL; - } - - if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { - WARN(dma_reserved_default_memory, - "Reserved memory: region for default DMA coherent area is redefined\n"); - dma_reserved_default_memory = rmem; - } -#endif - - rmem->ops = &rmem_dma_ops; - pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - return 0; -} - -static int __init dma_init_reserved_memory(void) -{ - const struct reserved_mem_ops *ops; - int ret; - - if (!dma_reserved_default_memory) - return -ENOMEM; - - ops = dma_reserved_default_memory->ops; - - /* - * We rely on rmem_dma_device_init() does not propagate error of - * dma_assign_coherent_memory() for "NULL" device. - */ - ret = ops->device_init(dma_reserved_default_memory, NULL); - - if (!ret) { - dma_coherent_default_memory = dma_reserved_default_memory->priv; - pr_info("DMA: default coherent area is set\n"); - } - - return ret; -} - -core_initcall(dma_init_reserved_memory); - -RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); -#endif diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c deleted file mode 100644 index d987dcd1bd56..000000000000 --- a/drivers/base/dma-contiguous.c +++ /dev/null @@ -1,278 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Contiguous Memory Allocator for DMA mapping framework - * Copyright (c) 2010-2011 by Samsung Electronics. 
- * Written by: - * Marek Szyprowski - * Michal Nazarewicz - */ - -#define pr_fmt(fmt) "cma: " fmt - -#ifdef CONFIG_CMA_DEBUG -#ifndef DEBUG -# define DEBUG -#endif -#endif - -#include -#include - -#include -#include -#include -#include -#include - -#ifdef CONFIG_CMA_SIZE_MBYTES -#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES -#else -#define CMA_SIZE_MBYTES 0 -#endif - -struct cma *dma_contiguous_default_area; - -/* - * Default global CMA area size can be defined in kernel's .config. - * This is useful mainly for distro maintainers to create a kernel - * that works correctly for most supported systems. - * The size can be set in bytes or as a percentage of the total memory - * in the system. - * - * Users, who want to set the size of global CMA area for their system - * should use cma= kernel parameter. - */ -static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M; -static phys_addr_t size_cmdline = -1; -static phys_addr_t base_cmdline; -static phys_addr_t limit_cmdline; - -static int __init early_cma(char *p) -{ - pr_debug("%s(%s)\n", __func__, p); - size_cmdline = memparse(p, &p); - if (*p != '@') - return 0; - base_cmdline = memparse(p + 1, &p); - if (*p != '-') { - limit_cmdline = base_cmdline + size_cmdline; - return 0; - } - limit_cmdline = memparse(p + 1, &p); - - return 0; -} -early_param("cma", early_cma); - -#ifdef CONFIG_CMA_SIZE_PERCENTAGE - -static phys_addr_t __init __maybe_unused cma_early_percent_memory(void) -{ - struct memblock_region *reg; - unsigned long total_pages = 0; - - /* - * We cannot use memblock_phys_mem_size() here, because - * memblock_analyze() has not been called yet. - */ - for_each_memblock(memory, reg) - total_pages += memblock_region_memory_end_pfn(reg) - - memblock_region_memory_base_pfn(reg); - - return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT; -} - -#else - -static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) -{ - return 0; -} - -#endif - -/** - * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling - * @limit: End address of the reserved memory (optional, 0 for any). - * - * This function reserves memory from early allocator. It should be - * called by arch specific code once the early allocator (memblock or bootmem) - * has been activated and all other subsystems have already allocated/reserved - * memory. 
- */ -void __init dma_contiguous_reserve(phys_addr_t limit) -{ - phys_addr_t selected_size = 0; - phys_addr_t selected_base = 0; - phys_addr_t selected_limit = limit; - bool fixed = false; - - pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); - - if (size_cmdline != -1) { - selected_size = size_cmdline; - selected_base = base_cmdline; - selected_limit = min_not_zero(limit_cmdline, limit); - if (base_cmdline + size_cmdline == limit_cmdline) - fixed = true; - } else { -#ifdef CONFIG_CMA_SIZE_SEL_MBYTES - selected_size = size_bytes; -#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE) - selected_size = cma_early_percent_memory(); -#elif defined(CONFIG_CMA_SIZE_SEL_MIN) - selected_size = min(size_bytes, cma_early_percent_memory()); -#elif defined(CONFIG_CMA_SIZE_SEL_MAX) - selected_size = max(size_bytes, cma_early_percent_memory()); -#endif - } - - if (selected_size && !dma_contiguous_default_area) { - pr_debug("%s: reserving %ld MiB for global area\n", __func__, - (unsigned long)selected_size / SZ_1M); - - dma_contiguous_reserve_area(selected_size, selected_base, - selected_limit, - &dma_contiguous_default_area, - fixed); - } -} - -/** - * dma_contiguous_reserve_area() - reserve custom contiguous area - * @size: Size of the reserved area (in bytes), - * @base: Base address of the reserved area optional, use 0 for any - * @limit: End address of the reserved memory (optional, 0 for any). - * @res_cma: Pointer to store the created cma region. - * @fixed: hint about where to place the reserved area - * - * This function reserves memory from early allocator. It should be - * called by arch specific code once the early allocator (memblock or bootmem) - * has been activated and all other subsystems have already allocated/reserved - * memory. This function allows to create custom reserved areas for specific - * devices. - * - * If @fixed is true, reserve contiguous area at exactly @base. If false, - * reserve in range from @base to @limit. - */ -int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, - phys_addr_t limit, struct cma **res_cma, - bool fixed) -{ - int ret; - - ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, - "reserved", res_cma); - if (ret) - return ret; - - /* Architecture specific contiguous memory fixup. */ - dma_contiguous_early_fixup(cma_get_base(*res_cma), - cma_get_size(*res_cma)); - - return 0; -} - -/** - * dma_alloc_from_contiguous() - allocate pages from contiguous area - * @dev: Pointer to device for which the allocation is performed. - * @count: Requested number of pages. - * @align: Requested alignment of pages (in PAGE_SIZE order). - * @gfp_mask: GFP flags to use for this allocation. - * - * This function allocates memory buffer for specified device. It uses - * device specific contiguous memory area if available or the default - * global one. Requires architecture specific dev_get_cma_area() helper - * function. - */ -struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, - unsigned int align, gfp_t gfp_mask) -{ - if (align > CONFIG_CMA_ALIGNMENT) - align = CONFIG_CMA_ALIGNMENT; - - return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask); -} - -/** - * dma_release_from_contiguous() - release allocated pages - * @dev: Pointer to device for which the pages were allocated. - * @pages: Allocated pages. - * @count: Number of allocated pages. - * - * This function releases memory allocated by dma_alloc_from_contiguous(). 
- * It returns false when provided pages do not belong to contiguous area and - * true otherwise. - */ -bool dma_release_from_contiguous(struct device *dev, struct page *pages, - int count) -{ - return cma_release(dev_get_cma_area(dev), pages, count); -} - -/* - * Support for reserved memory regions defined in device tree - */ -#ifdef CONFIG_OF_RESERVED_MEM -#include -#include -#include - -#undef pr_fmt -#define pr_fmt(fmt) fmt - -static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) -{ - dev_set_cma_area(dev, rmem->priv); - return 0; -} - -static void rmem_cma_device_release(struct reserved_mem *rmem, - struct device *dev) -{ - dev_set_cma_area(dev, NULL); -} - -static const struct reserved_mem_ops rmem_cma_ops = { - .device_init = rmem_cma_device_init, - .device_release = rmem_cma_device_release, -}; - -static int __init rmem_cma_setup(struct reserved_mem *rmem) -{ - phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); - phys_addr_t mask = align - 1; - unsigned long node = rmem->fdt_node; - struct cma *cma; - int err; - - if (!of_get_flat_dt_prop(node, "reusable", NULL) || - of_get_flat_dt_prop(node, "no-map", NULL)) - return -EINVAL; - - if ((rmem->base & mask) || (rmem->size & mask)) { - pr_err("Reserved memory: incorrect alignment of CMA region\n"); - return -EINVAL; - } - - err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); - if (err) { - pr_err("Reserved memory: unable to setup CMA region\n"); - return err; - } - /* Architecture specific contiguous memory fixup. */ - dma_contiguous_early_fixup(rmem->base, rmem->size); - - if (of_get_flat_dt_prop(node, "linux,cma-default", NULL)) - dma_contiguous_set_default(cma); - - rmem->ops = &rmem_cma_ops; - rmem->priv = cma; - - pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", - &rmem->base, (unsigned long)rmem->size / SZ_1M); - - return 0; -} -RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup); -#endif diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c deleted file mode 100644 index f831a582209c..000000000000 --- a/drivers/base/dma-mapping.c +++ /dev/null @@ -1,345 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * drivers/base/dma-mapping.c - arch-independent dma-mapping routines - * - * Copyright (c) 2006 SUSE Linux Products GmbH - * Copyright (c) 2006 Tejun Heo - */ - -#include -#include -#include -#include -#include -#include -#include - -/* - * Managed DMA API - */ -struct dma_devres { - size_t size; - void *vaddr; - dma_addr_t dma_handle; - unsigned long attrs; -}; - -static void dmam_release(struct device *dev, void *res) -{ - struct dma_devres *this = res; - - dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, - this->attrs); -} - -static int dmam_match(struct device *dev, void *res, void *match_data) -{ - struct dma_devres *this = res, *match = match_data; - - if (this->vaddr == match->vaddr) { - WARN_ON(this->size != match->size || - this->dma_handle != match->dma_handle); - return 1; - } - return 0; -} - -/** - * dmam_alloc_coherent - Managed dma_alloc_coherent() - * @dev: Device to allocate coherent memory for - * @size: Size of allocation - * @dma_handle: Out argument for allocated DMA handle - * @gfp: Allocation flags - * - * Managed dma_alloc_coherent(). Memory allocated using this function - * will be automatically released on driver detach. - * - * RETURNS: - * Pointer to allocated memory on success, NULL on failure. 
- */ -void *dmam_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp) -{ - struct dma_devres *dr; - void *vaddr; - - dr = devres_alloc(dmam_release, sizeof(*dr), gfp); - if (!dr) - return NULL; - - vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp); - if (!vaddr) { - devres_free(dr); - return NULL; - } - - dr->vaddr = vaddr; - dr->dma_handle = *dma_handle; - dr->size = size; - - devres_add(dev, dr); - - return vaddr; -} -EXPORT_SYMBOL(dmam_alloc_coherent); - -/** - * dmam_free_coherent - Managed dma_free_coherent() - * @dev: Device to free coherent memory for - * @size: Size of allocation - * @vaddr: Virtual address of the memory to free - * @dma_handle: DMA handle of the memory to free - * - * Managed dma_free_coherent(). - */ -void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - struct dma_devres match_data = { size, vaddr, dma_handle }; - - dma_free_coherent(dev, size, vaddr, dma_handle); - WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); -} -EXPORT_SYMBOL(dmam_free_coherent); - -/** - * dmam_alloc_attrs - Managed dma_alloc_attrs() - * @dev: Device to allocate non_coherent memory for - * @size: Size of allocation - * @dma_handle: Out argument for allocated DMA handle - * @gfp: Allocation flags - * @attrs: Flags in the DMA_ATTR_* namespace. - * - * Managed dma_alloc_attrs(). Memory allocated using this function will be - * automatically released on driver detach. - * - * RETURNS: - * Pointer to allocated memory on success, NULL on failure. - */ -void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - struct dma_devres *dr; - void *vaddr; - - dr = devres_alloc(dmam_release, sizeof(*dr), gfp); - if (!dr) - return NULL; - - vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); - if (!vaddr) { - devres_free(dr); - return NULL; - } - - dr->vaddr = vaddr; - dr->dma_handle = *dma_handle; - dr->size = size; - dr->attrs = attrs; - - devres_add(dev, dr); - - return vaddr; -} -EXPORT_SYMBOL(dmam_alloc_attrs); - -#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT - -static void dmam_coherent_decl_release(struct device *dev, void *res) -{ - dma_release_declared_memory(dev); -} - -/** - * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() - * @dev: Device to declare coherent memory for - * @phys_addr: Physical address of coherent memory to be declared - * @device_addr: Device address of coherent memory to be declared - * @size: Size of coherent memory to be declared - * @flags: Flags - * - * Managed dma_declare_coherent_memory(). - * - * RETURNS: - * 0 on success, -errno on failure. - */ -int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, - dma_addr_t device_addr, size_t size, int flags) -{ - void *res; - int rc; - - res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL); - if (!res) - return -ENOMEM; - - rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, - flags); - if (!rc) - devres_add(dev, res); - else - devres_free(res); - - return rc; -} -EXPORT_SYMBOL(dmam_declare_coherent_memory); - -/** - * dmam_release_declared_memory - Managed dma_release_declared_memory(). - * @dev: Device to release declared coherent memory for - * - * Managed dmam_release_declared_memory(). 
- */ -void dmam_release_declared_memory(struct device *dev) -{ - WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL)); -} -EXPORT_SYMBOL(dmam_release_declared_memory); - -#endif - -/* - * Create scatter-list for the already allocated DMA buffer. - */ -int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size) -{ - struct page *page = virt_to_page(cpu_addr); - int ret; - - ret = sg_alloc_table(sgt, 1, GFP_KERNEL); - if (unlikely(ret)) - return ret; - - sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); - return 0; -} -EXPORT_SYMBOL(dma_common_get_sgtable); - -/* - * Create userspace mapping for the DMA-coherent memory. - */ -int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) -{ - int ret = -ENXIO; -#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long off = vma->vm_pgoff; - - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < count && user_count <= (count - off)) - ret = remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(cpu_addr)) + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); -#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ - - return ret; -} -EXPORT_SYMBOL(dma_common_mmap); - -#ifdef CONFIG_MMU -static struct vm_struct *__dma_common_pages_remap(struct page **pages, - size_t size, unsigned long vm_flags, pgprot_t prot, - const void *caller) -{ - struct vm_struct *area; - - area = get_vm_area_caller(size, vm_flags, caller); - if (!area) - return NULL; - - if (map_vm_area(area, prot, pages)) { - vunmap(area->addr); - return NULL; - } - - return area; -} - -/* - * remaps an array of PAGE_SIZE pages into another vm_area - * Cannot be used in non-sleeping contexts - */ -void *dma_common_pages_remap(struct page **pages, size_t size, - unsigned long vm_flags, pgprot_t prot, - const void *caller) -{ - struct vm_struct *area; - - area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); - if (!area) - return NULL; - - area->pages = pages; - - return area->addr; -} - -/* - * remaps an allocated contiguous region into another vm_area. 
- * Cannot be used in non-sleeping contexts - */ - -void *dma_common_contiguous_remap(struct page *page, size_t size, - unsigned long vm_flags, - pgprot_t prot, const void *caller) -{ - int i; - struct page **pages; - struct vm_struct *area; - - pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); - if (!pages) - return NULL; - - for (i = 0; i < (size >> PAGE_SHIFT); i++) - pages[i] = nth_page(page, i); - - area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); - - kfree(pages); - - if (!area) - return NULL; - return area->addr; -} - -/* - * unmaps a range previously mapped by dma_common_*_remap - */ -void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) -{ - struct vm_struct *area = find_vm_area(cpu_addr); - - if (!area || (area->flags & vm_flags) != vm_flags) { - WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); - return; - } - - unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size)); - vunmap(cpu_addr); -} -#endif - -/* - * enables DMA API use for a device - */ -int dma_configure(struct device *dev) -{ - if (dev->bus->dma_configure) - return dev->bus->dma_configure(dev); - return 0; -} - -void dma_deconfigure(struct device *dev) -{ - of_dma_deconfigure(dev); - acpi_dma_deconfigure(dev); -} diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index b67bf6ac907d..3c5a4cb3eb95 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -48,7 +48,7 @@ * CMA should not be used by the device drivers directly. It is * only a helper framework for dma-mapping subsystem. * - * For more information, see kernel-docs in drivers/base/dma-contiguous.c + * For more information, see kernel-docs in kernel/dma/contiguous.c */ #ifdef __KERNEL__ diff --git a/init/Kconfig b/init/Kconfig index 5a52f07259a2..fde3d09e8b27 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1719,10 +1719,6 @@ source "arch/Kconfig" endmenu # General setup -config HAVE_GENERIC_DMA_COHERENT - bool - default n - config RT_MUTEXES bool diff --git a/kernel/Makefile b/kernel/Makefile index d2001624fe7a..04bc07c2b42a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -41,6 +41,7 @@ obj-y += printk/ obj-y += irq/ obj-y += rcu/ obj-y += livepatch/ +obj-y += dma/ obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o obj-$(CONFIG_FREEZER) += freezer.o diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig new file mode 100644 index 000000000000..9bd54304446f --- /dev/null +++ b/kernel/dma/Kconfig @@ -0,0 +1,50 @@ + +config HAS_DMA + bool + depends on !NO_DMA + default y + +config NEED_SG_DMA_LENGTH + bool + +config NEED_DMA_MAP_STATE + bool + +config ARCH_DMA_ADDR_T_64BIT + def_bool 64BIT || PHYS_ADDR_T_64BIT + +config HAVE_GENERIC_DMA_COHERENT + bool + +config ARCH_HAS_SYNC_DMA_FOR_DEVICE + bool + +config ARCH_HAS_SYNC_DMA_FOR_CPU + bool + select NEED_DMA_MAP_STATE + +config DMA_DIRECT_OPS + bool + depends on HAS_DMA + +config DMA_NONCOHERENT_OPS + bool + depends on HAS_DMA + select DMA_DIRECT_OPS + +config DMA_NONCOHERENT_MMAP + bool + depends on DMA_NONCOHERENT_OPS + +config DMA_NONCOHERENT_CACHE_SYNC + bool + depends on DMA_NONCOHERENT_OPS + +config DMA_VIRT_OPS + bool + depends on HAS_DMA + +config SWIOTLB + bool + select DMA_DIRECT_OPS + select NEED_DMA_MAP_STATE diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile new file mode 100644 index 000000000000..6de44e4eb454 --- /dev/null +++ b/kernel/dma/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_HAS_DMA) += mapping.o +obj-$(CONFIG_DMA_CMA) 
+= contiguous.o +obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o +obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o +obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o +obj-$(CONFIG_DMA_VIRT_OPS) += virt.o +obj-$(CONFIG_DMA_API_DEBUG) += debug.o +obj-$(CONFIG_SWIOTLB) += swiotlb.o + diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c new file mode 100644 index 000000000000..597d40893862 --- /dev/null +++ b/kernel/dma/coherent.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Coherent per-device memory handling. + * Borrowed from i386 + */ +#include +#include +#include +#include +#include + +struct dma_coherent_mem { + void *virt_base; + dma_addr_t device_base; + unsigned long pfn_base; + int size; + int flags; + unsigned long *bitmap; + spinlock_t spinlock; + bool use_dev_dma_pfn_offset; +}; + +static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; + +static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) +{ + if (dev && dev->dma_mem) + return dev->dma_mem; + return NULL; +} + +static inline dma_addr_t dma_get_device_base(struct device *dev, + struct dma_coherent_mem * mem) +{ + if (mem->use_dev_dma_pfn_offset) + return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT; + else + return mem->device_base; +} + +static int dma_init_coherent_memory( + phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags, + struct dma_coherent_mem **mem) +{ + struct dma_coherent_mem *dma_mem = NULL; + void __iomem *mem_base = NULL; + int pages = size >> PAGE_SHIFT; + int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); + int ret; + + if (!size) { + ret = -EINVAL; + goto out; + } + + mem_base = memremap(phys_addr, size, MEMREMAP_WC); + if (!mem_base) { + ret = -EINVAL; + goto out; + } + dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); + if (!dma_mem) { + ret = -ENOMEM; + goto out; + } + dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!dma_mem->bitmap) { + ret = -ENOMEM; + goto out; + } + + dma_mem->virt_base = mem_base; + dma_mem->device_base = device_addr; + dma_mem->pfn_base = PFN_DOWN(phys_addr); + dma_mem->size = pages; + dma_mem->flags = flags; + spin_lock_init(&dma_mem->spinlock); + + *mem = dma_mem; + return 0; + +out: + kfree(dma_mem); + if (mem_base) + memunmap(mem_base); + return ret; +} + +static void dma_release_coherent_memory(struct dma_coherent_mem *mem) +{ + if (!mem) + return; + + memunmap(mem->virt_base); + kfree(mem->bitmap); + kfree(mem); +} + +static int dma_assign_coherent_memory(struct device *dev, + struct dma_coherent_mem *mem) +{ + if (!dev) + return -ENODEV; + + if (dev->dma_mem) + return -EBUSY; + + dev->dma_mem = mem; + return 0; +} + +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags) +{ + struct dma_coherent_mem *mem; + int ret; + + ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem); + if (ret) + return ret; + + ret = dma_assign_coherent_memory(dev, mem); + if (ret) + dma_release_coherent_memory(mem); + return ret; +} +EXPORT_SYMBOL(dma_declare_coherent_memory); + +void dma_release_declared_memory(struct device *dev) +{ + struct dma_coherent_mem *mem = dev->dma_mem; + + if (!mem) + return; + dma_release_coherent_memory(mem); + dev->dma_mem = NULL; +} +EXPORT_SYMBOL(dma_release_declared_memory); + +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size) +{ + struct dma_coherent_mem *mem = dev->dma_mem; + unsigned long flags; + int 
pos, err; + + size += device_addr & ~PAGE_MASK; + + if (!mem) + return ERR_PTR(-EINVAL); + + spin_lock_irqsave(&mem->spinlock, flags); + pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem)); + err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); + spin_unlock_irqrestore(&mem->spinlock, flags); + + if (err != 0) + return ERR_PTR(err); + return mem->virt_base + (pos << PAGE_SHIFT); +} +EXPORT_SYMBOL(dma_mark_declared_memory_occupied); + +static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) +{ + int order = get_order(size); + unsigned long flags; + int pageno; + void *ret; + + spin_lock_irqsave(&mem->spinlock, flags); + + if (unlikely(size > (mem->size << PAGE_SHIFT))) + goto err; + + pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); + if (unlikely(pageno < 0)) + goto err; + + /* + * Memory was found in the coherent area. + */ + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + ret = mem->virt_base + (pageno << PAGE_SHIFT); + spin_unlock_irqrestore(&mem->spinlock, flags); + memset(ret, 0, size); + return ret; +err: + spin_unlock_irqrestore(&mem->spinlock, flags); + return NULL; +} + +/** + * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool + * @dev: device from which we allocate memory + * @size: size of requested memory area + * @dma_handle: This will be filled with the correct dma handle + * @ret: This pointer will be filled with the virtual address + * to allocated area. + * + * This function should be only called from per-arch dma_alloc_coherent() + * to support allocation from per-device coherent memory pools. + * + * Returns 0 if dma_alloc_coherent should continue with allocating from + * generic memory areas, or !0 if dma_alloc_coherent should return @ret. + */ +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + if (!mem) + return 0; + + *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + if (*ret) + return 1; + + /* + * In the case where the allocation can not be satisfied from the + * per-device area, try to fall back to generic memory if the + * constraints allow it. + */ + return mem->flags & DMA_MEMORY_EXCLUSIVE; +} +EXPORT_SYMBOL(dma_alloc_from_dev_coherent); + +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) +{ + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dma_coherent_default_memory, size, + dma_handle); +} + +static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) +{ + if (mem && vaddr >= mem->virt_base && vaddr < + (mem->virt_base + (mem->size << PAGE_SHIFT))) { + int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; + unsigned long flags; + + spin_lock_irqsave(&mem->spinlock, flags); + bitmap_release_region(mem->bitmap, page, order); + spin_unlock_irqrestore(&mem->spinlock, flags); + return 1; + } + return 0; +} + +/** + * dma_release_from_dev_coherent() - free memory to device coherent memory pool + * @dev: device from which the memory was allocated + * @order: the order of pages allocated + * @vaddr: virtual address of allocated pages + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, releases that memory. + * + * Returns 1 if we correctly released the memory, or 0 if the caller should + * proceed with releasing memory from generic pools. 
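 *
 * Sketch of the expected call pattern (illustrative, not from this
 * patch): an arch dma_free implementation tries the pool first,
 *
 *	if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *		return;
 *
 * and only falls back to its generic release path when this returns 0.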
+ */ +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_release_from_coherent(mem, order, vaddr); +} +EXPORT_SYMBOL(dma_release_from_dev_coherent); + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + +static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) +{ + if (mem && vaddr >= mem->virt_base && vaddr + size <= + (mem->virt_base + (mem->size << PAGE_SHIFT))) { + unsigned long off = vma->vm_pgoff; + int start = (vaddr - mem->virt_base) >> PAGE_SHIFT; + int user_count = vma_pages(vma); + int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + + *ret = -ENXIO; + if (off < count && user_count <= count - off) { + unsigned long pfn = mem->pfn_base + start + off; + *ret = remap_pfn_range(vma, vma->vm_start, pfn, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + } + return 1; + } + return 0; +} + +/** + * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool + * @dev: device from which the memory was allocated + * @vma: vm_area for the userspace memory + * @vaddr: cpu address returned by dma_alloc_from_dev_coherent + * @size: size of the memory buffer allocated + * @ret: result from remap_pfn_range() + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, maps that memory to the provided vma. + * + * Returns 1 if @vaddr belongs to the device coherent pool and the caller + * should return @ret, or 0 if they should proceed with mapping memory from + * generic areas. 
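 *
 * dma_common_mmap(), seen above in the code moved out of drivers/base,
 * is the canonical caller:
 *
 *	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *		return ret;
 *
 * the generic remap_pfn_range() path is only taken when this returns 0.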
+ */ +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); +} +EXPORT_SYMBOL(dma_mmap_from_dev_coherent); + +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, + size_t size, int *ret) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, + vaddr, size, ret); +} + +/* + * Support for reserved memory regions defined in device tree + */ +#ifdef CONFIG_OF_RESERVED_MEM +#include +#include +#include + +static struct reserved_mem *dma_reserved_default_memory __initdata; + +static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) +{ + struct dma_coherent_mem *mem = rmem->priv; + int ret; + + if (!mem) { + ret = dma_init_coherent_memory(rmem->base, rmem->base, + rmem->size, + DMA_MEMORY_EXCLUSIVE, &mem); + if (ret) { + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return ret; + } + } + mem->use_dev_dma_pfn_offset = true; + rmem->priv = mem; + dma_assign_coherent_memory(dev, mem); + return 0; +} + +static void rmem_dma_device_release(struct reserved_mem *rmem, + struct device *dev) +{ + if (dev) + dev->dma_mem = NULL; +} + +static const struct reserved_mem_ops rmem_dma_ops = { + .device_init = rmem_dma_device_init, + .device_release = rmem_dma_device_release, +}; + +static int __init rmem_dma_setup(struct reserved_mem *rmem) +{ + unsigned long node = rmem->fdt_node; + + if (of_get_flat_dt_prop(node, "reusable", NULL)) + return -EINVAL; + +#ifdef CONFIG_ARM + if (!of_get_flat_dt_prop(node, "no-map", NULL)) { + pr_err("Reserved memory: regions without no-map are not yet supported\n"); + return -EINVAL; + } + + if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { + WARN(dma_reserved_default_memory, + "Reserved memory: region for default DMA coherent area is redefined\n"); + dma_reserved_default_memory = rmem; + } +#endif + + rmem->ops = &rmem_dma_ops; + pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + return 0; +} + +static int __init dma_init_reserved_memory(void) +{ + const struct reserved_mem_ops *ops; + int ret; + + if (!dma_reserved_default_memory) + return -ENOMEM; + + ops = dma_reserved_default_memory->ops; + + /* + * We rely on rmem_dma_device_init() does not propagate error of + * dma_assign_coherent_memory() for "NULL" device. + */ + ret = ops->device_init(dma_reserved_default_memory, NULL); + + if (!ret) { + dma_coherent_default_memory = dma_reserved_default_memory->priv; + pr_info("DMA: default coherent area is set\n"); + } + + return ret; +} + +core_initcall(dma_init_reserved_memory); + +RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); +#endif diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c new file mode 100644 index 000000000000..d987dcd1bd56 --- /dev/null +++ b/kernel/dma/contiguous.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Contiguous Memory Allocator for DMA mapping framework + * Copyright (c) 2010-2011 by Samsung Electronics. 
+ * Written by: + * Marek Szyprowski + * Michal Nazarewicz + */ + +#define pr_fmt(fmt) "cma: " fmt + +#ifdef CONFIG_CMA_DEBUG +#ifndef DEBUG +# define DEBUG +#endif +#endif + +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_CMA_SIZE_MBYTES +#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES +#else +#define CMA_SIZE_MBYTES 0 +#endif + +struct cma *dma_contiguous_default_area; + +/* + * Default global CMA area size can be defined in kernel's .config. + * This is useful mainly for distro maintainers to create a kernel + * that works correctly for most supported systems. + * The size can be set in bytes or as a percentage of the total memory + * in the system. + * + * Users, who want to set the size of global CMA area for their system + * should use cma= kernel parameter. + */ +static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M; +static phys_addr_t size_cmdline = -1; +static phys_addr_t base_cmdline; +static phys_addr_t limit_cmdline; + +static int __init early_cma(char *p) +{ + pr_debug("%s(%s)\n", __func__, p); + size_cmdline = memparse(p, &p); + if (*p != '@') + return 0; + base_cmdline = memparse(p + 1, &p); + if (*p != '-') { + limit_cmdline = base_cmdline + size_cmdline; + return 0; + } + limit_cmdline = memparse(p + 1, &p); + + return 0; +} +early_param("cma", early_cma); + +#ifdef CONFIG_CMA_SIZE_PERCENTAGE + +static phys_addr_t __init __maybe_unused cma_early_percent_memory(void) +{ + struct memblock_region *reg; + unsigned long total_pages = 0; + + /* + * We cannot use memblock_phys_mem_size() here, because + * memblock_analyze() has not been called yet. + */ + for_each_memblock(memory, reg) + total_pages += memblock_region_memory_end_pfn(reg) - + memblock_region_memory_base_pfn(reg); + + return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT; +} + +#else + +static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) +{ + return 0; +} + +#endif + +/** + * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling + * @limit: End address of the reserved memory (optional, 0 for any). + * + * This function reserves memory from early allocator. It should be + * called by arch specific code once the early allocator (memblock or bootmem) + * has been activated and all other subsystems have already allocated/reserved + * memory. 
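 *
 * The size consumed here comes either from the CMA Kconfig defaults or
 * from the cma= parameter parsed by early_cma() above, e.g.
 *
 *	cma=64M		- 64 MiB placed anywhere below the given limit
 *	cma=64M@512M	- 64 MiB fixed at physical 512 MiB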
+ */ +void __init dma_contiguous_reserve(phys_addr_t limit) +{ + phys_addr_t selected_size = 0; + phys_addr_t selected_base = 0; + phys_addr_t selected_limit = limit; + bool fixed = false; + + pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); + + if (size_cmdline != -1) { + selected_size = size_cmdline; + selected_base = base_cmdline; + selected_limit = min_not_zero(limit_cmdline, limit); + if (base_cmdline + size_cmdline == limit_cmdline) + fixed = true; + } else { +#ifdef CONFIG_CMA_SIZE_SEL_MBYTES + selected_size = size_bytes; +#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE) + selected_size = cma_early_percent_memory(); +#elif defined(CONFIG_CMA_SIZE_SEL_MIN) + selected_size = min(size_bytes, cma_early_percent_memory()); +#elif defined(CONFIG_CMA_SIZE_SEL_MAX) + selected_size = max(size_bytes, cma_early_percent_memory()); +#endif + } + + if (selected_size && !dma_contiguous_default_area) { + pr_debug("%s: reserving %ld MiB for global area\n", __func__, + (unsigned long)selected_size / SZ_1M); + + dma_contiguous_reserve_area(selected_size, selected_base, + selected_limit, + &dma_contiguous_default_area, + fixed); + } +} + +/** + * dma_contiguous_reserve_area() - reserve custom contiguous area + * @size: Size of the reserved area (in bytes), + * @base: Base address of the reserved area optional, use 0 for any + * @limit: End address of the reserved memory (optional, 0 for any). + * @res_cma: Pointer to store the created cma region. + * @fixed: hint about where to place the reserved area + * + * This function reserves memory from early allocator. It should be + * called by arch specific code once the early allocator (memblock or bootmem) + * has been activated and all other subsystems have already allocated/reserved + * memory. This function allows to create custom reserved areas for specific + * devices. + * + * If @fixed is true, reserve contiguous area at exactly @base. If false, + * reserve in range from @base to @limit. + */ +int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, + phys_addr_t limit, struct cma **res_cma, + bool fixed) +{ + int ret; + + ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, + "reserved", res_cma); + if (ret) + return ret; + + /* Architecture specific contiguous memory fixup. */ + dma_contiguous_early_fixup(cma_get_base(*res_cma), + cma_get_size(*res_cma)); + + return 0; +} + +/** + * dma_alloc_from_contiguous() - allocate pages from contiguous area + * @dev: Pointer to device for which the allocation is performed. + * @count: Requested number of pages. + * @align: Requested alignment of pages (in PAGE_SIZE order). + * @gfp_mask: GFP flags to use for this allocation. + * + * This function allocates memory buffer for specified device. It uses + * device specific contiguous memory area if available or the default + * global one. Requires architecture specific dev_get_cma_area() helper + * function. + */ +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, + unsigned int align, gfp_t gfp_mask) +{ + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask); +} + +/** + * dma_release_from_contiguous() - release allocated pages + * @dev: Pointer to device for which the pages were allocated. + * @pages: Allocated pages. + * @count: Number of allocated pages. + * + * This function releases memory allocated by dma_alloc_from_contiguous(). 
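 * A balanced pair, sketched with illustrative variables:
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(size),
 *					 GFP_KERNEL);
 *	...
 *	dma_release_from_contiguous(dev, page, count);
 *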
+ * It returns false when provided pages do not belong to contiguous area and + * true otherwise. + */ +bool dma_release_from_contiguous(struct device *dev, struct page *pages, + int count) +{ + return cma_release(dev_get_cma_area(dev), pages, count); +} + +/* + * Support for reserved memory regions defined in device tree + */ +#ifdef CONFIG_OF_RESERVED_MEM +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) fmt + +static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) +{ + dev_set_cma_area(dev, rmem->priv); + return 0; +} + +static void rmem_cma_device_release(struct reserved_mem *rmem, + struct device *dev) +{ + dev_set_cma_area(dev, NULL); +} + +static const struct reserved_mem_ops rmem_cma_ops = { + .device_init = rmem_cma_device_init, + .device_release = rmem_cma_device_release, +}; + +static int __init rmem_cma_setup(struct reserved_mem *rmem) +{ + phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); + phys_addr_t mask = align - 1; + unsigned long node = rmem->fdt_node; + struct cma *cma; + int err; + + if (!of_get_flat_dt_prop(node, "reusable", NULL) || + of_get_flat_dt_prop(node, "no-map", NULL)) + return -EINVAL; + + if ((rmem->base & mask) || (rmem->size & mask)) { + pr_err("Reserved memory: incorrect alignment of CMA region\n"); + return -EINVAL; + } + + err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma); + if (err) { + pr_err("Reserved memory: unable to setup CMA region\n"); + return err; + } + /* Architecture specific contiguous memory fixup. */ + dma_contiguous_early_fixup(rmem->base, rmem->size); + + if (of_get_flat_dt_prop(node, "linux,cma-default", NULL)) + dma_contiguous_set_default(cma); + + rmem->ops = &rmem_cma_ops; + rmem->priv = cma; + + pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", + &rmem->base, (unsigned long)rmem->size / SZ_1M); + + return 0; +} +RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup); +#endif diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c new file mode 100644 index 000000000000..c007d25bee09 --- /dev/null +++ b/kernel/dma/debug.c @@ -0,0 +1,1773 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. + * + * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define HASH_SIZE 1024ULL +#define HASH_FN_SHIFT 13 +#define HASH_FN_MASK (HASH_SIZE - 1) + +/* allow architectures to override this if absolutely required */ +#ifndef PREALLOC_DMA_DEBUG_ENTRIES +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) +#endif + +enum { + dma_debug_single, + dma_debug_page, + dma_debug_sg, + dma_debug_coherent, + dma_debug_resource, +}; + +enum map_err_types { + MAP_ERR_CHECK_NOT_APPLICABLE, + MAP_ERR_NOT_CHECKED, + MAP_ERR_CHECKED, +}; + +#define DMA_DEBUG_STACKTRACE_ENTRIES 5 + +/** + * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping + * @list: node on pre-allocated free_entries list + * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent + * @type: single, page, sg, coherent + * @pfn: page frame of the start address + * @offset: offset of mapping relative to pfn + * @size: length of the mapping + * @direction: enum dma_data_direction + * @sg_call_ents: 'nents' from dma_map_sg + * @sg_mapped_ents: 'mapped_ents' from dma_map_sg + * @map_err_type: track whether dma_mapping_error() was checked + * @stacktrace: support backtraces when a violation is detected + */ +struct dma_debug_entry { + struct list_head list; + struct device *dev; + int type; + unsigned long pfn; + size_t offset; + u64 dev_addr; + u64 size; + int direction; + int sg_call_ents; + int sg_mapped_ents; + enum map_err_types map_err_type; +#ifdef CONFIG_STACKTRACE + struct stack_trace stacktrace; + unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; +#endif +}; + +typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); + +struct hash_bucket { + struct list_head list; + spinlock_t lock; +} ____cacheline_aligned_in_smp; + +/* Hash list to save the allocated dma addresses */ +static struct hash_bucket dma_entry_hash[HASH_SIZE]; +/* List of pre-allocated dma_debug_entry's */ +static LIST_HEAD(free_entries); +/* Lock for the list above */ +static DEFINE_SPINLOCK(free_entries_lock); + +/* Global disable flag - will be set in case of an error */ +static bool global_disable __read_mostly; + +/* Early initialization disable flag, set at the end of dma_debug_init */ +static bool dma_debug_initialized __read_mostly; + +static inline bool dma_debug_disabled(void) +{ + return global_disable || !dma_debug_initialized; +} + +/* Global error count */ +static u32 error_count; + +/* Global error show enable*/ +static u32 show_all_errors __read_mostly; +/* Number of errors to show */ +static u32 show_num_errors = 1; + +static u32 num_free_entries; +static u32 min_free_entries; +static u32 nr_total_entries; + +/* number of preallocated entries requested by kernel cmdline */ +static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; + +/* debugfs dentry's for the stuff above */ +static struct dentry *dma_debug_dent __read_mostly; +static struct dentry *global_disable_dent __read_mostly; +static struct dentry *error_count_dent __read_mostly; +static struct dentry *show_all_errors_dent __read_mostly; +static struct dentry *show_num_errors_dent __read_mostly; +static struct dentry *num_free_entries_dent __read_mostly; +static struct dentry 
*min_free_entries_dent __read_mostly; +static struct dentry *filter_dent __read_mostly; + +/* per-driver filter related state */ + +#define NAME_MAX_LEN 64 + +static char current_driver_name[NAME_MAX_LEN] __read_mostly; +static struct device_driver *current_driver __read_mostly; + +static DEFINE_RWLOCK(driver_name_lock); + +static const char *const maperr2str[] = { + [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", + [MAP_ERR_NOT_CHECKED] = "dma map error not checked", + [MAP_ERR_CHECKED] = "dma map error checked", +}; + +static const char *type2name[5] = { "single", "page", + "scather-gather", "coherent", + "resource" }; + +static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", + "DMA_FROM_DEVICE", "DMA_NONE" }; + +/* + * The access to some variables in this macro is racy. We can't use atomic_t + * here because all these variables are exported to debugfs. Some of them even + * writeable. This is also the reason why a lock won't help much. But anyway, + * the races are no big deal. Here is why: + * + * error_count: the addition is racy, but the worst thing that can happen is + * that we don't count some errors + * show_num_errors: the subtraction is racy. Also no big deal because in + * worst case this will result in one warning more in the + * system log than the user configured. This variable is + * writeable via debugfs. + */ +static inline void dump_entry_trace(struct dma_debug_entry *entry) +{ +#ifdef CONFIG_STACKTRACE + if (entry) { + pr_warning("Mapped at:\n"); + print_stack_trace(&entry->stacktrace, 0); + } +#endif +} + +static bool driver_filter(struct device *dev) +{ + struct device_driver *drv; + unsigned long flags; + bool ret; + + /* driver filter off */ + if (likely(!current_driver_name[0])) + return true; + + /* driver filter on and initialized */ + if (current_driver && dev && dev->driver == current_driver) + return true; + + /* driver filter on, but we can't filter on a NULL device... */ + if (!dev) + return false; + + if (current_driver || !current_driver_name[0]) + return false; + + /* driver filter on but not yet initialized */ + drv = dev->driver; + if (!drv) + return false; + + /* lock to protect against change of current_driver_name */ + read_lock_irqsave(&driver_name_lock, flags); + + ret = false; + if (drv->name && + strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { + current_driver = drv; + ret = true; + } + + read_unlock_irqrestore(&driver_name_lock, flags); + + return ret; +} + +#define err_printk(dev, entry, format, arg...) do { \ + error_count += 1; \ + if (driver_filter(dev) && \ + (show_all_errors || show_num_errors > 0)) { \ + WARN(1, "%s %s: " format, \ + dev ? dev_driver_string(dev) : "NULL", \ + dev ? dev_name(dev) : "NULL", ## arg); \ + dump_entry_trace(entry); \ + } \ + if (!show_all_errors && show_num_errors > 0) \ + show_num_errors -= 1; \ + } while (0); + +/* + * Hash related functions + * + * Every DMA-API request is saved into a struct dma_debug_entry. To + * have quick access to these structs they are stored into a hash. + */ +static int hash_fn(struct dma_debug_entry *entry) +{ + /* + * Hash function is based on the dma address. + * We use bits 20-27 here as the index into the hash + */ + return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; +} + +/* + * Request exclusive access to a hash bucket for a given dma_debug_entry. 
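 * (returns with the bucket spinlock held and interrupts disabled; every
 * caller must hand the saved flags back to put_hash_bucket())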
+ */ +static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, + unsigned long *flags) + __acquires(&dma_entry_hash[idx].lock) +{ + int idx = hash_fn(entry); + unsigned long __flags; + + spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); + *flags = __flags; + return &dma_entry_hash[idx]; +} + +/* + * Give up exclusive access to the hash bucket + */ +static void put_hash_bucket(struct hash_bucket *bucket, + unsigned long *flags) + __releases(&bucket->lock) +{ + unsigned long __flags = *flags; + + spin_unlock_irqrestore(&bucket->lock, __flags); +} + +static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) +{ + return ((a->dev_addr == b->dev_addr) && + (a->dev == b->dev)) ? true : false; +} + +static bool containing_match(struct dma_debug_entry *a, + struct dma_debug_entry *b) +{ + if (a->dev != b->dev) + return false; + + if ((b->dev_addr <= a->dev_addr) && + ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) + return true; + + return false; +} + +/* + * Search a given entry in the hash bucket list + */ +static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, + struct dma_debug_entry *ref, + match_fn match) +{ + struct dma_debug_entry *entry, *ret = NULL; + int matches = 0, match_lvl, last_lvl = -1; + + list_for_each_entry(entry, &bucket->list, list) { + if (!match(ref, entry)) + continue; + + /* + * Some drivers map the same physical address multiple + * times. Without a hardware IOMMU this results in the + * same device addresses being put into the dma-debug + * hash multiple times too. This can result in false + * positives being reported. Therefore we implement a + * best-fit algorithm here which returns the entry from + * the hash which fits best to the reference value + * instead of the first-fit. + */ + matches += 1; + match_lvl = 0; + entry->size == ref->size ? ++match_lvl : 0; + entry->type == ref->type ? ++match_lvl : 0; + entry->direction == ref->direction ? ++match_lvl : 0; + entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; + + if (match_lvl == 4) { + /* perfect-fit - return the result */ + return entry; + } else if (match_lvl > last_lvl) { + /* + * We found an entry that fits better then the + * previous one or it is the 1st match. + */ + last_lvl = match_lvl; + ret = entry; + } + } + + /* + * If we have multiple matches but no perfect-fit, just return + * NULL. + */ + ret = (matches == 1) ? 
ret : NULL; + + return ret; +} + +static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, + struct dma_debug_entry *ref) +{ + return __hash_bucket_find(bucket, ref, exact_match); +} + +static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, + struct dma_debug_entry *ref, + unsigned long *flags) +{ + + unsigned int max_range = dma_get_max_seg_size(ref->dev); + struct dma_debug_entry *entry, index = *ref; + unsigned int range = 0; + + while (range <= max_range) { + entry = __hash_bucket_find(*bucket, ref, containing_match); + + if (entry) + return entry; + + /* + * Nothing found, go back a hash bucket + */ + put_hash_bucket(*bucket, flags); + range += (1 << HASH_FN_SHIFT); + index.dev_addr -= (1 << HASH_FN_SHIFT); + *bucket = get_hash_bucket(&index, flags); + } + + return NULL; +} + +/* + * Add an entry to a hash bucket + */ +static void hash_bucket_add(struct hash_bucket *bucket, + struct dma_debug_entry *entry) +{ + list_add_tail(&entry->list, &bucket->list); +} + +/* + * Remove entry from a hash bucket list + */ +static void hash_bucket_del(struct dma_debug_entry *entry) +{ + list_del(&entry->list); +} + +static unsigned long long phys_addr(struct dma_debug_entry *entry) +{ + if (entry->type == dma_debug_resource) + return __pfn_to_phys(entry->pfn) + entry->offset; + + return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; +} + +/* + * Dump mapping entries for debugging purposes + */ +void debug_dma_dump_mappings(struct device *dev) +{ + int idx; + + for (idx = 0; idx < HASH_SIZE; idx++) { + struct hash_bucket *bucket = &dma_entry_hash[idx]; + struct dma_debug_entry *entry; + unsigned long flags; + + spin_lock_irqsave(&bucket->lock, flags); + + list_for_each_entry(entry, &bucket->list, list) { + if (!dev || dev == entry->dev) { + dev_info(entry->dev, + "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n", + type2name[entry->type], idx, + phys_addr(entry), entry->pfn, + entry->dev_addr, entry->size, + dir2name[entry->direction], + maperr2str[entry->map_err_type]); + } + } + + spin_unlock_irqrestore(&bucket->lock, flags); + } +} + +/* + * For each mapping (initial cacheline in the case of + * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a + * scatterlist, or the cacheline specified in dma_map_single) insert + * into this tree using the cacheline as the key. At + * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If + * the entry already exists at insertion time add a tag as a reference + * count for the overlapping mappings. For now, the overlap tracking + * just ensures that 'unmaps' balance 'maps' before marking the + * cacheline idle, but we should also be flagging overlaps as an API + * violation. + * + * Memory usage is mostly constrained by the maximum number of available + * dma-debug entries in that we need a free dma_debug_entry before + * inserting into the tree. In the case of dma_map_page and + * dma_alloc_coherent there is only one dma_debug_entry and one + * dma_active_cacheline entry to track per event. dma_map_sg(), on the + * other hand, consumes a single dma_debug_entry, but inserts 'nents' + * entries into the tree. + * + * At any time debug_dma_assert_idle() can be called to trigger a + * warning if any cachelines in the given page are in the active set. 
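 *
 * Cacheline numbers are computed by to_cacheline_number() below as
 * (pfn << (PAGE_SHIFT - L1_CACHE_SHIFT)) + (offset >> L1_CACHE_SHIFT);
 * with 4 KiB pages and 64-byte lines that is 64 cachelines per page,
 * e.g. pfn 2 with offset 128 yields cacheline 2 * 64 + 2 = 130.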
+ */ +static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); +static DEFINE_SPINLOCK(radix_lock); +#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) +#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) +#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) + +static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) +{ + return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + + (entry->offset >> L1_CACHE_SHIFT); +} + +static int active_cacheline_read_overlap(phys_addr_t cln) +{ + int overlap = 0, i; + + for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) + if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) + overlap |= 1 << i; + return overlap; +} + +static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) +{ + int i; + + if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) + return overlap; + + for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) + if (overlap & 1 << i) + radix_tree_tag_set(&dma_active_cacheline, cln, i); + else + radix_tree_tag_clear(&dma_active_cacheline, cln, i); + + return overlap; +} + +static void active_cacheline_inc_overlap(phys_addr_t cln) +{ + int overlap = active_cacheline_read_overlap(cln); + + overlap = active_cacheline_set_overlap(cln, ++overlap); + + /* If we overflowed the overlap counter then we're potentially + * leaking dma-mappings. Otherwise, if maps and unmaps are + * balanced then this overflow may cause false negatives in + * debug_dma_assert_idle() as the cacheline may be marked idle + * prematurely. + */ + WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, + "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n", + ACTIVE_CACHELINE_MAX_OVERLAP, &cln); +} + +static int active_cacheline_dec_overlap(phys_addr_t cln) +{ + int overlap = active_cacheline_read_overlap(cln); + + return active_cacheline_set_overlap(cln, --overlap); +} + +static int active_cacheline_insert(struct dma_debug_entry *entry) +{ + phys_addr_t cln = to_cacheline_number(entry); + unsigned long flags; + int rc; + + /* If the device is not writing memory then we don't have any + * concerns about the cpu consuming stale data. This mitigates + * legitimate usages of overlapping mappings. + */ + if (entry->direction == DMA_TO_DEVICE) + return 0; + + spin_lock_irqsave(&radix_lock, flags); + rc = radix_tree_insert(&dma_active_cacheline, cln, entry); + if (rc == -EEXIST) + active_cacheline_inc_overlap(cln); + spin_unlock_irqrestore(&radix_lock, flags); + + return rc; +} + +static void active_cacheline_remove(struct dma_debug_entry *entry) +{ + phys_addr_t cln = to_cacheline_number(entry); + unsigned long flags; + + /* ...mirror the insert case */ + if (entry->direction == DMA_TO_DEVICE) + return; + + spin_lock_irqsave(&radix_lock, flags); + /* since we are counting overlaps the final put of the + * cacheline will occur when the overlap count is 0. + * active_cacheline_dec_overlap() returns -1 in that case + */ + if (active_cacheline_dec_overlap(cln) < 0) + radix_tree_delete(&dma_active_cacheline, cln); + spin_unlock_irqrestore(&radix_lock, flags); +} + +/** + * debug_dma_assert_idle() - assert that a page is not undergoing dma + * @page: page to lookup in the dma_active_cacheline tree + * + * Place a call to this routine in cases where the cpu touching the page + * before the dma completes (page is dma_unmapped) will lead to data + * corruption. 
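 * A copy-on-write fault is the classic example: the source page must
 * not be copied while a device may still be writing it.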
+ */ +void debug_dma_assert_idle(struct page *page) +{ + static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; + struct dma_debug_entry *entry = NULL; + void **results = (void **) &ents; + unsigned int nents, i; + unsigned long flags; + phys_addr_t cln; + + if (dma_debug_disabled()) + return; + + if (!page) + return; + + cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; + spin_lock_irqsave(&radix_lock, flags); + nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, + CACHELINES_PER_PAGE); + for (i = 0; i < nents; i++) { + phys_addr_t ent_cln = to_cacheline_number(ents[i]); + + if (ent_cln == cln) { + entry = ents[i]; + break; + } else if (ent_cln >= cln + CACHELINES_PER_PAGE) + break; + } + spin_unlock_irqrestore(&radix_lock, flags); + + if (!entry) + return; + + cln = to_cacheline_number(entry); + err_printk(entry->dev, entry, + "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n", + &cln); +} + +/* + * Wrapper function for adding an entry to the hash. + * This function takes care of locking itself. + */ +static void add_dma_entry(struct dma_debug_entry *entry) +{ + struct hash_bucket *bucket; + unsigned long flags; + int rc; + + bucket = get_hash_bucket(entry, &flags); + hash_bucket_add(bucket, entry); + put_hash_bucket(bucket, &flags); + + rc = active_cacheline_insert(entry); + if (rc == -ENOMEM) { + pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n"); + global_disable = true; + } + + /* TODO: report -EEXIST errors here as overlapping mappings are + * not supported by the DMA API + */ +} + +static struct dma_debug_entry *__dma_entry_alloc(void) +{ + struct dma_debug_entry *entry; + + entry = list_entry(free_entries.next, struct dma_debug_entry, list); + list_del(&entry->list); + memset(entry, 0, sizeof(*entry)); + + num_free_entries -= 1; + if (num_free_entries < min_free_entries) + min_free_entries = num_free_entries; + + return entry; +} + +/* struct dma_entry allocator + * + * The next two functions implement the allocator for + * struct dma_debug_entries. + */ +static struct dma_debug_entry *dma_entry_alloc(void) +{ + struct dma_debug_entry *entry; + unsigned long flags; + + spin_lock_irqsave(&free_entries_lock, flags); + + if (list_empty(&free_entries)) { + global_disable = true; + spin_unlock_irqrestore(&free_entries_lock, flags); + pr_err("DMA-API: debugging out of memory - disabling\n"); + return NULL; + } + + entry = __dma_entry_alloc(); + + spin_unlock_irqrestore(&free_entries_lock, flags); + +#ifdef CONFIG_STACKTRACE + entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; + entry->stacktrace.entries = entry->st_entries; + entry->stacktrace.skip = 2; + save_stack_trace(&entry->stacktrace); +#endif + + return entry; +} + +static void dma_entry_free(struct dma_debug_entry *entry) +{ + unsigned long flags; + + active_cacheline_remove(entry); + + /* + * add to beginning of the list - this way the entries are + * more likely cache hot when they are reallocated. 
+ */ + spin_lock_irqsave(&free_entries_lock, flags); + list_add(&entry->list, &free_entries); + num_free_entries += 1; + spin_unlock_irqrestore(&free_entries_lock, flags); +} + +int dma_debug_resize_entries(u32 num_entries) +{ + int i, delta, ret = 0; + unsigned long flags; + struct dma_debug_entry *entry; + LIST_HEAD(tmp); + + spin_lock_irqsave(&free_entries_lock, flags); + + if (nr_total_entries < num_entries) { + delta = num_entries - nr_total_entries; + + spin_unlock_irqrestore(&free_entries_lock, flags); + + for (i = 0; i < delta; i++) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + break; + + list_add_tail(&entry->list, &tmp); + } + + spin_lock_irqsave(&free_entries_lock, flags); + + list_splice(&tmp, &free_entries); + nr_total_entries += i; + num_free_entries += i; + } else { + delta = nr_total_entries - num_entries; + + for (i = 0; i < delta && !list_empty(&free_entries); i++) { + entry = __dma_entry_alloc(); + kfree(entry); + } + + nr_total_entries -= i; + } + + if (nr_total_entries != num_entries) + ret = 1; + + spin_unlock_irqrestore(&free_entries_lock, flags); + + return ret; +} + +/* + * DMA-API debugging init code + * + * The init code does two things: + * 1. Initialize core data structures + * 2. Preallocate a given number of dma_debug_entry structs + */ + +static int prealloc_memory(u32 num_entries) +{ + struct dma_debug_entry *entry, *next_entry; + int i; + + for (i = 0; i < num_entries; ++i) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto out_err; + + list_add_tail(&entry->list, &free_entries); + } + + num_free_entries = num_entries; + min_free_entries = num_entries; + + pr_info("DMA-API: preallocated %d debug entries\n", num_entries); + + return 0; + +out_err: + + list_for_each_entry_safe(entry, next_entry, &free_entries, list) { + list_del(&entry->list); + kfree(entry); + } + + return -ENOMEM; +} + +static ssize_t filter_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[NAME_MAX_LEN + 1]; + unsigned long flags; + int len; + + if (!current_driver_name[0]) + return 0; + + /* + * We can't copy to userspace directly because current_driver_name can + * only be read under the driver_name_lock with irqs disabled. So + * create a temporary copy first. + */ + read_lock_irqsave(&driver_name_lock, flags); + len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name); + read_unlock_irqrestore(&driver_name_lock, flags); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t filter_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + char buf[NAME_MAX_LEN]; + unsigned long flags; + size_t len; + int i; + + /* + * We can't copy from userspace directly. Access to + * current_driver_name is protected with a write_lock with irqs + * disabled. Since copy_from_user can fault and may sleep we + * need to copy to temporary buffer first + */ + len = min(count, (size_t)(NAME_MAX_LEN - 1)); + if (copy_from_user(buf, userbuf, len)) + return -EFAULT; + + buf[len] = 0; + + write_lock_irqsave(&driver_name_lock, flags); + + /* + * Now handle the string we got from userspace very carefully. + * The rules are: + * - only use the first token we got + * - token delimiter is everything looking like a space + * character (' ', '\n', '\t' ...) + * + */ + if (!isalnum(buf[0])) { + /* + * If the first character userspace gave us is not + * alphanumerical then assume the filter should be + * switched off. 
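 * For instance, from a shell:
 *
 *	echo e1000e > /sys/kernel/debug/dma-api/driver_filter
 *	echo ""     > /sys/kernel/debug/dma-api/driver_filter
 *
 * where the first command restricts reports to one driver and the
 * second (a non-alphanumeric first byte) disables the filter again.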
+ */ + if (current_driver_name[0]) + pr_info("DMA-API: switching off dma-debug driver filter\n"); + current_driver_name[0] = 0; + current_driver = NULL; + goto out_unlock; + } + + /* + * Now parse out the first token and use it as the name for the + * driver to filter for. + */ + for (i = 0; i < NAME_MAX_LEN - 1; ++i) { + current_driver_name[i] = buf[i]; + if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) + break; + } + current_driver_name[i] = 0; + current_driver = NULL; + + pr_info("DMA-API: enable driver filter for driver [%s]\n", + current_driver_name); + +out_unlock: + write_unlock_irqrestore(&driver_name_lock, flags); + + return count; +} + +static const struct file_operations filter_fops = { + .read = filter_read, + .write = filter_write, + .llseek = default_llseek, +}; + +static int dma_debug_fs_init(void) +{ + dma_debug_dent = debugfs_create_dir("dma-api", NULL); + if (!dma_debug_dent) { + pr_err("DMA-API: can not create debugfs directory\n"); + return -ENOMEM; + } + + global_disable_dent = debugfs_create_bool("disabled", 0444, + dma_debug_dent, + &global_disable); + if (!global_disable_dent) + goto out_err; + + error_count_dent = debugfs_create_u32("error_count", 0444, + dma_debug_dent, &error_count); + if (!error_count_dent) + goto out_err; + + show_all_errors_dent = debugfs_create_u32("all_errors", 0644, + dma_debug_dent, + &show_all_errors); + if (!show_all_errors_dent) + goto out_err; + + show_num_errors_dent = debugfs_create_u32("num_errors", 0644, + dma_debug_dent, + &show_num_errors); + if (!show_num_errors_dent) + goto out_err; + + num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, + dma_debug_dent, + &num_free_entries); + if (!num_free_entries_dent) + goto out_err; + + min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, + dma_debug_dent, + &min_free_entries); + if (!min_free_entries_dent) + goto out_err; + + filter_dent = debugfs_create_file("driver_filter", 0644, + dma_debug_dent, NULL, &filter_fops); + if (!filter_dent) + goto out_err; + + return 0; + +out_err: + debugfs_remove_recursive(dma_debug_dent); + + return -ENOMEM; +} + +static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) +{ + struct dma_debug_entry *entry; + unsigned long flags; + int count = 0, i; + + for (i = 0; i < HASH_SIZE; ++i) { + spin_lock_irqsave(&dma_entry_hash[i].lock, flags); + list_for_each_entry(entry, &dma_entry_hash[i].list, list) { + if (entry->dev == dev) { + count += 1; + *out_entry = entry; + } + } + spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); + } + + return count; +} + +static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) +{ + struct device *dev = data; + struct dma_debug_entry *uninitialized_var(entry); + int count; + + if (dma_debug_disabled()) + return 0; + + switch (action) { + case BUS_NOTIFY_UNBOUND_DRIVER: + count = device_dma_allocations(dev, &entry); + if (count == 0) + break; + err_printk(dev, entry, "DMA-API: device driver has pending " + "DMA allocations while released from device " + "[count=%d]\n" + "One of leaked entries details: " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [mapped as %s]\n", + count, entry->dev_addr, entry->size, + dir2name[entry->direction], type2name[entry->type]); + break; + default: + break; + } + + return 0; +} + +void dma_debug_add_bus(struct bus_type *bus) +{ + struct notifier_block *nb; + + if (dma_debug_disabled()) + return; + + nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); + if 
(nb == NULL) { + pr_err("dma_debug_add_bus: out of memory\n"); + return; + } + + nb->notifier_call = dma_debug_device_change; + + bus_register_notifier(bus, nb); +} + +static int dma_debug_init(void) +{ + int i; + + /* Do not use dma_debug_initialized here, since we really want to be + * called to set dma_debug_initialized + */ + if (global_disable) + return 0; + + for (i = 0; i < HASH_SIZE; ++i) { + INIT_LIST_HEAD(&dma_entry_hash[i].list); + spin_lock_init(&dma_entry_hash[i].lock); + } + + if (dma_debug_fs_init() != 0) { + pr_err("DMA-API: error creating debugfs entries - disabling\n"); + global_disable = true; + + return 0; + } + + if (prealloc_memory(nr_prealloc_entries) != 0) { + pr_err("DMA-API: debugging out of memory error - disabled\n"); + global_disable = true; + + return 0; + } + + nr_total_entries = num_free_entries; + + dma_debug_initialized = true; + + pr_info("DMA-API: debugging enabled by kernel config\n"); + return 0; +} +core_initcall(dma_debug_init); + +static __init int dma_debug_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (strncmp(str, "off", 3) == 0) { + pr_info("DMA-API: debugging disabled on kernel command line\n"); + global_disable = true; + } + + return 0; +} + +static __init int dma_debug_entries_cmdline(char *str) +{ + if (!str) + return -EINVAL; + if (!get_option(&str, &nr_prealloc_entries)) + nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; + return 0; +} + +__setup("dma_debug=", dma_debug_cmdline); +__setup("dma_debug_entries=", dma_debug_entries_cmdline); + +static void check_unmap(struct dma_debug_entry *ref) +{ + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + bucket = get_hash_bucket(ref, &flags); + entry = bucket_find_exact(bucket, ref); + + if (!entry) { + /* must drop lock before calling dma_mapping_error */ + put_hash_bucket(bucket, &flags); + + if (dma_mapping_error(ref->dev, ref->dev_addr)) { + err_printk(ref->dev, NULL, + "DMA-API: device driver tries to free an " + "invalid DMA memory address\n"); + } else { + err_printk(ref->dev, NULL, + "DMA-API: device driver tries to free DMA " + "memory it has not allocated [device " + "address=0x%016llx] [size=%llu bytes]\n", + ref->dev_addr, ref->size); + } + return; + } + + if (ref->size != entry->size) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with different size " + "[device address=0x%016llx] [map size=%llu bytes] " + "[unmap size=%llu bytes]\n", + ref->dev_addr, entry->size, ref->size); + } + + if (ref->type != entry->type) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with wrong function " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped as %s] [unmapped as %s]\n", + ref->dev_addr, ref->size, + type2name[entry->type], type2name[ref->type]); + } else if ((entry->type == dma_debug_coherent) && + (phys_addr(ref) != phys_addr(entry))) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with different CPU address " + "[device address=0x%016llx] [size=%llu bytes] " + "[cpu alloc address=0x%016llx] " + "[cpu free address=0x%016llx]", + ref->dev_addr, ref->size, + phys_addr(entry), + phys_addr(ref)); + } + + if (ref->sg_call_ents && ref->type == dma_debug_sg && + ref->sg_call_ents != entry->sg_call_ents) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA sg list with different entry count " + "[map count=%d] [unmap count=%d]\n", + entry->sg_call_ents, ref->sg_call_ents); + } + + /* + * This may be no bug in reality - but most 
implementations of the + * DMA API don't handle this properly, so check for it here + */ + if (ref->direction != entry->direction) { + err_printk(ref->dev, entry, "DMA-API: device driver frees " + "DMA memory with different direction " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [unmapped with %s]\n", + ref->dev_addr, ref->size, + dir2name[entry->direction], + dir2name[ref->direction]); + } + + /* + * Drivers should use dma_mapping_error() to check the returned + * addresses of dma_map_single() and dma_map_page(). + * If not, print this warning message. See Documentation/DMA-API.txt. + */ + if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { + err_printk(ref->dev, entry, + "DMA-API: device driver failed to check map error" + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped as %s]", + ref->dev_addr, ref->size, + type2name[entry->type]); + } + + hash_bucket_del(entry); + dma_entry_free(entry); + + put_hash_bucket(bucket, &flags); +} + +static void check_for_stack(struct device *dev, + struct page *page, size_t offset) +{ + void *addr; + struct vm_struct *stack_vm_area = task_stack_vm_area(current); + + if (!stack_vm_area) { + /* Stack is direct-mapped. */ + if (PageHighMem(page)) + return; + addr = page_address(page) + offset; + if (object_is_on_stack(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr); + } else { + /* Stack is vmalloced. */ + int i; + + for (i = 0; i < stack_vm_area->nr_pages; i++) { + if (page != stack_vm_area->pages[i]) + continue; + + addr = (u8 *)current->stack + i * PAGE_SIZE + offset; + err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr); + break; + } + } +} + +static inline bool overlap(void *addr, unsigned long len, void *start, void *end) +{ + unsigned long a1 = (unsigned long)addr; + unsigned long b1 = a1 + len; + unsigned long a2 = (unsigned long)start; + unsigned long b2 = (unsigned long)end; + + return !(b1 <= a2 || a1 >= b2); +} + +static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) +{ + if (overlap(addr, len, _stext, _etext) || + overlap(addr, len, __start_rodata, __end_rodata)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); +} + +static void check_sync(struct device *dev, + struct dma_debug_entry *ref, + bool to_cpu) +{ + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + bucket = get_hash_bucket(ref, &flags); + + entry = bucket_find_contain(&bucket, ref, &flags); + + if (!entry) { + err_printk(dev, NULL, "DMA-API: device driver tries " + "to sync DMA memory it has not allocated " + "[device address=0x%016llx] [size=%llu bytes]\n", + (unsigned long long)ref->dev_addr, ref->size); + goto out; + } + + if (ref->size > entry->size) { + err_printk(dev, entry, "DMA-API: device driver syncs" + " DMA memory outside allocated range " + "[device address=0x%016llx] " + "[allocation size=%llu bytes] " + "[sync offset+size=%llu]\n", + entry->dev_addr, entry->size, + ref->size); + } + + if (entry->direction == DMA_BIDIRECTIONAL) + goto out; + + if (ref->direction != entry->direction) { + err_printk(dev, entry, "DMA-API: device driver syncs " + "DMA memory with different direction " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + (unsigned long long)ref->dev_addr, entry->size, + dir2name[entry->direction], + dir2name[ref->direction]); + } + + if (to_cpu && 
!(entry->direction == DMA_FROM_DEVICE) && + !(ref->direction == DMA_TO_DEVICE)) + err_printk(dev, entry, "DMA-API: device driver syncs " + "device read-only DMA memory for cpu " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + (unsigned long long)ref->dev_addr, entry->size, + dir2name[entry->direction], + dir2name[ref->direction]); + + if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && + !(ref->direction == DMA_FROM_DEVICE)) + err_printk(dev, entry, "DMA-API: device driver syncs " + "device write-only DMA memory to device " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + (unsigned long long)ref->dev_addr, entry->size, + dir2name[entry->direction], + dir2name[ref->direction]); + + if (ref->sg_call_ents && ref->type == dma_debug_sg && + ref->sg_call_ents != entry->sg_call_ents) { + err_printk(ref->dev, entry, "DMA-API: device driver syncs " + "DMA sg list with different entry count " + "[map count=%d] [sync count=%d]\n", + entry->sg_call_ents, ref->sg_call_ents); + } + +out: + put_hash_bucket(bucket, &flags); +} + +static void check_sg_segment(struct device *dev, struct scatterlist *sg) +{ +#ifdef CONFIG_DMA_API_DEBUG_SG + unsigned int max_seg = dma_get_max_seg_size(dev); + u64 start, end, boundary = dma_get_seg_boundary(dev); + + /* + * Either the driver forgot to set dma_parms appropriately, or + * whoever generated the list forgot to check them. + */ + if (sg->length > max_seg) + err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", + sg->length, max_seg); + /* + * In some cases this could potentially be the DMA API + * implementation's fault, but it would usually imply that + * the scatterlist was built inappropriately to begin with. + */ + start = sg_dma_address(sg); + end = start + sg_dma_len(sg) - 1; + if ((start ^ end) & ~boundary) + err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", + start, end, boundary); +#endif +} + +void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, + size_t size, int direction, dma_addr_t dma_addr, + bool map_single) +{ + struct dma_debug_entry *entry; + + if (unlikely(dma_debug_disabled())) + return; + + if (dma_mapping_error(dev, dma_addr)) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->dev = dev; + entry->type = dma_debug_page; + entry->pfn = page_to_pfn(page); + entry->offset = offset, + entry->dev_addr = dma_addr; + entry->size = size; + entry->direction = direction; + entry->map_err_type = MAP_ERR_NOT_CHECKED; + + if (map_single) + entry->type = dma_debug_single; + + check_for_stack(dev, page, offset); + + if (!PageHighMem(page)) { + void *addr = page_address(page) + offset; + + check_for_illegal_area(dev, addr, size); + } + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_map_page); + +void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + struct dma_debug_entry ref; + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + if (unlikely(dma_debug_disabled())) + return; + + ref.dev = dev; + ref.dev_addr = dma_addr; + bucket = get_hash_bucket(&ref, &flags); + + list_for_each_entry(entry, &bucket->list, list) { + if (!exact_match(&ref, entry)) + continue; + + /* + * The same physical address can be mapped multiple + * times. 
Without a hardware IOMMU this results in the + * same device addresses being put into the dma-debug + * hash multiple times too. This can result in false + * positives being reported. Therefore we implement a + * best-fit algorithm here which updates the first entry + * from the hash which fits the reference value and is + * not currently listed as being checked. + */ + if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { + entry->map_err_type = MAP_ERR_CHECKED; + break; + } + } + + put_hash_bucket(bucket, &flags); +} +EXPORT_SYMBOL(debug_dma_mapping_error); + +void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single) +{ + struct dma_debug_entry ref = { + .type = dma_debug_page, + .dev = dev, + .dev_addr = addr, + .size = size, + .direction = direction, + }; + + if (unlikely(dma_debug_disabled())) + return; + + if (map_single) + ref.type = dma_debug_single; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_unmap_page); + +void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ + struct dma_debug_entry *entry; + struct scatterlist *s; + int i; + + if (unlikely(dma_debug_disabled())) + return; + + for_each_sg(sg, s, mapped_ents, i) { + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_sg; + entry->dev = dev; + entry->pfn = page_to_pfn(sg_page(s)); + entry->offset = s->offset, + entry->size = sg_dma_len(s); + entry->dev_addr = sg_dma_address(s); + entry->direction = direction; + entry->sg_call_ents = nents; + entry->sg_mapped_ents = mapped_ents; + + check_for_stack(dev, sg_page(s), s->offset); + + if (!PageHighMem(sg_page(s))) { + check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); + } + + check_sg_segment(dev, s); + + add_dma_entry(entry); + } +} +EXPORT_SYMBOL(debug_dma_map_sg); + +static int get_nr_mapped_entries(struct device *dev, + struct dma_debug_entry *ref) +{ + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + int mapped_ents; + + bucket = get_hash_bucket(ref, &flags); + entry = bucket_find_exact(bucket, ref); + mapped_ents = 0; + + if (entry) + mapped_ents = entry->sg_mapped_ents; + put_hash_bucket(bucket, &flags); + + return mapped_ents; +} + +void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir) +{ + struct scatterlist *s; + int mapped_ents = 0, i; + + if (unlikely(dma_debug_disabled())) + return; + + for_each_sg(sglist, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .pfn = page_to_pfn(sg_page(s)), + .offset = s->offset, + .dev_addr = sg_dma_address(s), + .size = sg_dma_len(s), + .direction = dir, + .sg_call_ents = nelems, + }; + + if (mapped_ents && i >= mapped_ents) + break; + + if (!i) + mapped_ents = get_nr_mapped_entries(dev, &ref); + + check_unmap(&ref); + } +} +EXPORT_SYMBOL(debug_dma_unmap_sg); + +void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ + struct dma_debug_entry *entry; + + if (unlikely(dma_debug_disabled())) + return; + + if (unlikely(virt == NULL)) + return; + + /* handle vmalloc and linear addresses */ + if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_coherent; + entry->dev = dev; + entry->offset = offset_in_page(virt); + entry->size = size; + entry->dev_addr = dma_addr; + entry->direction = DMA_BIDIRECTIONAL; + + if (is_vmalloc_addr(virt)) + entry->pfn = 
vmalloc_to_pfn(virt); + else + entry->pfn = page_to_pfn(virt_to_page(virt)); + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_alloc_coherent); + +void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ + struct dma_debug_entry ref = { + .type = dma_debug_coherent, + .dev = dev, + .offset = offset_in_page(virt), + .dev_addr = addr, + .size = size, + .direction = DMA_BIDIRECTIONAL, + }; + + /* handle vmalloc and linear addresses */ + if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) + return; + + if (is_vmalloc_addr(virt)) + ref.pfn = vmalloc_to_pfn(virt); + else + ref.pfn = page_to_pfn(virt_to_page(virt)); + + if (unlikely(dma_debug_disabled())) + return; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_free_coherent); + +void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, + int direction, dma_addr_t dma_addr) +{ + struct dma_debug_entry *entry; + + if (unlikely(dma_debug_disabled())) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_resource; + entry->dev = dev; + entry->pfn = PHYS_PFN(addr); + entry->offset = offset_in_page(addr); + entry->size = size; + entry->dev_addr = dma_addr; + entry->direction = direction; + entry->map_err_type = MAP_ERR_NOT_CHECKED; + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_map_resource); + +void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction) +{ + struct dma_debug_entry ref = { + .type = dma_debug_resource, + .dev = dev, + .dev_addr = dma_addr, + .size = size, + .direction = direction, + }; + + if (unlikely(dma_debug_disabled())) + return; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_unmap_resource); + +void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) +{ + struct dma_debug_entry ref; + + if (unlikely(dma_debug_disabled())) + return; + + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, true); +} +EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); + +void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction) +{ + struct dma_debug_entry ref; + + if (unlikely(dma_debug_disabled())) + return; + + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, false); +} +EXPORT_SYMBOL(debug_dma_sync_single_for_device); + +void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, size_t size, + int direction) +{ + struct dma_debug_entry ref; + + if (unlikely(dma_debug_disabled())) + return; + + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = offset + size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, true); +} +EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); + +void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction) +{ + struct dma_debug_entry ref; + + if (unlikely(dma_debug_disabled())) + return; + + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = offset + size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, false); +} 
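
/*
 * (Illustrative sketch, not part of the patch: the two *_range helpers above
 * deliberately set ref.size = offset + size rather than just size, so that
 * check_sync() can compare the end of the synced window against the size of
 * the original mapping.  A driver syncing a sub-range of a hypothetical
 * 4096-byte mapping would exercise this as follows:
 *
 *	dma_addr_t h = dma_map_single(dev, buf, 4096, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, h))
 *		return -ENOMEM;
 *	dma_sync_single_range_for_cpu(dev, h, 512, 256, DMA_FROM_DEVICE);
 *
 * Here ref.size works out to 512 + 256 = 768, safely inside the 4096-byte
 * allocation; a sub-range ending past 4096 would trigger the "syncs DMA
 * memory outside allocated range" warning in check_sync().)
 */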
+EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); + +void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct scatterlist *s; + int mapped_ents = 0, i; + + if (unlikely(dma_debug_disabled())) + return; + + for_each_sg(sg, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .pfn = page_to_pfn(sg_page(s)), + .offset = s->offset, + .dev_addr = sg_dma_address(s), + .size = sg_dma_len(s), + .direction = direction, + .sg_call_ents = nelems, + }; + + if (!i) + mapped_ents = get_nr_mapped_entries(dev, &ref); + + if (i >= mapped_ents) + break; + + check_sync(dev, &ref, true); + } +} +EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); + +void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct scatterlist *s; + int mapped_ents = 0, i; + + if (unlikely(dma_debug_disabled())) + return; + + for_each_sg(sg, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .pfn = page_to_pfn(sg_page(s)), + .offset = s->offset, + .dev_addr = sg_dma_address(s), + .size = sg_dma_len(s), + .direction = direction, + .sg_call_ents = nelems, + }; + if (!i) + mapped_ents = get_nr_mapped_entries(dev, &ref); + + if (i >= mapped_ents) + break; + + check_sync(dev, &ref, false); + } +} +EXPORT_SYMBOL(debug_dma_sync_sg_for_device); + +static int __init dma_debug_driver_setup(char *str) +{ + int i; + + for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { + current_driver_name[i] = *str; + if (*str == 0) + break; + } + + if (current_driver_name[0]) + pr_info("DMA-API: enable driver filter for driver [%s]\n", + current_driver_name); + + + return 1; +} +__setup("dma_debug_driver=", dma_debug_driver_setup); diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c new file mode 100644 index 000000000000..8be8106270c2 --- /dev/null +++ b/kernel/dma/direct.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA operations that map physical memory directly without using an IOMMU or + * flushing caches. + */ +#include +#include +#include +#include +#include +#include +#include + +#define DIRECT_MAPPING_ERROR 0 + +/* + * Most architectures use ZONE_DMA for the first 16 Megabytes, but + * some use it for entirely different regions: + */ +#ifndef ARCH_ZONE_DMA_BITS +#define ARCH_ZONE_DMA_BITS 24 +#endif + +/* + * For AMD SEV all DMA must be to unencrypted addresses. + */ +static inline bool force_dma_unencrypted(void) +{ + return sev_active(); +} + +static bool +check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, + const char *caller) +{ + if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { + if (!dev->dma_mask) { + dev_err(dev, + "%s: call on device without dma_mask\n", + caller); + return false; + } + + if (*dev->dma_mask >= DMA_BIT_MASK(32)) { + dev_err(dev, + "%s: overflow %pad+%zu of device mask %llx\n", + caller, &dma_addr, size, *dev->dma_mask); + } + return false; + } + return true; +} + +static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +{ + dma_addr_t addr = force_dma_unencrypted() ? 
+ __phys_to_dma(dev, phys) : phys_to_dma(dev, phys); + return addr + size - 1 <= dev->coherent_dma_mask; +} + +void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + int page_order = get_order(size); + struct page *page = NULL; + void *ret; + + /* we always manually zero the memory once we are done: */ + gfp &= ~__GFP_ZERO; + + /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ + if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) + gfp |= GFP_DMA; + if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) + gfp |= GFP_DMA32; + +again: + /* CMA can be used only in the context which permits sleeping */ + if (gfpflags_allow_blocking(gfp)) { + page = dma_alloc_from_contiguous(dev, count, page_order, gfp); + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { + dma_release_from_contiguous(dev, page, count); + page = NULL; + } + } + if (!page) + page = alloc_pages_node(dev_to_node(dev), gfp, page_order); + + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { + __free_pages(page, page_order); + page = NULL; + + if (IS_ENABLED(CONFIG_ZONE_DMA32) && + dev->coherent_dma_mask < DMA_BIT_MASK(64) && + !(gfp & (GFP_DMA32 | GFP_DMA))) { + gfp |= GFP_DMA32; + goto again; + } + + if (IS_ENABLED(CONFIG_ZONE_DMA) && + dev->coherent_dma_mask < DMA_BIT_MASK(32) && + !(gfp & GFP_DMA)) { + gfp = (gfp & ~GFP_DMA32) | GFP_DMA; + goto again; + } + } + + if (!page) + return NULL; + ret = page_address(page); + if (force_dma_unencrypted()) { + set_memory_decrypted((unsigned long)ret, 1 << page_order); + *dma_handle = __phys_to_dma(dev, page_to_phys(page)); + } else { + *dma_handle = phys_to_dma(dev, page_to_phys(page)); + } + memset(ret, 0, size); + return ret; +} + +/* + * NOTE: this function must never look at the dma_addr argument, because we want + * to be able to use it as a helper for iommu implementations as well. 
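 *
 * (Editorial usage sketch, not from the patch: with a device whose dma_map_ops
 * is dma_direct_ops, the usual coherent API pairs up with the two functions
 * here, e.g.
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, SZ_64K, &handle, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_64K, cpu, handle);
 *
 * and dma_direct_alloc() above retries with GFP_DMA32 and then GFP_DMA when
 * the first page found fails dma_coherent_ok() for the device's mask.)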
+ */ +void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs) +{ + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned int page_order = get_order(size); + + if (force_dma_unencrypted()) + set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order); + if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count)) + free_pages((unsigned long)cpu_addr, page_order); +} + +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset; + + if (!check_addr(dev, dma_addr, size, __func__)) + return DIRECT_MAPPING_ERROR; + return dma_addr; +} + +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + + sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg)); + if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__)) + return 0; + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +int dma_direct_supported(struct device *dev, u64 mask) +{ +#ifdef CONFIG_ZONE_DMA + if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) + return 0; +#else + /* + * Because 32-bit DMA masks are so common we expect every architecture + * to be able to satisfy them - either by not supporting more physical + * memory, or by providing a ZONE_DMA32. If neither is the case, the + * architecture needs to use an IOMMU instead of the direct mapping. + */ + if (mask < DMA_BIT_MASK(32)) + return 0; +#endif + /* + * Various PCI/PCIe bridges have broken support for > 32bit DMA even + * if the device itself might support it. 
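 *
 * (Editorial example of the resulting policy: a device flagged with
 * dma_32bit_limit is refused any mask wider than 32 bits here, so a driver
 * typically probes downward, e.g.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *
 * which is the standard fallback pattern from Documentation/DMA-API-HOWTO.txt.)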
+ */ + if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32)) + return 0; + return 1; +} + +int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr == DIRECT_MAPPING_ERROR; +} + +const struct dma_map_ops dma_direct_ops = { + .alloc = dma_direct_alloc, + .free = dma_direct_free, + .map_page = dma_direct_map_page, + .map_sg = dma_direct_map_sg, + .dma_supported = dma_direct_supported, + .mapping_error = dma_direct_mapping_error, +}; +EXPORT_SYMBOL(dma_direct_ops); diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c new file mode 100644 index 000000000000..d2a92ddaac4d --- /dev/null +++ b/kernel/dma/mapping.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch-independent dma-mapping routines + * + * Copyright (c) 2006 SUSE Linux Products GmbH + * Copyright (c) 2006 Tejun Heo + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Managed DMA API + */ +struct dma_devres { + size_t size; + void *vaddr; + dma_addr_t dma_handle; + unsigned long attrs; +}; + +static void dmam_release(struct device *dev, void *res) +{ + struct dma_devres *this = res; + + dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, + this->attrs); +} + +static int dmam_match(struct device *dev, void *res, void *match_data) +{ + struct dma_devres *this = res, *match = match_data; + + if (this->vaddr == match->vaddr) { + WARN_ON(this->size != match->size || + this->dma_handle != match->dma_handle); + return 1; + } + return 0; +} + +/** + * dmam_alloc_coherent - Managed dma_alloc_coherent() + * @dev: Device to allocate coherent memory for + * @size: Size of allocation + * @dma_handle: Out argument for allocated DMA handle + * @gfp: Allocation flags + * + * Managed dma_alloc_coherent(). Memory allocated using this function + * will be automatically released on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +void *dmam_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) +{ + struct dma_devres *dr; + void *vaddr; + + dr = devres_alloc(dmam_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + + vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp); + if (!vaddr) { + devres_free(dr); + return NULL; + } + + dr->vaddr = vaddr; + dr->dma_handle = *dma_handle; + dr->size = size; + + devres_add(dev, dr); + + return vaddr; +} +EXPORT_SYMBOL(dmam_alloc_coherent); + +/** + * dmam_free_coherent - Managed dma_free_coherent() + * @dev: Device to free coherent memory for + * @size: Size of allocation + * @vaddr: Virtual address of the memory to free + * @dma_handle: DMA handle of the memory to free + * + * Managed dma_free_coherent(). + */ +void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + struct dma_devres match_data = { size, vaddr, dma_handle }; + + dma_free_coherent(dev, size, vaddr, dma_handle); + WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); +} +EXPORT_SYMBOL(dmam_free_coherent); + +/** + * dmam_alloc_attrs - Managed dma_alloc_attrs() + * @dev: Device to allocate non_coherent memory for + * @size: Size of allocation + * @dma_handle: Out argument for allocated DMA handle + * @gfp: Allocation flags + * @attrs: Flags in the DMA_ATTR_* namespace. + * + * Managed dma_alloc_attrs(). Memory allocated using this function will be + * automatically released on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. 
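 *
 * Example (editorial sketch; the size and attribute are made up):
 *
 *	buf = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma, GFP_KERNEL,
 *			       DMA_ATTR_WRITE_COMBINE);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * No explicit free is needed: devres releases the buffer when the driver
 * detaches, exactly as dmam_release() above does for all dmam_* variants.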
+ */ +void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + struct dma_devres *dr; + void *vaddr; + + dr = devres_alloc(dmam_release, sizeof(*dr), gfp); + if (!dr) + return NULL; + + vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); + if (!vaddr) { + devres_free(dr); + return NULL; + } + + dr->vaddr = vaddr; + dr->dma_handle = *dma_handle; + dr->size = size; + dr->attrs = attrs; + + devres_add(dev, dr); + + return vaddr; +} +EXPORT_SYMBOL(dmam_alloc_attrs); + +#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT + +static void dmam_coherent_decl_release(struct device *dev, void *res) +{ + dma_release_declared_memory(dev); +} + +/** + * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() + * @dev: Device to declare coherent memory for + * @phys_addr: Physical address of coherent memory to be declared + * @device_addr: Device address of coherent memory to be declared + * @size: Size of coherent memory to be declared + * @flags: Flags + * + * Managed dma_declare_coherent_memory(). + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags) +{ + void *res; + int rc; + + res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL); + if (!res) + return -ENOMEM; + + rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, + flags); + if (!rc) + devres_add(dev, res); + else + devres_free(res); + + return rc; +} +EXPORT_SYMBOL(dmam_declare_coherent_memory); + +/** + * dmam_release_declared_memory - Managed dma_release_declared_memory(). + * @dev: Device to release declared coherent memory for + * + * Managed dmam_release_declared_memory(). + */ +void dmam_release_declared_memory(struct device *dev) +{ + WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL)); +} +EXPORT_SYMBOL(dmam_release_declared_memory); + +#endif + +/* + * Create scatter-list for the already allocated DMA buffer. + */ +int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size) +{ + struct page *page = virt_to_page(cpu_addr); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return 0; +} +EXPORT_SYMBOL(dma_common_get_sgtable); + +/* + * Create userspace mapping for the DMA-coherent memory. 
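 *
 * (Hedged sketch of a typical consumer; the driver and its fields are
 * hypothetical, and drivers normally arrive here via dma_mmap_coherent()
 * rather than calling this helper directly:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *md = file->private_data;
 *
 *		return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
 *					 md->dma_handle, md->size);
 *	}
 * )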
+ */ +int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + int ret = -ENXIO; +#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP + unsigned long user_count = vma_pages(vma); + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long off = vma->vm_pgoff; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) + return ret; + + if (off < count && user_count <= (count - off)) + ret = remap_pfn_range(vma, vma->vm_start, + page_to_pfn(virt_to_page(cpu_addr)) + off, + user_count << PAGE_SHIFT, + vma->vm_page_prot); +#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ + + return ret; +} +EXPORT_SYMBOL(dma_common_mmap); + +#ifdef CONFIG_MMU +static struct vm_struct *__dma_common_pages_remap(struct page **pages, + size_t size, unsigned long vm_flags, pgprot_t prot, + const void *caller) +{ + struct vm_struct *area; + + area = get_vm_area_caller(size, vm_flags, caller); + if (!area) + return NULL; + + if (map_vm_area(area, prot, pages)) { + vunmap(area->addr); + return NULL; + } + + return area; +} + +/* + * remaps an array of PAGE_SIZE pages into another vm_area + * Cannot be used in non-sleeping contexts + */ +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller) +{ + struct vm_struct *area; + + area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); + if (!area) + return NULL; + + area->pages = pages; + + return area->addr; +} + +/* + * remaps an allocated contiguous region into another vm_area. + * Cannot be used in non-sleeping contexts + */ + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller) +{ + int i; + struct page **pages; + struct vm_struct *area; + + pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + if (!pages) + return NULL; + + for (i = 0; i < (size >> PAGE_SHIFT); i++) + pages[i] = nth_page(page, i); + + area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller); + + kfree(pages); + + if (!area) + return NULL; + return area->addr; +} + +/* + * unmaps a range previously mapped by dma_common_*_remap + */ +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + + if (!area || (area->flags & vm_flags) != vm_flags) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + return; + } + + unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size)); + vunmap(cpu_addr); +} +#endif + +/* + * enables DMA API use for a device + */ +int dma_configure(struct device *dev) +{ + if (dev->bus->dma_configure) + return dev->bus->dma_configure(dev); + return 0; +} + +void dma_deconfigure(struct device *dev) +{ + of_dma_deconfigure(dev); + acpi_dma_deconfigure(dev); +} diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c new file mode 100644 index 000000000000..79e9a757387f --- /dev/null +++ b/kernel/dma/noncoherent.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Christoph Hellwig. + * + * DMA operations that map physical memory directly without providing cache + * coherence. 
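 *
 * (Editorial note on the contract, inferred from the ops table below: the
 * architecture supplies arch_dma_alloc(), arch_dma_free(), arch_dma_mmap(),
 * arch_dma_cache_sync() and arch_sync_dma_for_device(), plus
 * arch_sync_dma_for_cpu() when it selects ARCH_HAS_SYNC_DMA_FOR_CPU.  For
 * instance, a hook might look like this, where my_cache_wback_inv() stands
 * in for a hypothetical arch cache-maintenance primitive:
 *
 *	void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 *				      size_t size, enum dma_data_direction dir)
 *	{
 *		my_cache_wback_inv(paddr, size);
 *	}
 * )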
+ */ +#include +#include +#include +#include +#include + +static void dma_noncoherent_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_noncoherent_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); +} + +static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + dma_addr_t addr; + + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(dev, page_to_phys(page) + offset, + size, dir); + return addr; +} + +static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); + if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); + return nents; +} + +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU +static void dma_noncoherent_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_noncoherent_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); +} + +static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); +} + +static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); +} +#endif + +const struct dma_map_ops dma_noncoherent_ops = { + .alloc = arch_dma_alloc, + .free = arch_dma_free, + .mmap = arch_dma_mmap, + .sync_single_for_device = dma_noncoherent_sync_single_for_device, + .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, + .map_page = dma_noncoherent_map_page, + .map_sg = dma_noncoherent_map_sg, +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU + .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, + .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, + .unmap_page = dma_noncoherent_unmap_page, + .unmap_sg = dma_noncoherent_unmap_sg, +#endif + .dma_supported = dma_direct_supported, + .mapping_error = dma_direct_mapping_error, + .cache_sync = arch_dma_cache_sync, +}; +EXPORT_SYMBOL(dma_noncoherent_ops); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c new file mode 100644 index 000000000000..04b68d9dffac --- /dev/null +++ b/kernel/dma/swiotlb.c @@ -0,0 +1,1087 @@ +/* + * Dynamic DMA mapping support. + * + * This implementation is a fallback for platforms that do not support + * I/O TLBs (aka DMA address translation hardware). 
+ * Copyright (C) 2000 Asit Mallick
+ * Copyright (C) 2000 Goutham Rao
+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang
+ *
+ * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
+ * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
+ *			unnecessary i-cache flushing.
+ * 04/07/.. ak		Better overflow handling. Assorted fixes.
+ * 05/09/10 linville	Add support for syncing ranges, support syncing for
+ *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ * 08/12/11 beckyb	Add highmem support
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+
+#define CREATE_TRACE_POINTS
+#include
+
+#define OFFSET(val,align) ((unsigned long)	\
+	( (val) & ( (align) - 1)))
+
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
+enum swiotlb_force swiotlb_force;
+
+/*
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
+ */
+static phys_addr_t io_tlb_start, io_tlb_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
+ * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long io_tlb_nslabs;
+
+/*
+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
+ */
+static unsigned long io_tlb_overflow = 32*1024;
+
+static phys_addr_t io_tlb_overflow_buffer;
+
+/*
+ * This is a free list describing the number of free entries available from
+ * each index
+ */
+static unsigned int *io_tlb_list;
+static unsigned int io_tlb_index;
+
+/*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
+/*
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
+ */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+static phys_addr_t *io_tlb_orig_addr;
+
+/*
+ * Protect the above data structures in the map and unmap calls
+ */
+static DEFINE_SPINLOCK(io_tlb_lock);
+
+static int late_alloc;
+
+static int __init
+setup_io_tlb_npages(char *str)
+{
+	if (isdigit(*str)) {
+		io_tlb_nslabs = simple_strtoul(str, &str, 0);
+		/* avoid tail segment of size < IO_TLB_SEGSIZE */
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+	if (*str == ',')
+		++str;
+	if (!strcmp(str, "force")) {
+		swiotlb_force = SWIOTLB_FORCE;
+	} else if (!strcmp(str, "noforce")) {
+		swiotlb_force = SWIOTLB_NO_FORCE;
+		io_tlb_nslabs = 1;
+	}
+
+	return 0;
+}
+early_param("swiotlb", setup_io_tlb_npages);
+/* make io_tlb_overflow tunable too?
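 *
 * (Editorial examples of the syntax parsed above, assuming the standard
 * 2 KB slab size implied by IO_TLB_SHIFT = 11:
 *
 *	swiotlb=32768		reserve 32768 slabs, i.e. 64 MB
 *	swiotlb=32768,force	same size, and bounce every mapping
 *	swiotlb=noforce		never bounce; io_tlb_nslabs drops to 1
 * )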
*/ + +unsigned long swiotlb_nr_tbl(void) +{ + return io_tlb_nslabs; +} +EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); + +unsigned int swiotlb_max_segment(void) +{ + return max_segment; +} +EXPORT_SYMBOL_GPL(swiotlb_max_segment); + +void swiotlb_set_max_segment(unsigned int val) +{ + if (swiotlb_force == SWIOTLB_FORCE) + max_segment = 1; + else + max_segment = rounddown(val, PAGE_SIZE); +} + +/* default to 64MB */ +#define IO_TLB_DEFAULT_SIZE (64UL<<20) +unsigned long swiotlb_size_or_default(void) +{ + unsigned long size; + + size = io_tlb_nslabs << IO_TLB_SHIFT; + + return size ? size : (IO_TLB_DEFAULT_SIZE); +} + +static bool no_iotlb_memory; + +void swiotlb_print_info(void) +{ + unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; + unsigned char *vstart, *vend; + + if (no_iotlb_memory) { + pr_warn("software IO TLB: No low mem\n"); + return; + } + + vstart = phys_to_virt(io_tlb_start); + vend = phys_to_virt(io_tlb_end); + + printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n", + (unsigned long long)io_tlb_start, + (unsigned long long)io_tlb_end, + bytes >> 20, vstart, vend - 1); +} + +/* + * Early SWIOTLB allocation may be too early to allow an architecture to + * perform the desired operations. This function allows the architecture to + * call SWIOTLB when the operations are possible. It needs to be called + * before the SWIOTLB memory is used. + */ +void __init swiotlb_update_mem_attributes(void) +{ + void *vaddr; + unsigned long bytes; + + if (no_iotlb_memory || late_alloc) + return; + + vaddr = phys_to_virt(io_tlb_start); + bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); + memset(vaddr, 0, bytes); + + vaddr = phys_to_virt(io_tlb_overflow_buffer); + bytes = PAGE_ALIGN(io_tlb_overflow); + set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); + memset(vaddr, 0, bytes); +} + +int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) +{ + void *v_overflow_buffer; + unsigned long i, bytes; + + bytes = nslabs << IO_TLB_SHIFT; + + io_tlb_nslabs = nslabs; + io_tlb_start = __pa(tlb); + io_tlb_end = io_tlb_start + bytes; + + /* + * Get the overflow emergency buffer + */ + v_overflow_buffer = memblock_virt_alloc_low_nopanic( + PAGE_ALIGN(io_tlb_overflow), + PAGE_SIZE); + if (!v_overflow_buffer) + return -ENOMEM; + + io_tlb_overflow_buffer = __pa(v_overflow_buffer); + + /* + * Allocate and initialize the free list array. This array is used + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE + * between io_tlb_start and io_tlb_end. + */ + io_tlb_list = memblock_virt_alloc( + PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), + PAGE_SIZE); + io_tlb_orig_addr = memblock_virt_alloc( + PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), + PAGE_SIZE); + for (i = 0; i < io_tlb_nslabs; i++) { + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + } + io_tlb_index = 0; + + if (verbose) + swiotlb_print_info(); + + swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); + return 0; +} + +/* + * Statically reserve bounce buffer space and initialize bounce buffer data + * structures for the software IO TLB used to implement the DMA API. 
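 *
 * (Editorial note on the arithmetic: with IO_TLB_DEFAULT_SIZE of 64 MB and
 * IO_TLB_SHIFT of 11, the default below works out to
 *
 *	io_tlb_nslabs = (64 MB >> 11) = 32768 slabs of 2 KB each,
 *
 * aligned to IO_TLB_SEGSIZE before the low memblock allocation.)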
+ */ +void __init +swiotlb_init(int verbose) +{ + size_t default_size = IO_TLB_DEFAULT_SIZE; + unsigned char *vstart; + unsigned long bytes; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + bytes = io_tlb_nslabs << IO_TLB_SHIFT; + + /* Get IO TLB memory from the low pages */ + vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); + if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) + return; + + if (io_tlb_start) + memblock_free_early(io_tlb_start, + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + pr_warn("Cannot allocate SWIOTLB buffer"); + no_iotlb_memory = true; +} + +/* + * Systems with larger DMA zones (those that don't support ISA) can + * initialize the swiotlb later using the slab allocator if needed. + * This should be just like above, but with some error catching. + */ +int +swiotlb_late_init_with_default_size(size_t default_size) +{ + unsigned long bytes, req_nslabs = io_tlb_nslabs; + unsigned char *vstart = NULL; + unsigned int order; + int rc = 0; + + if (!io_tlb_nslabs) { + io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); + io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); + } + + /* + * Get IO TLB memory from the low pages + */ + order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); + io_tlb_nslabs = SLABS_PER_PAGE << order; + bytes = io_tlb_nslabs << IO_TLB_SHIFT; + + while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { + vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, + order); + if (vstart) + break; + order--; + } + + if (!vstart) { + io_tlb_nslabs = req_nslabs; + return -ENOMEM; + } + if (order != get_order(bytes)) { + printk(KERN_WARNING "Warning: only able to allocate %ld MB " + "for software IO TLB\n", (PAGE_SIZE << order) >> 20); + io_tlb_nslabs = SLABS_PER_PAGE << order; + } + rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs); + if (rc) + free_pages((unsigned long)vstart, order); + + return rc; +} + +int +swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) +{ + unsigned long i, bytes; + unsigned char *v_overflow_buffer; + + bytes = nslabs << IO_TLB_SHIFT; + + io_tlb_nslabs = nslabs; + io_tlb_start = virt_to_phys(tlb); + io_tlb_end = io_tlb_start + bytes; + + set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); + memset(tlb, 0, bytes); + + /* + * Get the overflow emergency buffer + */ + v_overflow_buffer = (void *)__get_free_pages(GFP_DMA, + get_order(io_tlb_overflow)); + if (!v_overflow_buffer) + goto cleanup2; + + set_memory_decrypted((unsigned long)v_overflow_buffer, + io_tlb_overflow >> PAGE_SHIFT); + memset(v_overflow_buffer, 0, io_tlb_overflow); + io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); + + /* + * Allocate and initialize the free list array. This array is used + * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE + * between io_tlb_start and io_tlb_end. 
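 *
 * (Worked example, editorial: io_tlb_list[i] records how many contiguous
 * free slabs start at index i, counted only up to the next IO_TLB_SEGSIZE
 * boundary.  With IO_TLB_SEGSIZE = 128, a fully free segment is initialized
 * as 128, 127, ..., 2, 1, letting swiotlb_tbl_map_single() test
 * io_tlb_list[index] >= nslots with a single read.)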
+ */ + io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * sizeof(int))); + if (!io_tlb_list) + goto cleanup3; + + io_tlb_orig_addr = (phys_addr_t *) + __get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * + sizeof(phys_addr_t))); + if (!io_tlb_orig_addr) + goto cleanup4; + + for (i = 0; i < io_tlb_nslabs; i++) { + io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + } + io_tlb_index = 0; + + swiotlb_print_info(); + + late_alloc = 1; + + swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); + + return 0; + +cleanup4: + free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * + sizeof(int))); + io_tlb_list = NULL; +cleanup3: + free_pages((unsigned long)v_overflow_buffer, + get_order(io_tlb_overflow)); + io_tlb_overflow_buffer = 0; +cleanup2: + io_tlb_end = 0; + io_tlb_start = 0; + io_tlb_nslabs = 0; + max_segment = 0; + return -ENOMEM; +} + +void __init swiotlb_exit(void) +{ + if (!io_tlb_orig_addr) + return; + + if (late_alloc) { + free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer), + get_order(io_tlb_overflow)); + free_pages((unsigned long)io_tlb_orig_addr, + get_order(io_tlb_nslabs * sizeof(phys_addr_t))); + free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * + sizeof(int))); + free_pages((unsigned long)phys_to_virt(io_tlb_start), + get_order(io_tlb_nslabs << IO_TLB_SHIFT)); + } else { + memblock_free_late(io_tlb_overflow_buffer, + PAGE_ALIGN(io_tlb_overflow)); + memblock_free_late(__pa(io_tlb_orig_addr), + PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); + memblock_free_late(__pa(io_tlb_list), + PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); + memblock_free_late(io_tlb_start, + PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); + } + io_tlb_nslabs = 0; + max_segment = 0; +} + +int is_swiotlb_buffer(phys_addr_t paddr) +{ + return paddr >= io_tlb_start && paddr < io_tlb_end; +} + +/* + * Bounce: copy the swiotlb buffer back to the original dma location + */ +static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, + size_t size, enum dma_data_direction dir) +{ + unsigned long pfn = PFN_DOWN(orig_addr); + unsigned char *vaddr = phys_to_virt(tlb_addr); + + if (PageHighMem(pfn_to_page(pfn))) { + /* The buffer does not have a mapping. Map it in and copy */ + unsigned int offset = orig_addr & ~PAGE_MASK; + char *buffer; + unsigned int sz = 0; + unsigned long flags; + + while (size) { + sz = min_t(size_t, PAGE_SIZE - offset, size); + + local_irq_save(flags); + buffer = kmap_atomic(pfn_to_page(pfn)); + if (dir == DMA_TO_DEVICE) + memcpy(vaddr, buffer + offset, sz); + else + memcpy(buffer + offset, vaddr, sz); + kunmap_atomic(buffer); + local_irq_restore(flags); + + size -= sz; + pfn++; + vaddr += sz; + offset = 0; + } + } else if (dir == DMA_TO_DEVICE) { + memcpy(vaddr, phys_to_virt(orig_addr), size); + } else { + memcpy(phys_to_virt(orig_addr), vaddr, size); + } +} + +phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, + dma_addr_t tbl_dma_addr, + phys_addr_t orig_addr, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + unsigned long flags; + phys_addr_t tlb_addr; + unsigned int nslots, stride, index, wrap; + int i; + unsigned long mask; + unsigned long offset_slots; + unsigned long max_slots; + + if (no_iotlb_memory) + panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); + + if (mem_encrypt_active()) + pr_warn_once("%s is active and system is using DMA bounce buffers\n", + sme_active() ? 
"SME" : "SEV"); + + mask = dma_get_seg_boundary(hwdev); + + tbl_dma_addr &= mask; + + offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + + /* + * Carefully handle integer overflow which can occur when mask == ~0UL. + */ + max_slots = mask + 1 + ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT + : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); + + /* + * For mappings greater than or equal to a page, we limit the stride + * (and hence alignment) to a page size. + */ + nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + if (size >= PAGE_SIZE) + stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); + else + stride = 1; + + BUG_ON(!nslots); + + /* + * Find suitable number of IO TLB entries size that will fit this + * request and allocate a buffer from that IO TLB pool. + */ + spin_lock_irqsave(&io_tlb_lock, flags); + index = ALIGN(io_tlb_index, stride); + if (index >= io_tlb_nslabs) + index = 0; + wrap = index; + + do { + while (iommu_is_span_boundary(index, nslots, offset_slots, + max_slots)) { + index += stride; + if (index >= io_tlb_nslabs) + index = 0; + if (index == wrap) + goto not_found; + } + + /* + * If we find a slot that indicates we have 'nslots' number of + * contiguous buffers, we allocate the buffers from that slot + * and mark the entries as '0' indicating unavailable. + */ + if (io_tlb_list[index] >= nslots) { + int count = 0; + + for (i = index; i < (int) (index + nslots); i++) + io_tlb_list[i] = 0; + for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT); + + /* + * Update the indices to avoid searching in the next + * round. + */ + io_tlb_index = ((index + nslots) < io_tlb_nslabs + ? (index + nslots) : 0); + + goto found; + } + index += stride; + if (index >= io_tlb_nslabs) + index = 0; + } while (index != wrap); + +not_found: + spin_unlock_irqrestore(&io_tlb_lock, flags); + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) + dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); + return SWIOTLB_MAP_ERROR; +found: + spin_unlock_irqrestore(&io_tlb_lock, flags); + + /* + * Save away the mapping from the original address to the DMA address. + * This is needed when we sync the memory. Then we sync the buffer if + * needed. + */ + for (i = 0; i < nslots; i++) + io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) + swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); + + return tlb_addr; +} + +/* + * Allocates bounce buffer and returns its physical address. + */ +static phys_addr_t +map_single(struct device *hwdev, phys_addr_t phys, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + dma_addr_t start_dma_addr; + + if (swiotlb_force == SWIOTLB_NO_FORCE) { + dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n", + &phys); + return SWIOTLB_MAP_ERROR; + } + + start_dma_addr = __phys_to_dma(hwdev, io_tlb_start); + return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, + dir, attrs); +} + +/* + * tlb_addr is the physical address of the bounce buffer to unmap. 
+ */
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+			      size_t size, enum dma_data_direction dir,
+			      unsigned long attrs)
+{
+	unsigned long flags;
+	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+	/*
+	 * First, sync the memory before unmapping the entry
+	 */
+	if (orig_addr != INVALID_PHYS_ADDR &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+
+	/*
+	 * Return the buffer to the free list by setting the corresponding
+	 * entries to indicate the number of contiguous entries available.
+	 * While returning the entries to the free list, we merge the entries
+	 * with slots below and above the pool being returned.
+	 */
+	spin_lock_irqsave(&io_tlb_lock, flags);
+	{
+		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+			 io_tlb_list[index + nslots] : 0);
+		/*
+		 * Step 1: return the slots to the free list, merging the
+		 * slots with succeeding slots
+		 */
+		for (i = index + nslots - 1; i >= index; i--) {
+			io_tlb_list[i] = ++count;
+			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+		}
+		/*
+		 * Step 2: merge the returned slots with the preceding slots,
+		 * if available (non-zero)
+		 */
+		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+			io_tlb_list[i] = ++count;
+	}
+	spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
+
+void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+			     size_t size, enum dma_data_direction dir,
+			     enum dma_sync_target target)
+{
+	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+	if (orig_addr == INVALID_PHYS_ADDR)
+		return;
+	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
+
+	switch (target) {
+	case SYNC_FOR_CPU:
+		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+			swiotlb_bounce(orig_addr, tlb_addr,
+				       size, DMA_FROM_DEVICE);
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
+		break;
+	case SYNC_FOR_DEVICE:
+		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+			swiotlb_bounce(orig_addr, tlb_addr,
+				       size, DMA_TO_DEVICE);
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+		size_t size)
+{
+	u64 mask = DMA_BIT_MASK(32);
+
+	if (dev && dev->coherent_dma_mask)
+		mask = dev->coherent_dma_mask;
+	return addr + size - 1 <= mask;
+}
+
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		unsigned long attrs)
+{
+	phys_addr_t phys_addr;
+
+	if (swiotlb_force == SWIOTLB_NO_FORCE)
+		goto out_warn;
+
+	phys_addr = swiotlb_tbl_map_single(dev,
+			__phys_to_dma(dev, io_tlb_start),
+			0, size, DMA_FROM_DEVICE, attrs);
+	if (phys_addr == SWIOTLB_MAP_ERROR)
+		goto out_warn;
+
+	*dma_handle = __phys_to_dma(dev, phys_addr);
+	if (!dma_coherent_ok(dev, *dma_handle, size))
+		goto out_unmap;
+
+	memset(phys_to_virt(phys_addr), 0, size);
+	return phys_to_virt(phys_addr);
+
+out_unmap:
+	dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+		 (unsigned long long)dev->coherent_dma_mask,
+		 (unsigned long long)*dma_handle);
+
+	/*
+	 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
+	 */
+	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+			DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+		dev_warn(dev,
+			 "swiotlb: coherent allocation failed, size=%zu\n",
+			 size);
+		dump_stack();
+	}
+	return NULL;
+}
+
+static bool swiotlb_free_buffer(struct device *dev, size_t size,
+		dma_addr_t dma_addr)
+{
+	phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	if (!is_swiotlb_buffer(phys_addr))
+		return false;
+
+	/*
+	 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+	 * DMA_ATTR_SKIP_CPU_SYNC is optional.
+	 */
+	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+				 DMA_ATTR_SKIP_CPU_SYNC);
+	return true;
+}
+
+static void
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
+{
+	if (swiotlb_force == SWIOTLB_NO_FORCE)
+		return;
+
+	/*
+	 * Ran out of IOMMU space for this operation. This is very bad.
+	 * Unfortunately the drivers cannot handle this operation properly
+	 * unless they check for dma_mapping_error() (most don't).
+	 * When the mapping is small enough, return a static buffer to limit
+	 * the damage, or panic when the transfer is too big.
+	 */
+	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+			    size);
+
+	if (size <= io_tlb_overflow || !do_panic)
+		return;
+
+	if (dir == DMA_BIDIRECTIONAL)
+		panic("DMA: Random memory could be DMA accessed\n");
+	if (dir == DMA_FROM_DEVICE)
+		panic("DMA: Random memory could be DMA written\n");
+	if (dir == DMA_TO_DEVICE)
+		panic("DMA: Random memory could be DMA read\n");
+}
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode. The
+ * physical address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_page or a swiotlb_sync_single_for_* call is performed.
+ */
+dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+			    unsigned long offset, size_t size,
+			    enum dma_data_direction dir,
+			    unsigned long attrs)
+{
+	phys_addr_t map, phys = page_to_phys(page) + offset;
+	dma_addr_t dev_addr = phys_to_dma(dev, phys);
+
+	BUG_ON(dir == DMA_NONE);
+	/*
+	 * If the address happens to be in the device's DMA window,
+	 * we can safely return the device addr and not worry about bounce
+	 * buffering it.
+	 */
+	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
+		return dev_addr;
+
+	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
+	/* Oh well, have to allocate and map a bounce buffer. */
+	map = map_single(dev, phys, size, dir, attrs);
+	if (map == SWIOTLB_MAP_ERROR) {
+		swiotlb_full(dev, size, dir, 1);
+		return __phys_to_dma(dev, io_tlb_overflow_buffer);
+	}
+
+	dev_addr = __phys_to_dma(dev, map);
+
+	/* Ensure that the address returned is DMA'ble */
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
+
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return __phys_to_dma(dev, io_tlb_overflow_buffer);
+}
+
+/*
+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_page call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
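 *
 * (Hedged end-to-end sketch using the generic DMA API, which lands in these
 * swiotlb ops on platforms that install them:
 *
 *	h = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... the device DMAs into the buffer; the device owns it ...
 *	dma_sync_single_for_cpu(dev, h, len, DMA_FROM_DEVICE);
 *	... the cpu may now read buf ...
 *	dma_sync_single_for_device(dev, h, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 *	dma_unmap_single(dev, h, len, DMA_FROM_DEVICE);
 * )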
+ */
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+			 size_t size, enum dma_data_direction dir,
+			 unsigned long attrs)
+{
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
+	BUG_ON(dir == DMA_NONE);
+
+	if (is_swiotlb_buffer(paddr)) {
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+		return;
+	}
+
+	if (dir != DMA_FROM_DEVICE)
+		return;
+
+	/*
+	 * phys_to_virt doesn't work with highmem pages, but we could
+	 * call dma_mark_clean() with a highmem page here. However, we
+	 * are fine since dma_mark_clean() is null on POWERPC. We can
+	 * make dma_mark_clean() take a physical address if necessary.
+	 */
+	dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+			size_t size, enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	unmap_single(hwdev, dev_addr, size, dir, attrs);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to tear down the dma mapping, you must
+ * call this function before doing so. At the next point you give the dma
+ * address back to the card, you must first perform a
+ * swiotlb_sync_single_for_device, and then the device again owns the buffer.
+ */
+static void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
+{
+	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
+	BUG_ON(dir == DMA_NONE);
+
+	if (is_swiotlb_buffer(paddr)) {
+		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+		return;
+	}
+
+	if (dir != DMA_FROM_DEVICE)
+		return;
+
+	dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+			    size_t size, enum dma_data_direction dir)
+{
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+			       size_t size, enum dma_data_direction dir)
+{
+	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_page
+ * interface. Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
+ * same here.
+ */
+int
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		     enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(dir == DMA_NONE);
+
+	for_each_sg(sgl, sg, nelems, i) {
+		phys_addr_t paddr = sg_phys(sg);
+		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
+
+		if (swiotlb_force == SWIOTLB_FORCE ||
+		    !dma_capable(hwdev, dev_addr, sg->length)) {
+			phys_addr_t map = map_single(hwdev, sg_phys(sg),
+						     sg->length, dir, attrs);
+			if (map == SWIOTLB_MAP_ERROR) {
+				/* Don't panic here, we expect map_sg users
+				   to do proper error handling.
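				   An illustrative caller-side pattern
				   (editorial, not from the patch):
					n = dma_map_sg(dev, sgl, nents, dir);
					if (!n)
						goto unmap_and_bail;
				   since a return of 0 signals total failure.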
*/ + swiotlb_full(hwdev, sg->length, dir, 0); + attrs |= DMA_ATTR_SKIP_CPU_SYNC; + swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, + attrs); + sg_dma_len(sgl) = 0; + return 0; + } + sg->dma_address = __phys_to_dma(hwdev, map); + } else + sg->dma_address = dev_addr; + sg_dma_len(sg) = sg->length; + } + return nelems; +} + +/* + * Unmap a set of streaming mode DMA translations. Again, cpu read rules + * concerning calls here are the same as for swiotlb_unmap_page() above. + */ +void +swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + BUG_ON(dir == DMA_NONE); + + for_each_sg(sgl, sg, nelems, i) + unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, + attrs); +} + +/* + * Make physical memory consistent for a set of streaming mode DMA translations + * after a transfer. + * + * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules + * and usage. + */ +static void +swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + enum dma_sync_target target) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + swiotlb_sync_single(hwdev, sg->dma_address, + sg_dma_len(sg), dir, target); +} + +void +swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); +} + +void +swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) +{ + swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); +} + +int +swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) +{ + return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer)); +} + +/* + * Return whether the given device DMA address mask can be supported + * properly. For example, if your device can only drive the low 24-bits + * during bus mastering, then you would pass 0x00ffffff as the mask to + * this function. + */ +int +swiotlb_dma_supported(struct device *hwdev, u64 mask) +{ + return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; +} + +void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) +{ + void *vaddr; + + /* temporary workaround: */ + if (gfp & __GFP_NOWARN) + attrs |= DMA_ATTR_NO_WARN; + + /* + * Don't print a warning when the first allocation attempt fails. + * swiotlb_alloc_coherent() will print a warning when the DMA memory + * allocation ultimately failed. 
*/ + gfp |= __GFP_NOWARN; + + vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); + if (!vaddr) + vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs); + return vaddr; +} + +void swiotlb_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_addr, unsigned long attrs) +{ + if (!swiotlb_free_buffer(dev, size, dma_addr)) + dma_direct_free(dev, size, vaddr, dma_addr, attrs); +} + +const struct dma_map_ops swiotlb_dma_ops = { + .mapping_error = swiotlb_dma_mapping_error, + .alloc = swiotlb_alloc, + .free = swiotlb_free, + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, + .sync_single_for_device = swiotlb_sync_single_for_device, + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = swiotlb_sync_sg_for_device, + .map_sg = swiotlb_map_sg_attrs, + .unmap_sg = swiotlb_unmap_sg_attrs, + .map_page = swiotlb_map_page, + .unmap_page = swiotlb_unmap_page, + .dma_supported = dma_direct_supported, +}; diff --git a/kernel/dma/virt.c b/kernel/dma/virt.c new file mode 100644 index 000000000000..631ddec4b60a --- /dev/null +++ b/kernel/dma/virt.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA operations that map to virtual addresses without flushing memory. + */ +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +static void *dma_virt_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) + *dma_handle = (uintptr_t)ret; + return ret; +} + +static void dma_virt_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return (uintptr_t)(page_address(page) + offset); +} + +static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + sg_dma_address(sg) = (uintptr_t)sg_virt(sg); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +const struct dma_map_ops dma_virt_ops = { + .alloc = dma_virt_alloc, + .free = dma_virt_free, + .map_page = dma_virt_map_page, + .map_sg = dma_virt_map_sg, +}; +EXPORT_SYMBOL(dma_virt_ops); diff --git a/lib/Kconfig b/lib/Kconfig index 809fdd155739..803fcbced729 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -420,60 +420,15 @@ config HAS_IOPORT_MAP depends on HAS_IOMEM && !NO_IOPORT_MAP default y -config HAS_DMA - bool - depends on !NO_DMA - default y +source "kernel/dma/Kconfig" config SGL_ALLOC bool default n -config NEED_SG_DMA_LENGTH - bool - -config NEED_DMA_MAP_STATE - bool - -config ARCH_DMA_ADDR_T_64BIT - def_bool 64BIT || PHYS_ADDR_T_64BIT - config IOMMU_HELPER bool -config ARCH_HAS_SYNC_DMA_FOR_DEVICE - bool - -config ARCH_HAS_SYNC_DMA_FOR_CPU - bool - select NEED_DMA_MAP_STATE - -config DMA_DIRECT_OPS - bool - depends on HAS_DMA - -config DMA_NONCOHERENT_OPS - bool - depends on HAS_DMA - select DMA_DIRECT_OPS - -config DMA_NONCOHERENT_MMAP - bool - depends on DMA_NONCOHERENT_OPS - -config DMA_NONCOHERENT_CACHE_SYNC - bool - depends on DMA_NONCOHERENT_OPS - -config DMA_VIRT_OPS - bool - depends on HAS_DMA - -config SWIOTLB - bool - select DMA_DIRECT_OPS - select NEED_DMA_MAP_STATE - config CHECK_SIGNATURE bool diff --git a/lib/Makefile
b/lib/Makefile index 5e0e160c9242..8153fdab287f 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -29,9 +29,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ lib-$(CONFIG_PRINTK) += dump_stack.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o -obj-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o -obj-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o -obj-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o obj-y += lockref.o @@ -148,7 +145,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o obj-$(CONFIG_AUDIT_GENERIC) += audit.o obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o -obj-$(CONFIG_SWIOTLB) += swiotlb.o obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o @@ -169,8 +165,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o obj-$(CONFIG_LRU_CACHE) += lru_cache.o -obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o - obj-$(CONFIG_GENERIC_CSUM) += checksum.o obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o diff --git a/lib/dma-debug.c b/lib/dma-debug.c deleted file mode 100644 index c007d25bee09..000000000000 --- a/lib/dma-debug.c +++ /dev/null @@ -1,1773 +0,0 @@ -/* - * Copyright (C) 2008 Advanced Micro Devices, Inc. - * - * Author: Joerg Roedel - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define HASH_SIZE 1024ULL -#define HASH_FN_SHIFT 13 -#define HASH_FN_MASK (HASH_SIZE - 1) - -/* allow architectures to override this if absolutely required */ -#ifndef PREALLOC_DMA_DEBUG_ENTRIES -#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) -#endif - -enum { - dma_debug_single, - dma_debug_page, - dma_debug_sg, - dma_debug_coherent, - dma_debug_resource, -}; - -enum map_err_types { - MAP_ERR_CHECK_NOT_APPLICABLE, - MAP_ERR_NOT_CHECKED, - MAP_ERR_CHECKED, -}; - -#define DMA_DEBUG_STACKTRACE_ENTRIES 5 - -/** - * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping - * @list: node on pre-allocated free_entries list - * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent - * @type: single, page, sg, coherent - * @pfn: page frame of the start address - * @offset: offset of mapping relative to pfn - * @size: length of the mapping - * @direction: enum dma_data_direction - * @sg_call_ents: 'nents' from dma_map_sg - * @sg_mapped_ents: 'mapped_ents' from dma_map_sg - * @map_err_type: track whether dma_mapping_error() was checked - * @stacktrace: support backtraces when a violation is detected - */ -struct dma_debug_entry { - struct list_head list; - struct device *dev; - int type; - unsigned long pfn; - size_t offset; - u64 dev_addr; - u64 size; - int direction; - int sg_call_ents; - int sg_mapped_ents; - enum map_err_types map_err_type; -#ifdef CONFIG_STACKTRACE - struct stack_trace 
stacktrace; - unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; -#endif -}; - -typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); - -struct hash_bucket { - struct list_head list; - spinlock_t lock; -} ____cacheline_aligned_in_smp; - -/* Hash list to save the allocated dma addresses */ -static struct hash_bucket dma_entry_hash[HASH_SIZE]; -/* List of pre-allocated dma_debug_entry's */ -static LIST_HEAD(free_entries); -/* Lock for the list above */ -static DEFINE_SPINLOCK(free_entries_lock); - -/* Global disable flag - will be set in case of an error */ -static bool global_disable __read_mostly; - -/* Early initialization disable flag, set at the end of dma_debug_init */ -static bool dma_debug_initialized __read_mostly; - -static inline bool dma_debug_disabled(void) -{ - return global_disable || !dma_debug_initialized; -} - -/* Global error count */ -static u32 error_count; - -/* Global error show enable */ -static u32 show_all_errors __read_mostly; -/* Number of errors to show */ -static u32 show_num_errors = 1; - -static u32 num_free_entries; -static u32 min_free_entries; -static u32 nr_total_entries; - -/* number of preallocated entries requested by kernel cmdline */ -static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; - -/* debugfs dentries for the stuff above */ -static struct dentry *dma_debug_dent __read_mostly; -static struct dentry *global_disable_dent __read_mostly; -static struct dentry *error_count_dent __read_mostly; -static struct dentry *show_all_errors_dent __read_mostly; -static struct dentry *show_num_errors_dent __read_mostly; -static struct dentry *num_free_entries_dent __read_mostly; -static struct dentry *min_free_entries_dent __read_mostly; -static struct dentry *filter_dent __read_mostly; - -/* per-driver filter related state */ - -#define NAME_MAX_LEN 64 - -static char current_driver_name[NAME_MAX_LEN] __read_mostly; -static struct device_driver *current_driver __read_mostly; - -static DEFINE_RWLOCK(driver_name_lock); - -static const char *const maperr2str[] = { - [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", - [MAP_ERR_NOT_CHECKED] = "dma map error not checked", - [MAP_ERR_CHECKED] = "dma map error checked", -}; - -static const char *type2name[5] = { "single", "page", - "scatter-gather", "coherent", - "resource" }; - -static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", - "DMA_FROM_DEVICE", "DMA_NONE" }; - -/* - * The access to some variables in this macro is racy. We can't use atomic_t - * here because all these variables are exported to debugfs. Some of them are - * even writeable. This is also the reason why a lock won't help much. But - * anyway, the races are no big deal. Here is why: - * - * error_count: the addition is racy, but the worst thing that can happen is - * that we don't count some errors - * show_num_errors: the subtraction is racy. Also no big deal because in - * the worst case this will result in one warning more in the - * system log than the user configured. This variable is - * writeable via debugfs.
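Why this tolerance is acceptable is easier to see in isolation. A stand-alone model of the policy (not kernel code; the variable names mirror the ones above, the harness is invented):

#include <stdio.h>

static unsigned int error_count;
static unsigned int show_all_errors;	/* debugfs-writable in the real code */
static unsigned int show_num_errors = 1;

static void report(const char *msg)
{
	error_count += 1;		/* racy add: may drop a count, harmless */
	if (show_all_errors || show_num_errors > 0)
		fprintf(stderr, "DMA-API: %s\n", msg);
	if (!show_all_errors && show_num_errors > 0)
		show_num_errors -= 1;	/* racy: at worst one extra warning */
}

int main(void)
{
	report("first violation");	/* printed */
	report("second violation");	/* suppressed: budget used up */
	printf("error_count=%u\n", error_count);
	return 0;
}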
- */ -static inline void dump_entry_trace(struct dma_debug_entry *entry) -{ -#ifdef CONFIG_STACKTRACE - if (entry) { - pr_warning("Mapped at:\n"); - print_stack_trace(&entry->stacktrace, 0); - } -#endif -} - -static bool driver_filter(struct device *dev) -{ - struct device_driver *drv; - unsigned long flags; - bool ret; - - /* driver filter off */ - if (likely(!current_driver_name[0])) - return true; - - /* driver filter on and initialized */ - if (current_driver && dev && dev->driver == current_driver) - return true; - - /* driver filter on, but we can't filter on a NULL device... */ - if (!dev) - return false; - - if (current_driver || !current_driver_name[0]) - return false; - - /* driver filter on but not yet initialized */ - drv = dev->driver; - if (!drv) - return false; - - /* lock to protect against change of current_driver_name */ - read_lock_irqsave(&driver_name_lock, flags); - - ret = false; - if (drv->name && - strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { - current_driver = drv; - ret = true; - } - - read_unlock_irqrestore(&driver_name_lock, flags); - - return ret; -} - -#define err_printk(dev, entry, format, arg...) do { \ - error_count += 1; \ - if (driver_filter(dev) && \ - (show_all_errors || show_num_errors > 0)) { \ - WARN(1, "%s %s: " format, \ - dev ? dev_driver_string(dev) : "NULL", \ - dev ? dev_name(dev) : "NULL", ## arg); \ - dump_entry_trace(entry); \ - } \ - if (!show_all_errors && show_num_errors > 0) \ - show_num_errors -= 1; \ - } while (0); - -/* - * Hash related functions - * - * Every DMA-API request is saved into a struct dma_debug_entry. To - * have quick access to these structs they are stored into a hash. - */ -static int hash_fn(struct dma_debug_entry *entry) -{ - /* - * Hash function is based on the dma address. - * We use bits 20-27 here as the index into the hash - */ - return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; -} - -/* - * Request exclusive access to a hash bucket for a given dma_debug_entry. - */ -static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, - unsigned long *flags) - __acquires(&dma_entry_hash[idx].lock) -{ - int idx = hash_fn(entry); - unsigned long __flags; - - spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); - *flags = __flags; - return &dma_entry_hash[idx]; -} - -/* - * Give up exclusive access to the hash bucket - */ -static void put_hash_bucket(struct hash_bucket *bucket, - unsigned long *flags) - __releases(&bucket->lock) -{ - unsigned long __flags = *flags; - - spin_unlock_irqrestore(&bucket->lock, __flags); -} - -static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) -{ - return ((a->dev_addr == b->dev_addr) && - (a->dev == b->dev)) ? true : false; -} - -static bool containing_match(struct dma_debug_entry *a, - struct dma_debug_entry *b) -{ - if (a->dev != b->dev) - return false; - - if ((b->dev_addr <= a->dev_addr) && - ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) - return true; - - return false; -} - -/* - * Search a given entry in the hash bucket list - */ -static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, - struct dma_debug_entry *ref, - match_fn match) -{ - struct dma_debug_entry *entry, *ret = NULL; - int matches = 0, match_lvl, last_lvl = -1; - - list_for_each_entry(entry, &bucket->list, list) { - if (!match(ref, entry)) - continue; - - /* - * Some drivers map the same physical address multiple - * times. 
Without a hardware IOMMU this results in the - * same device addresses being put into the dma-debug - * hash multiple times too. This can result in false - * positives being reported. Therefore we implement a - * best-fit algorithm here which returns the entry from - * the hash that best fits the reference value - * instead of the first fit. - */ - matches += 1; - match_lvl = 0; - entry->size == ref->size ? ++match_lvl : 0; - entry->type == ref->type ? ++match_lvl : 0; - entry->direction == ref->direction ? ++match_lvl : 0; - entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; - - if (match_lvl == 4) { - /* perfect-fit - return the result */ - return entry; - } else if (match_lvl > last_lvl) { - /* - * We found an entry that fits better than the - * previous one or it is the 1st match. - */ - last_lvl = match_lvl; - ret = entry; - } - } - - /* - * If we have multiple matches but no perfect-fit, just return - * NULL. - */ - ret = (matches == 1) ? ret : NULL; - - return ret; -} - -static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, - struct dma_debug_entry *ref) -{ - return __hash_bucket_find(bucket, ref, exact_match); -} - -static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, - struct dma_debug_entry *ref, - unsigned long *flags) -{ - - unsigned int max_range = dma_get_max_seg_size(ref->dev); - struct dma_debug_entry *entry, index = *ref; - unsigned int range = 0; - - while (range <= max_range) { - entry = __hash_bucket_find(*bucket, ref, containing_match); - - if (entry) - return entry; - - /* - * Nothing found, go back a hash bucket - */ - put_hash_bucket(*bucket, flags); - range += (1 << HASH_FN_SHIFT); - index.dev_addr -= (1 << HASH_FN_SHIFT); - *bucket = get_hash_bucket(&index, flags); - } - - return NULL; -} - -/* - * Add an entry to a hash bucket - */ -static void hash_bucket_add(struct hash_bucket *bucket, - struct dma_debug_entry *entry) -{ - list_add_tail(&entry->list, &bucket->list); -} - -/* - * Remove entry from a hash bucket list - */ -static void hash_bucket_del(struct dma_debug_entry *entry) -{ - list_del(&entry->list); -} - -static unsigned long long phys_addr(struct dma_debug_entry *entry) -{ - if (entry->type == dma_debug_resource) - return __pfn_to_phys(entry->pfn) + entry->offset; - - return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; -} - -/* - * Dump mapping entries for debugging purposes - */ -void debug_dma_dump_mappings(struct device *dev) -{ - int idx; - - for (idx = 0; idx < HASH_SIZE; idx++) { - struct hash_bucket *bucket = &dma_entry_hash[idx]; - struct dma_debug_entry *entry; - unsigned long flags; - - spin_lock_irqsave(&bucket->lock, flags); - - list_for_each_entry(entry, &bucket->list, list) { - if (!dev || dev == entry->dev) { - dev_info(entry->dev, - "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n", - type2name[entry->type], idx, - phys_addr(entry), entry->pfn, - entry->dev_addr, entry->size, - dir2name[entry->direction], - maperr2str[entry->map_err_type]); - } - } - - spin_unlock_irqrestore(&bucket->lock, flags); - } -} - -/* - * For each mapping (initial cacheline in the case of - * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a - * scatterlist, or the cacheline specified in dma_map_single) insert - * into this tree using the cacheline as the key. At - * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If - * the entry already exists at insertion time add a tag as a reference - * count for the overlapping mappings. For now, the overlap tracking
For now, the overlap tracking - * just ensures that 'unmaps' balance 'maps' before marking the - * cacheline idle, but we should also be flagging overlaps as an API - * violation. - * - * Memory usage is mostly constrained by the maximum number of available - * dma-debug entries in that we need a free dma_debug_entry before - * inserting into the tree. In the case of dma_map_page and - * dma_alloc_coherent there is only one dma_debug_entry and one - * dma_active_cacheline entry to track per event. dma_map_sg(), on the - * other hand, consumes a single dma_debug_entry, but inserts 'nents' - * entries into the tree. - * - * At any time debug_dma_assert_idle() can be called to trigger a - * warning if any cachelines in the given page are in the active set. - */ -static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); -static DEFINE_SPINLOCK(radix_lock); -#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) -#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) -#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) - -static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) -{ - return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + - (entry->offset >> L1_CACHE_SHIFT); -} - -static int active_cacheline_read_overlap(phys_addr_t cln) -{ - int overlap = 0, i; - - for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) - if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) - overlap |= 1 << i; - return overlap; -} - -static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) -{ - int i; - - if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) - return overlap; - - for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) - if (overlap & 1 << i) - radix_tree_tag_set(&dma_active_cacheline, cln, i); - else - radix_tree_tag_clear(&dma_active_cacheline, cln, i); - - return overlap; -} - -static void active_cacheline_inc_overlap(phys_addr_t cln) -{ - int overlap = active_cacheline_read_overlap(cln); - - overlap = active_cacheline_set_overlap(cln, ++overlap); - - /* If we overflowed the overlap counter then we're potentially - * leaking dma-mappings. Otherwise, if maps and unmaps are - * balanced then this overflow may cause false negatives in - * debug_dma_assert_idle() as the cacheline may be marked idle - * prematurely. - */ - WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, - "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n", - ACTIVE_CACHELINE_MAX_OVERLAP, &cln); -} - -static int active_cacheline_dec_overlap(phys_addr_t cln) -{ - int overlap = active_cacheline_read_overlap(cln); - - return active_cacheline_set_overlap(cln, --overlap); -} - -static int active_cacheline_insert(struct dma_debug_entry *entry) -{ - phys_addr_t cln = to_cacheline_number(entry); - unsigned long flags; - int rc; - - /* If the device is not writing memory then we don't have any - * concerns about the cpu consuming stale data. This mitigates - * legitimate usages of overlapping mappings. 
- */ - if (entry->direction == DMA_TO_DEVICE) - return 0; - - spin_lock_irqsave(&radix_lock, flags); - rc = radix_tree_insert(&dma_active_cacheline, cln, entry); - if (rc == -EEXIST) - active_cacheline_inc_overlap(cln); - spin_unlock_irqrestore(&radix_lock, flags); - - return rc; -} - -static void active_cacheline_remove(struct dma_debug_entry *entry) -{ - phys_addr_t cln = to_cacheline_number(entry); - unsigned long flags; - - /* ...mirror the insert case */ - if (entry->direction == DMA_TO_DEVICE) - return; - - spin_lock_irqsave(&radix_lock, flags); - /* since we are counting overlaps the final put of the - * cacheline will occur when the overlap count is 0. - * active_cacheline_dec_overlap() returns -1 in that case - */ - if (active_cacheline_dec_overlap(cln) < 0) - radix_tree_delete(&dma_active_cacheline, cln); - spin_unlock_irqrestore(&radix_lock, flags); -} - -/** - * debug_dma_assert_idle() - assert that a page is not undergoing dma - * @page: page to lookup in the dma_active_cacheline tree - * - * Place a call to this routine in cases where the cpu touching the page - * before the dma completes (page is dma_unmapped) will lead to data - * corruption. - */ -void debug_dma_assert_idle(struct page *page) -{ - static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; - struct dma_debug_entry *entry = NULL; - void **results = (void **) &ents; - unsigned int nents, i; - unsigned long flags; - phys_addr_t cln; - - if (dma_debug_disabled()) - return; - - if (!page) - return; - - cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; - spin_lock_irqsave(&radix_lock, flags); - nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, - CACHELINES_PER_PAGE); - for (i = 0; i < nents; i++) { - phys_addr_t ent_cln = to_cacheline_number(ents[i]); - - if (ent_cln == cln) { - entry = ents[i]; - break; - } else if (ent_cln >= cln + CACHELINES_PER_PAGE) - break; - } - spin_unlock_irqrestore(&radix_lock, flags); - - if (!entry) - return; - - cln = to_cacheline_number(entry); - err_printk(entry->dev, entry, - "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n", - &cln); -} - -/* - * Wrapper function for adding an entry to the hash. - * This function takes care of locking itself. - */ -static void add_dma_entry(struct dma_debug_entry *entry) -{ - struct hash_bucket *bucket; - unsigned long flags; - int rc; - - bucket = get_hash_bucket(entry, &flags); - hash_bucket_add(bucket, entry); - put_hash_bucket(bucket, &flags); - - rc = active_cacheline_insert(entry); - if (rc == -ENOMEM) { - pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n"); - global_disable = true; - } - - /* TODO: report -EEXIST errors here as overlapping mappings are - * not supported by the DMA API - */ -} - -static struct dma_debug_entry *__dma_entry_alloc(void) -{ - struct dma_debug_entry *entry; - - entry = list_entry(free_entries.next, struct dma_debug_entry, list); - list_del(&entry->list); - memset(entry, 0, sizeof(*entry)); - - num_free_entries -= 1; - if (num_free_entries < min_free_entries) - min_free_entries = num_free_entries; - - return entry; -} - -/* struct dma_entry allocator - * - * The next two functions implement the allocator for - * struct dma_debug_entries. 
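For context before the allocator: a sketch of the call-site pattern debug_dma_assert_idle() above is designed for, guarding a CPU write to a possibly DMA-active page (the helper function is invented; a lowmem page is assumed so page_address() is valid):

#include <linux/dma-debug.h>
#include <linux/mm.h>
#include <linux/string.h>

static void example_cpu_touch(struct page *page, const void *src, size_t len)
{
	debug_dma_assert_idle(page);	/* warns if any cacheline is DMA-active */
	memcpy(page_address(page), src, len);	/* CPU write is now known safe */
}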
- */ -static struct dma_debug_entry *dma_entry_alloc(void) -{ - struct dma_debug_entry *entry; - unsigned long flags; - - spin_lock_irqsave(&free_entries_lock, flags); - - if (list_empty(&free_entries)) { - global_disable = true; - spin_unlock_irqrestore(&free_entries_lock, flags); - pr_err("DMA-API: debugging out of memory - disabling\n"); - return NULL; - } - - entry = __dma_entry_alloc(); - - spin_unlock_irqrestore(&free_entries_lock, flags); - -#ifdef CONFIG_STACKTRACE - entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; - entry->stacktrace.entries = entry->st_entries; - entry->stacktrace.skip = 2; - save_stack_trace(&entry->stacktrace); -#endif - - return entry; -} - -static void dma_entry_free(struct dma_debug_entry *entry) -{ - unsigned long flags; - - active_cacheline_remove(entry); - - /* - * add to beginning of the list - this way the entries are - * more likely cache hot when they are reallocated. - */ - spin_lock_irqsave(&free_entries_lock, flags); - list_add(&entry->list, &free_entries); - num_free_entries += 1; - spin_unlock_irqrestore(&free_entries_lock, flags); -} - -int dma_debug_resize_entries(u32 num_entries) -{ - int i, delta, ret = 0; - unsigned long flags; - struct dma_debug_entry *entry; - LIST_HEAD(tmp); - - spin_lock_irqsave(&free_entries_lock, flags); - - if (nr_total_entries < num_entries) { - delta = num_entries - nr_total_entries; - - spin_unlock_irqrestore(&free_entries_lock, flags); - - for (i = 0; i < delta; i++) { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - break; - - list_add_tail(&entry->list, &tmp); - } - - spin_lock_irqsave(&free_entries_lock, flags); - - list_splice(&tmp, &free_entries); - nr_total_entries += i; - num_free_entries += i; - } else { - delta = nr_total_entries - num_entries; - - for (i = 0; i < delta && !list_empty(&free_entries); i++) { - entry = __dma_entry_alloc(); - kfree(entry); - } - - nr_total_entries -= i; - } - - if (nr_total_entries != num_entries) - ret = 1; - - spin_unlock_irqrestore(&free_entries_lock, flags); - - return ret; -} - -/* - * DMA-API debugging init code - * - * The init code does two things: - * 1. Initialize core data structures - * 2. Preallocate a given number of dma_debug_entry structs - */ - -static int prealloc_memory(u32 num_entries) -{ - struct dma_debug_entry *entry, *next_entry; - int i; - - for (i = 0; i < num_entries; ++i) { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - goto out_err; - - list_add_tail(&entry->list, &free_entries); - } - - num_free_entries = num_entries; - min_free_entries = num_entries; - - pr_info("DMA-API: preallocated %d debug entries\n", num_entries); - - return 0; - -out_err: - - list_for_each_entry_safe(entry, next_entry, &free_entries, list) { - list_del(&entry->list); - kfree(entry); - } - - return -ENOMEM; -} - -static ssize_t filter_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - char buf[NAME_MAX_LEN + 1]; - unsigned long flags; - int len; - - if (!current_driver_name[0]) - return 0; - - /* - * We can't copy to userspace directly because current_driver_name can - * only be read under the driver_name_lock with irqs disabled. So - * create a temporary copy first. 
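The filter that filter_read()/filter_write() expose is driven entirely from userspace. A hedged sketch (assuming debugfs is mounted at /sys/kernel/debug; the dma-api/driver_filter path comes from dma_debug_fs_init() below):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_dma_debug_filter(const char *driver)
{
	int fd = open("/sys/kernel/debug/dma-api/driver_filter", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, driver, strlen(driver));	/* first token becomes the filter */
	close(fd);
	return n < 0 ? -1 : 0;
}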
- */ - read_lock_irqsave(&driver_name_lock, flags); - len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name); - read_unlock_irqrestore(&driver_name_lock, flags); - - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static ssize_t filter_write(struct file *file, const char __user *userbuf, - size_t count, loff_t *ppos) -{ - char buf[NAME_MAX_LEN]; - unsigned long flags; - size_t len; - int i; - - /* - * We can't copy from userspace directly. Access to - * current_driver_name is protected with a write_lock with irqs - * disabled. Since copy_from_user can fault and may sleep we - * need to copy to temporary buffer first - */ - len = min(count, (size_t)(NAME_MAX_LEN - 1)); - if (copy_from_user(buf, userbuf, len)) - return -EFAULT; - - buf[len] = 0; - - write_lock_irqsave(&driver_name_lock, flags); - - /* - * Now handle the string we got from userspace very carefully. - * The rules are: - * - only use the first token we got - * - token delimiter is everything looking like a space - * character (' ', '\n', '\t' ...) - * - */ - if (!isalnum(buf[0])) { - /* - * If the first character userspace gave us is not - * alphanumerical then assume the filter should be - * switched off. - */ - if (current_driver_name[0]) - pr_info("DMA-API: switching off dma-debug driver filter\n"); - current_driver_name[0] = 0; - current_driver = NULL; - goto out_unlock; - } - - /* - * Now parse out the first token and use it as the name for the - * driver to filter for. - */ - for (i = 0; i < NAME_MAX_LEN - 1; ++i) { - current_driver_name[i] = buf[i]; - if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) - break; - } - current_driver_name[i] = 0; - current_driver = NULL; - - pr_info("DMA-API: enable driver filter for driver [%s]\n", - current_driver_name); - -out_unlock: - write_unlock_irqrestore(&driver_name_lock, flags); - - return count; -} - -static const struct file_operations filter_fops = { - .read = filter_read, - .write = filter_write, - .llseek = default_llseek, -}; - -static int dma_debug_fs_init(void) -{ - dma_debug_dent = debugfs_create_dir("dma-api", NULL); - if (!dma_debug_dent) { - pr_err("DMA-API: can not create debugfs directory\n"); - return -ENOMEM; - } - - global_disable_dent = debugfs_create_bool("disabled", 0444, - dma_debug_dent, - &global_disable); - if (!global_disable_dent) - goto out_err; - - error_count_dent = debugfs_create_u32("error_count", 0444, - dma_debug_dent, &error_count); - if (!error_count_dent) - goto out_err; - - show_all_errors_dent = debugfs_create_u32("all_errors", 0644, - dma_debug_dent, - &show_all_errors); - if (!show_all_errors_dent) - goto out_err; - - show_num_errors_dent = debugfs_create_u32("num_errors", 0644, - dma_debug_dent, - &show_num_errors); - if (!show_num_errors_dent) - goto out_err; - - num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, - dma_debug_dent, - &num_free_entries); - if (!num_free_entries_dent) - goto out_err; - - min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, - dma_debug_dent, - &min_free_entries); - if (!min_free_entries_dent) - goto out_err; - - filter_dent = debugfs_create_file("driver_filter", 0644, - dma_debug_dent, NULL, &filter_fops); - if (!filter_dent) - goto out_err; - - return 0; - -out_err: - debugfs_remove_recursive(dma_debug_dent); - - return -ENOMEM; -} - -static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) -{ - struct dma_debug_entry *entry; - unsigned long flags; - int count = 0, i; - - for (i = 0; i < HASH_SIZE; ++i) { 
- spin_lock_irqsave(&dma_entry_hash[i].lock, flags); - list_for_each_entry(entry, &dma_entry_hash[i].list, list) { - if (entry->dev == dev) { - count += 1; - *out_entry = entry; - } - } - spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); - } - - return count; -} - -static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) -{ - struct device *dev = data; - struct dma_debug_entry *uninitialized_var(entry); - int count; - - if (dma_debug_disabled()) - return 0; - - switch (action) { - case BUS_NOTIFY_UNBOUND_DRIVER: - count = device_dma_allocations(dev, &entry); - if (count == 0) - break; - err_printk(dev, entry, "DMA-API: device driver has pending " - "DMA allocations while released from device " - "[count=%d]\n" - "One of leaked entries details: " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped with %s] [mapped as %s]\n", - count, entry->dev_addr, entry->size, - dir2name[entry->direction], type2name[entry->type]); - break; - default: - break; - } - - return 0; -} - -void dma_debug_add_bus(struct bus_type *bus) -{ - struct notifier_block *nb; - - if (dma_debug_disabled()) - return; - - nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); - if (nb == NULL) { - pr_err("dma_debug_add_bus: out of memory\n"); - return; - } - - nb->notifier_call = dma_debug_device_change; - - bus_register_notifier(bus, nb); -} - -static int dma_debug_init(void) -{ - int i; - - /* Do not use dma_debug_initialized here, since we really want to be - * called to set dma_debug_initialized - */ - if (global_disable) - return 0; - - for (i = 0; i < HASH_SIZE; ++i) { - INIT_LIST_HEAD(&dma_entry_hash[i].list); - spin_lock_init(&dma_entry_hash[i].lock); - } - - if (dma_debug_fs_init() != 0) { - pr_err("DMA-API: error creating debugfs entries - disabling\n"); - global_disable = true; - - return 0; - } - - if (prealloc_memory(nr_prealloc_entries) != 0) { - pr_err("DMA-API: debugging out of memory error - disabled\n"); - global_disable = true; - - return 0; - } - - nr_total_entries = num_free_entries; - - dma_debug_initialized = true; - - pr_info("DMA-API: debugging enabled by kernel config\n"); - return 0; -} -core_initcall(dma_debug_init); - -static __init int dma_debug_cmdline(char *str) -{ - if (!str) - return -EINVAL; - - if (strncmp(str, "off", 3) == 0) { - pr_info("DMA-API: debugging disabled on kernel command line\n"); - global_disable = true; - } - - return 0; -} - -static __init int dma_debug_entries_cmdline(char *str) -{ - if (!str) - return -EINVAL; - if (!get_option(&str, &nr_prealloc_entries)) - nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; - return 0; -} - -__setup("dma_debug=", dma_debug_cmdline); -__setup("dma_debug_entries=", dma_debug_entries_cmdline); - -static void check_unmap(struct dma_debug_entry *ref) -{ - struct dma_debug_entry *entry; - struct hash_bucket *bucket; - unsigned long flags; - - bucket = get_hash_bucket(ref, &flags); - entry = bucket_find_exact(bucket, ref); - - if (!entry) { - /* must drop lock before calling dma_mapping_error */ - put_hash_bucket(bucket, &flags); - - if (dma_mapping_error(ref->dev, ref->dev_addr)) { - err_printk(ref->dev, NULL, - "DMA-API: device driver tries to free an " - "invalid DMA memory address\n"); - } else { - err_printk(ref->dev, NULL, - "DMA-API: device driver tries to free DMA " - "memory it has not allocated [device " - "address=0x%016llx] [size=%llu bytes]\n", - ref->dev_addr, ref->size); - } - return; - } - - if (ref->size != entry->size) { - err_printk(ref->dev, entry, "DMA-API: device 
driver frees " - "DMA memory with different size " - "[device address=0x%016llx] [map size=%llu bytes] " - "[unmap size=%llu bytes]\n", - ref->dev_addr, entry->size, ref->size); - } - - if (ref->type != entry->type) { - err_printk(ref->dev, entry, "DMA-API: device driver frees " - "DMA memory with wrong function " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped as %s] [unmapped as %s]\n", - ref->dev_addr, ref->size, - type2name[entry->type], type2name[ref->type]); - } else if ((entry->type == dma_debug_coherent) && - (phys_addr(ref) != phys_addr(entry))) { - err_printk(ref->dev, entry, "DMA-API: device driver frees " - "DMA memory with different CPU address " - "[device address=0x%016llx] [size=%llu bytes] " - "[cpu alloc address=0x%016llx] " - "[cpu free address=0x%016llx]", - ref->dev_addr, ref->size, - phys_addr(entry), - phys_addr(ref)); - } - - if (ref->sg_call_ents && ref->type == dma_debug_sg && - ref->sg_call_ents != entry->sg_call_ents) { - err_printk(ref->dev, entry, "DMA-API: device driver frees " - "DMA sg list with different entry count " - "[map count=%d] [unmap count=%d]\n", - entry->sg_call_ents, ref->sg_call_ents); - } - - /* - * This may be no bug in reality - but most implementations of the - * DMA API don't handle this properly, so check for it here - */ - if (ref->direction != entry->direction) { - err_printk(ref->dev, entry, "DMA-API: device driver frees " - "DMA memory with different direction " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped with %s] [unmapped with %s]\n", - ref->dev_addr, ref->size, - dir2name[entry->direction], - dir2name[ref->direction]); - } - - /* - * Drivers should use dma_mapping_error() to check the returned - * addresses of dma_map_single() and dma_map_page(). - * If not, print this warning message. See Documentation/DMA-API.txt. - */ - if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { - err_printk(ref->dev, entry, - "DMA-API: device driver failed to check map error" - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped as %s]", - ref->dev_addr, ref->size, - type2name[entry->type]); - } - - hash_bucket_del(entry); - dma_entry_free(entry); - - put_hash_bucket(bucket, &flags); -} - -static void check_for_stack(struct device *dev, - struct page *page, size_t offset) -{ - void *addr; - struct vm_struct *stack_vm_area = task_stack_vm_area(current); - - if (!stack_vm_area) { - /* Stack is direct-mapped. */ - if (PageHighMem(page)) - return; - addr = page_address(page) + offset; - if (object_is_on_stack(addr)) - err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr); - } else { - /* Stack is vmalloced. 
*/ - int i; - - for (i = 0; i < stack_vm_area->nr_pages; i++) { - if (page != stack_vm_area->pages[i]) - continue; - - addr = (u8 *)current->stack + i * PAGE_SIZE + offset; - err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr); - break; - } - } -} - -static inline bool overlap(void *addr, unsigned long len, void *start, void *end) -{ - unsigned long a1 = (unsigned long)addr; - unsigned long b1 = a1 + len; - unsigned long a2 = (unsigned long)start; - unsigned long b2 = (unsigned long)end; - - return !(b1 <= a2 || a1 >= b2); -} - -static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) -{ - if (overlap(addr, len, _stext, _etext) || - overlap(addr, len, __start_rodata, __end_rodata)) - err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); -} - -static void check_sync(struct device *dev, - struct dma_debug_entry *ref, - bool to_cpu) -{ - struct dma_debug_entry *entry; - struct hash_bucket *bucket; - unsigned long flags; - - bucket = get_hash_bucket(ref, &flags); - - entry = bucket_find_contain(&bucket, ref, &flags); - - if (!entry) { - err_printk(dev, NULL, "DMA-API: device driver tries " - "to sync DMA memory it has not allocated " - "[device address=0x%016llx] [size=%llu bytes]\n", - (unsigned long long)ref->dev_addr, ref->size); - goto out; - } - - if (ref->size > entry->size) { - err_printk(dev, entry, "DMA-API: device driver syncs" - " DMA memory outside allocated range " - "[device address=0x%016llx] " - "[allocation size=%llu bytes] " - "[sync offset+size=%llu]\n", - entry->dev_addr, entry->size, - ref->size); - } - - if (entry->direction == DMA_BIDIRECTIONAL) - goto out; - - if (ref->direction != entry->direction) { - err_printk(dev, entry, "DMA-API: device driver syncs " - "DMA memory with different direction " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped with %s] [synced with %s]\n", - (unsigned long long)ref->dev_addr, entry->size, - dir2name[entry->direction], - dir2name[ref->direction]); - } - - if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && - !(ref->direction == DMA_TO_DEVICE)) - err_printk(dev, entry, "DMA-API: device driver syncs " - "device read-only DMA memory for cpu " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped with %s] [synced with %s]\n", - (unsigned long long)ref->dev_addr, entry->size, - dir2name[entry->direction], - dir2name[ref->direction]); - - if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && - !(ref->direction == DMA_FROM_DEVICE)) - err_printk(dev, entry, "DMA-API: device driver syncs " - "device write-only DMA memory to device " - "[device address=0x%016llx] [size=%llu bytes] " - "[mapped with %s] [synced with %s]\n", - (unsigned long long)ref->dev_addr, entry->size, - dir2name[entry->direction], - dir2name[ref->direction]); - - if (ref->sg_call_ents && ref->type == dma_debug_sg && - ref->sg_call_ents != entry->sg_call_ents) { - err_printk(ref->dev, entry, "DMA-API: device driver syncs " - "DMA sg list with different entry count " - "[map count=%d] [sync count=%d]\n", - entry->sg_call_ents, ref->sg_call_ents); - } - -out: - put_hash_bucket(bucket, &flags); -} - -static void check_sg_segment(struct device *dev, struct scatterlist *sg) -{ -#ifdef CONFIG_DMA_API_DEBUG_SG - unsigned int max_seg = dma_get_max_seg_size(dev); - u64 start, end, boundary = dma_get_seg_boundary(dev); - - /* - * Either the driver forgot to set dma_parms appropriately, or - * whoever generated the list 
forgot to check them. - */ - if (sg->length > max_seg) - err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", - sg->length, max_seg); - /* - * In some cases this could potentially be the DMA API - * implementation's fault, but it would usually imply that - * the scatterlist was built inappropriately to begin with. - */ - start = sg_dma_address(sg); - end = start + sg_dma_len(sg) - 1; - if ((start ^ end) & ~boundary) - err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", - start, end, boundary); -#endif -} - -void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, - size_t size, int direction, dma_addr_t dma_addr, - bool map_single) -{ - struct dma_debug_entry *entry; - - if (unlikely(dma_debug_disabled())) - return; - - if (dma_mapping_error(dev, dma_addr)) - return; - - entry = dma_entry_alloc(); - if (!entry) - return; - - entry->dev = dev; - entry->type = dma_debug_page; - entry->pfn = page_to_pfn(page); - entry->offset = offset, - entry->dev_addr = dma_addr; - entry->size = size; - entry->direction = direction; - entry->map_err_type = MAP_ERR_NOT_CHECKED; - - if (map_single) - entry->type = dma_debug_single; - - check_for_stack(dev, page, offset); - - if (!PageHighMem(page)) { - void *addr = page_address(page) + offset; - - check_for_illegal_area(dev, addr, size); - } - - add_dma_entry(entry); -} -EXPORT_SYMBOL(debug_dma_map_page); - -void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - struct dma_debug_entry ref; - struct dma_debug_entry *entry; - struct hash_bucket *bucket; - unsigned long flags; - - if (unlikely(dma_debug_disabled())) - return; - - ref.dev = dev; - ref.dev_addr = dma_addr; - bucket = get_hash_bucket(&ref, &flags); - - list_for_each_entry(entry, &bucket->list, list) { - if (!exact_match(&ref, entry)) - continue; - - /* - * The same physical address can be mapped multiple - * times. Without a hardware IOMMU this results in the - * same device addresses being put into the dma-debug - * hash multiple times too. This can result in false - * positives being reported. Therefore we implement a - * best-fit algorithm here which updates the first entry - * from the hash which fits the reference value and is - * not currently listed as being checked. 
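This bookkeeping exists to enforce a driver-side rule: every address returned by dma_map_single() or dma_map_page() must pass through dma_mapping_error() before use. The pattern it checks for, as a hypothetical fragment:

#include <linux/dma-mapping.h>

static int example_map(struct device *dev, void *buf, size_t len,
		       dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* flips entry to MAP_ERR_CHECKED */
		return -ENOMEM;
	*out = addr;
	return 0;
}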
- */ - if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { - entry->map_err_type = MAP_ERR_CHECKED; - break; - } - } - - put_hash_bucket(bucket, &flags); -} -EXPORT_SYMBOL(debug_dma_mapping_error); - -void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, int direction, bool map_single) -{ - struct dma_debug_entry ref = { - .type = dma_debug_page, - .dev = dev, - .dev_addr = addr, - .size = size, - .direction = direction, - }; - - if (unlikely(dma_debug_disabled())) - return; - - if (map_single) - ref.type = dma_debug_single; - - check_unmap(&ref); -} -EXPORT_SYMBOL(debug_dma_unmap_page); - -void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, int mapped_ents, int direction) -{ - struct dma_debug_entry *entry; - struct scatterlist *s; - int i; - - if (unlikely(dma_debug_disabled())) - return; - - for_each_sg(sg, s, mapped_ents, i) { - entry = dma_entry_alloc(); - if (!entry) - return; - - entry->type = dma_debug_sg; - entry->dev = dev; - entry->pfn = page_to_pfn(sg_page(s)); - entry->offset = s->offset, - entry->size = sg_dma_len(s); - entry->dev_addr = sg_dma_address(s); - entry->direction = direction; - entry->sg_call_ents = nents; - entry->sg_mapped_ents = mapped_ents; - - check_for_stack(dev, sg_page(s), s->offset); - - if (!PageHighMem(sg_page(s))) { - check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); - } - - check_sg_segment(dev, s); - - add_dma_entry(entry); - } -} -EXPORT_SYMBOL(debug_dma_map_sg); - -static int get_nr_mapped_entries(struct device *dev, - struct dma_debug_entry *ref) -{ - struct dma_debug_entry *entry; - struct hash_bucket *bucket; - unsigned long flags; - int mapped_ents; - - bucket = get_hash_bucket(ref, &flags); - entry = bucket_find_exact(bucket, ref); - mapped_ents = 0; - - if (entry) - mapped_ents = entry->sg_mapped_ents; - put_hash_bucket(bucket, &flags); - - return mapped_ents; -} - -void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, int dir) -{ - struct scatterlist *s; - int mapped_ents = 0, i; - - if (unlikely(dma_debug_disabled())) - return; - - for_each_sg(sglist, s, nelems, i) { - - struct dma_debug_entry ref = { - .type = dma_debug_sg, - .dev = dev, - .pfn = page_to_pfn(sg_page(s)), - .offset = s->offset, - .dev_addr = sg_dma_address(s), - .size = sg_dma_len(s), - .direction = dir, - .sg_call_ents = nelems, - }; - - if (mapped_ents && i >= mapped_ents) - break; - - if (!i) - mapped_ents = get_nr_mapped_entries(dev, &ref); - - check_unmap(&ref); - } -} -EXPORT_SYMBOL(debug_dma_unmap_sg); - -void debug_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t dma_addr, void *virt) -{ - struct dma_debug_entry *entry; - - if (unlikely(dma_debug_disabled())) - return; - - if (unlikely(virt == NULL)) - return; - - /* handle vmalloc and linear addresses */ - if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) - return; - - entry = dma_entry_alloc(); - if (!entry) - return; - - entry->type = dma_debug_coherent; - entry->dev = dev; - entry->offset = offset_in_page(virt); - entry->size = size; - entry->dev_addr = dma_addr; - entry->direction = DMA_BIDIRECTIONAL; - - if (is_vmalloc_addr(virt)) - entry->pfn = vmalloc_to_pfn(virt); - else - entry->pfn = page_to_pfn(virt_to_page(virt)); - - add_dma_entry(entry); -} -EXPORT_SYMBOL(debug_dma_alloc_coherent); - -void debug_dma_free_coherent(struct device *dev, size_t size, - void *virt, dma_addr_t addr) -{ - struct dma_debug_entry ref = { - .type = dma_debug_coherent, - .dev = dev, - .offset = offset_in_page(virt), - 
.dev_addr = addr, - .size = size, - .direction = DMA_BIDIRECTIONAL, - }; - - /* handle vmalloc and linear addresses */ - if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) - return; - - if (is_vmalloc_addr(virt)) - ref.pfn = vmalloc_to_pfn(virt); - else - ref.pfn = page_to_pfn(virt_to_page(virt)); - - if (unlikely(dma_debug_disabled())) - return; - - check_unmap(&ref); -} -EXPORT_SYMBOL(debug_dma_free_coherent); - -void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, - int direction, dma_addr_t dma_addr) -{ - struct dma_debug_entry *entry; - - if (unlikely(dma_debug_disabled())) - return; - - entry = dma_entry_alloc(); - if (!entry) - return; - - entry->type = dma_debug_resource; - entry->dev = dev; - entry->pfn = PHYS_PFN(addr); - entry->offset = offset_in_page(addr); - entry->size = size; - entry->dev_addr = dma_addr; - entry->direction = direction; - entry->map_err_type = MAP_ERR_NOT_CHECKED; - - add_dma_entry(entry); -} -EXPORT_SYMBOL(debug_dma_map_resource); - -void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, - size_t size, int direction) -{ - struct dma_debug_entry ref = { - .type = dma_debug_resource, - .dev = dev, - .dev_addr = dma_addr, - .size = size, - .direction = direction, - }; - - if (unlikely(dma_debug_disabled())) - return; - - check_unmap(&ref); -} -EXPORT_SYMBOL(debug_dma_unmap_resource); - -void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, int direction) -{ - struct dma_debug_entry ref; - - if (unlikely(dma_debug_disabled())) - return; - - ref.type = dma_debug_single; - ref.dev = dev; - ref.dev_addr = dma_handle; - ref.size = size; - ref.direction = direction; - ref.sg_call_ents = 0; - - check_sync(dev, &ref, true); -} -EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); - -void debug_dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, - int direction) -{ - struct dma_debug_entry ref; - - if (unlikely(dma_debug_disabled())) - return; - - ref.type = dma_debug_single; - ref.dev = dev; - ref.dev_addr = dma_handle; - ref.size = size; - ref.direction = direction; - ref.sg_call_ents = 0; - - check_sync(dev, &ref, false); -} -EXPORT_SYMBOL(debug_dma_sync_single_for_device); - -void debug_dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - int direction) -{ - struct dma_debug_entry ref; - - if (unlikely(dma_debug_disabled())) - return; - - ref.type = dma_debug_single; - ref.dev = dev; - ref.dev_addr = dma_handle; - ref.size = offset + size; - ref.direction = direction; - ref.sg_call_ents = 0; - - check_sync(dev, &ref, true); -} -EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); - -void debug_dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, int direction) -{ - struct dma_debug_entry ref; - - if (unlikely(dma_debug_disabled())) - return; - - ref.type = dma_debug_single; - ref.dev = dev; - ref.dev_addr = dma_handle; - ref.size = offset + size; - ref.direction = direction; - ref.sg_call_ents = 0; - - check_sync(dev, &ref, false); -} -EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); - -void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, int direction) -{ - struct scatterlist *s; - int mapped_ents = 0, i; - - if (unlikely(dma_debug_disabled())) - return; - - for_each_sg(sg, s, nelems, i) { - - struct dma_debug_entry ref = { - .type = dma_debug_sg, - .dev = dev, - .pfn = 
page_to_pfn(sg_page(s)), - .offset = s->offset, - .dev_addr = sg_dma_address(s), - .size = sg_dma_len(s), - .direction = direction, - .sg_call_ents = nelems, - }; - - if (!i) - mapped_ents = get_nr_mapped_entries(dev, &ref); - - if (i >= mapped_ents) - break; - - check_sync(dev, &ref, true); - } -} -EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); - -void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, int direction) -{ - struct scatterlist *s; - int mapped_ents = 0, i; - - if (unlikely(dma_debug_disabled())) - return; - - for_each_sg(sg, s, nelems, i) { - - struct dma_debug_entry ref = { - .type = dma_debug_sg, - .dev = dev, - .pfn = page_to_pfn(sg_page(s)), - .offset = s->offset, - .dev_addr = sg_dma_address(s), - .size = sg_dma_len(s), - .direction = direction, - .sg_call_ents = nelems, - }; - if (!i) - mapped_ents = get_nr_mapped_entries(dev, &ref); - - if (i >= mapped_ents) - break; - - check_sync(dev, &ref, false); - } -} -EXPORT_SYMBOL(debug_dma_sync_sg_for_device); - -static int __init dma_debug_driver_setup(char *str) -{ - int i; - - for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { - current_driver_name[i] = *str; - if (*str == 0) - break; - } - - if (current_driver_name[0]) - pr_info("DMA-API: enable driver filter for driver [%s]\n", - current_driver_name); - - - return 1; -} -__setup("dma_debug_driver=", dma_debug_driver_setup); diff --git a/lib/dma-direct.c b/lib/dma-direct.c deleted file mode 100644 index 8be8106270c2..000000000000 --- a/lib/dma-direct.c +++ /dev/null @@ -1,204 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DMA operations that map physical memory directly without using an IOMMU or - * flushing caches. - */ -#include -#include -#include -#include -#include -#include -#include - -#define DIRECT_MAPPING_ERROR 0 - -/* - * Most architectures use ZONE_DMA for the first 16 Megabytes, but - * some use it for entirely different regions: - */ -#ifndef ARCH_ZONE_DMA_BITS -#define ARCH_ZONE_DMA_BITS 24 -#endif - -/* - * For AMD SEV all DMA must be to unencrypted addresses. - */ -static inline bool force_dma_unencrypted(void) -{ - return sev_active(); -} - -static bool -check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, - const char *caller) -{ - if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { - if (!dev->dma_mask) { - dev_err(dev, - "%s: call on device without dma_mask\n", - caller); - return false; - } - - if (*dev->dma_mask >= DMA_BIT_MASK(32)) { - dev_err(dev, - "%s: overflow %pad+%zu of device mask %llx\n", - caller, &dma_addr, size, *dev->dma_mask); - } - return false; - } - return true; -} - -static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) -{ - dma_addr_t addr = force_dma_unencrypted() ? 
- __phys_to_dma(dev, phys) : phys_to_dma(dev, phys); - return addr + size - 1 <= dev->coherent_dma_mask; -} - -void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - int page_order = get_order(size); - struct page *page = NULL; - void *ret; - - /* we always manually zero the memory once we are done: */ - gfp &= ~__GFP_ZERO; - - /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ - if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) - gfp |= GFP_DMA; - if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) - gfp |= GFP_DMA32; - -again: - /* CMA can be used only in the context which permits sleeping */ - if (gfpflags_allow_blocking(gfp)) { - page = dma_alloc_from_contiguous(dev, count, page_order, gfp); - if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { - dma_release_from_contiguous(dev, page, count); - page = NULL; - } - } - if (!page) - page = alloc_pages_node(dev_to_node(dev), gfp, page_order); - - if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { - __free_pages(page, page_order); - page = NULL; - - if (IS_ENABLED(CONFIG_ZONE_DMA32) && - dev->coherent_dma_mask < DMA_BIT_MASK(64) && - !(gfp & (GFP_DMA32 | GFP_DMA))) { - gfp |= GFP_DMA32; - goto again; - } - - if (IS_ENABLED(CONFIG_ZONE_DMA) && - dev->coherent_dma_mask < DMA_BIT_MASK(32) && - !(gfp & GFP_DMA)) { - gfp = (gfp & ~GFP_DMA32) | GFP_DMA; - goto again; - } - } - - if (!page) - return NULL; - ret = page_address(page); - if (force_dma_unencrypted()) { - set_memory_decrypted((unsigned long)ret, 1 << page_order); - *dma_handle = __phys_to_dma(dev, page_to_phys(page)); - } else { - *dma_handle = phys_to_dma(dev, page_to_phys(page)); - } - memset(ret, 0, size); - return ret; -} - -/* - * NOTE: this function must never look at the dma_addr argument, because we want - * to be able to use it as a helper for iommu implementations as well. 
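The retry cascade in dma_direct_alloc() amounts to picking the narrowest zone that still satisfies the coherent mask. A stand-alone model using the 24-bit ARCH_ZONE_DMA_BITS default (zone names are illustrative only):

#include <stdint.h>
#include <stdio.h>

static const char *pick_zone(uint64_t coherent_mask)
{
	if (coherent_mask <= (1ULL << 24) - 1)
		return "ZONE_DMA";	/* mask fits in ARCH_ZONE_DMA_BITS */
	if (coherent_mask <= 0xffffffffULL)
		return "ZONE_DMA32";	/* mask fits in 32 bits */
	return "ZONE_NORMAL";		/* full 64-bit capable device */
}

int main(void)
{
	printf("%s\n", pick_zone((1ULL << 32) - 1));	/* -> ZONE_DMA32 */
	return 0;
}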
- */ -void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_addr, unsigned long attrs) -{ - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned int page_order = get_order(size); - - if (force_dma_unencrypted()) - set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order); - if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count)) - free_pages((unsigned long)cpu_addr, page_order); -} - -dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset; - - if (!check_addr(dev, dma_addr, size, __func__)) - return DIRECT_MAPPING_ERROR; - return dma_addr; -} - -int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, - enum dma_data_direction dir, unsigned long attrs) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sgl, sg, nents, i) { - BUG_ON(!sg_page(sg)); - - sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg)); - if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__)) - return 0; - sg_dma_len(sg) = sg->length; - } - - return nents; -} - -int dma_direct_supported(struct device *dev, u64 mask) -{ -#ifdef CONFIG_ZONE_DMA - if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) - return 0; -#else - /* - * Because 32-bit DMA masks are so common we expect every architecture - * to be able to satisfy them - either by not supporting more physical - * memory, or by providing a ZONE_DMA32. If neither is the case, the - * architecture needs to use an IOMMU instead of the direct mapping. - */ - if (mask < DMA_BIT_MASK(32)) - return 0; -#endif - /* - * Various PCI/PCIe bridges have broken support for > 32bit DMA even - * if the device itself might support it. - */ - if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32)) - return 0; - return 1; -} - -int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == DIRECT_MAPPING_ERROR; -} - -const struct dma_map_ops dma_direct_ops = { - .alloc = dma_direct_alloc, - .free = dma_direct_free, - .map_page = dma_direct_map_page, - .map_sg = dma_direct_map_sg, - .dma_supported = dma_direct_supported, - .mapping_error = dma_direct_mapping_error, -}; -EXPORT_SYMBOL(dma_direct_ops); diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c deleted file mode 100644 index 79e9a757387f..000000000000 --- a/lib/dma-noncoherent.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2018 Christoph Hellwig. - * - * DMA operations that map physical memory directly without providing cache - * coherence. 
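The noncoherent wrappers that follow all honour DMA_ATTR_SKIP_CPU_SYNC, which lets a driver take over cache maintenance itself. A hypothetical fragment that maps a whole page but syncs only the bytes the device will actually write:

#include <linux/dma-mapping.h>

static dma_addr_t example_map_partial(struct device *dev, struct page *page,
				      size_t used, size_t total)
{
	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, total,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);

	if (!dma_mapping_error(dev, addr))	/* sync only the live region */
		dma_sync_single_for_device(dev, addr, used, DMA_FROM_DEVICE);
	return addr;
}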
- */ -#include -#include -#include -#include -#include - -static void dma_noncoherent_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); -} - -static void dma_noncoherent_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); -} - -static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t addr; - - addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); - if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - arch_sync_dma_for_device(dev, page_to_phys(page) + offset, - size, dir); - return addr; -} - -static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); - if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); - return nents; -} - -#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU -static void dma_noncoherent_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); -} - -static void dma_noncoherent_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); -} - -static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); -} - -static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); -} -#endif - -const struct dma_map_ops dma_noncoherent_ops = { - .alloc = arch_dma_alloc, - .free = arch_dma_free, - .mmap = arch_dma_mmap, - .sync_single_for_device = dma_noncoherent_sync_single_for_device, - .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, - .map_page = dma_noncoherent_map_page, - .map_sg = dma_noncoherent_map_sg, -#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU - .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, - .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, - .unmap_page = dma_noncoherent_unmap_page, - .unmap_sg = dma_noncoherent_unmap_sg, -#endif - .dma_supported = dma_direct_supported, - .mapping_error = dma_direct_mapping_error, - .cache_sync = arch_dma_cache_sync, -}; -EXPORT_SYMBOL(dma_noncoherent_ops); diff --git a/lib/dma-virt.c b/lib/dma-virt.c deleted file mode 100644 index 8e61a02ef9ca..000000000000 --- a/lib/dma-virt.c +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * lib/dma-virt.c - * - * DMA operations that map to virtual addresses without flushing memory. 
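This deleted copy is byte-for-byte the file recreated as kernel/dma/virt.c above. For reference, a minimal sketch of how a software-only device with no real DMA engine adopts these ops (the soft-RDMA drivers are the in-tree users; set_dma_ops() is assumed to be available from linux/dma-mapping.h):

#include <linux/dma-mapping.h>

static void example_adopt_virt_dma(struct device *dev)
{
	set_dma_ops(dev, &dma_virt_ops);
	/* dma_map_single() etc. now just return kernel virtual addresses */
}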
- */ -#include -#include -#include -#include - -static void *dma_virt_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) -{ - void *ret; - - ret = (void *)__get_free_pages(gfp, get_order(size)); - if (ret) - *dma_handle = (uintptr_t)ret; - return ret; -} - -static void dma_virt_free(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_addr, - unsigned long attrs) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - return (uintptr_t)(page_address(page) + offset); -} - -static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sgl, sg, nents, i) { - BUG_ON(!sg_page(sg)); - sg_dma_address(sg) = (uintptr_t)sg_virt(sg); - sg_dma_len(sg) = sg->length; - } - - return nents; -} - -const struct dma_map_ops dma_virt_ops = { - .alloc = dma_virt_alloc, - .free = dma_virt_free, - .map_page = dma_virt_map_page, - .map_sg = dma_virt_map_sg, -}; -EXPORT_SYMBOL(dma_virt_ops); diff --git a/lib/swiotlb.c b/lib/swiotlb.c deleted file mode 100644 index 04b68d9dffac..000000000000 --- a/lib/swiotlb.c +++ /dev/null @@ -1,1087 +0,0 @@ -/* - * Dynamic DMA mapping support. - * - * This implementation is a fallback for platforms that do not support - * I/O TLBs (aka DMA address translation hardware). - * Copyright (C) 2000 Asit Mallick - * Copyright (C) 2000 Goutham Rao - * Copyright (C) 2000, 2003 Hewlett-Packard Co - * David Mosberger-Tang - * - * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. - * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid - * unnecessary i-cache flushing. - * 04/07/.. ak Better overflow handling. Assorted fixes. - * 05/09/10 linville Add support for syncing ranges, support syncing for - * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. - * 08/12/11 beckyb Add highmem support - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include - -#define OFFSET(val,align) ((unsigned long) \ - ( (val) & ( (align) - 1))) - -#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) - -/* - * Minimum IO TLB size to bother booting with. Systems with mainly - * 64bit capable cards will only lightly use the swiotlb. If we can't - * allocate a contiguous 1MB, we're probably in trouble anyway. - */ -#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) - -enum swiotlb_force swiotlb_force; - -/* - * Used to do a quick range check in swiotlb_tbl_unmap_single and - * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this - * API. - */ -static phys_addr_t io_tlb_start, io_tlb_end; - -/* - * The number of IO TLB blocks (in groups of 64) between io_tlb_start and - * io_tlb_end. This is command line adjustable via setup_io_tlb_npages. - */ -static unsigned long io_tlb_nslabs; - -/* - * When the IOMMU overflows we return a fallback buffer. This sets the size. 
- */ -static unsigned long io_tlb_overflow = 32*1024; - -static phys_addr_t io_tlb_overflow_buffer; - -/* - * This is a free list describing the number of free entries available from - * each index - */ -static unsigned int *io_tlb_list; -static unsigned int io_tlb_index; - -/* - * Max segment that we can provide which (if pages are contiguous) will - * not be bounced (unless SWIOTLB_FORCE is set). - */ -unsigned int max_segment; - -/* - * We need to save away the original address corresponding to a mapped entry - * for the sync operations. - */ -#define INVALID_PHYS_ADDR (~(phys_addr_t)0) -static phys_addr_t *io_tlb_orig_addr; - -/* - * Protect the above data structures in the map and unmap calls - */ -static DEFINE_SPINLOCK(io_tlb_lock); - -static int late_alloc; - -static int __init -setup_io_tlb_npages(char *str) -{ - if (isdigit(*str)) { - io_tlb_nslabs = simple_strtoul(str, &str, 0); - /* avoid tail segment of size < IO_TLB_SEGSIZE */ - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - } - if (*str == ',') - ++str; - if (!strcmp(str, "force")) { - swiotlb_force = SWIOTLB_FORCE; - } else if (!strcmp(str, "noforce")) { - swiotlb_force = SWIOTLB_NO_FORCE; - io_tlb_nslabs = 1; - } - - return 0; -} -early_param("swiotlb", setup_io_tlb_npages); -/* make io_tlb_overflow tunable too? */ - -unsigned long swiotlb_nr_tbl(void) -{ - return io_tlb_nslabs; -} -EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); - -unsigned int swiotlb_max_segment(void) -{ - return max_segment; -} -EXPORT_SYMBOL_GPL(swiotlb_max_segment); - -void swiotlb_set_max_segment(unsigned int val) -{ - if (swiotlb_force == SWIOTLB_FORCE) - max_segment = 1; - else - max_segment = rounddown(val, PAGE_SIZE); -} - -/* default to 64MB */ -#define IO_TLB_DEFAULT_SIZE (64UL<<20) -unsigned long swiotlb_size_or_default(void) -{ - unsigned long size; - - size = io_tlb_nslabs << IO_TLB_SHIFT; - - return size ? size : (IO_TLB_DEFAULT_SIZE); -} - -static bool no_iotlb_memory; - -void swiotlb_print_info(void) -{ - unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; - unsigned char *vstart, *vend; - - if (no_iotlb_memory) { - pr_warn("software IO TLB: No low mem\n"); - return; - } - - vstart = phys_to_virt(io_tlb_start); - vend = phys_to_virt(io_tlb_end); - - printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n", - (unsigned long long)io_tlb_start, - (unsigned long long)io_tlb_end, - bytes >> 20, vstart, vend - 1); -} - -/* - * Early SWIOTLB allocation may be too early to allow an architecture to - * perform the desired operations. This function allows the architecture to - * call SWIOTLB when the operations are possible. It needs to be called - * before the SWIOTLB memory is used. 
- */ -void __init swiotlb_update_mem_attributes(void) -{ - void *vaddr; - unsigned long bytes; - - if (no_iotlb_memory || late_alloc) - return; - - vaddr = phys_to_virt(io_tlb_start); - bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); - set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); - memset(vaddr, 0, bytes); - - vaddr = phys_to_virt(io_tlb_overflow_buffer); - bytes = PAGE_ALIGN(io_tlb_overflow); - set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT); - memset(vaddr, 0, bytes); -} - -int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) -{ - void *v_overflow_buffer; - unsigned long i, bytes; - - bytes = nslabs << IO_TLB_SHIFT; - - io_tlb_nslabs = nslabs; - io_tlb_start = __pa(tlb); - io_tlb_end = io_tlb_start + bytes; - - /* - * Get the overflow emergency buffer - */ - v_overflow_buffer = memblock_virt_alloc_low_nopanic( - PAGE_ALIGN(io_tlb_overflow), - PAGE_SIZE); - if (!v_overflow_buffer) - return -ENOMEM; - - io_tlb_overflow_buffer = __pa(v_overflow_buffer); - - /* - * Allocate and initialize the free list array. This array is used - * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between io_tlb_start and io_tlb_end. - */ - io_tlb_list = memblock_virt_alloc( - PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), - PAGE_SIZE); - io_tlb_orig_addr = memblock_virt_alloc( - PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), - PAGE_SIZE); - for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - } - io_tlb_index = 0; - - if (verbose) - swiotlb_print_info(); - - swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); - return 0; -} - -/* - * Statically reserve bounce buffer space and initialize bounce buffer data - * structures for the software IO TLB used to implement the DMA API. - */ -void __init -swiotlb_init(int verbose) -{ - size_t default_size = IO_TLB_DEFAULT_SIZE; - unsigned char *vstart; - unsigned long bytes; - - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - } - - bytes = io_tlb_nslabs << IO_TLB_SHIFT; - - /* Get IO TLB memory from the low pages */ - vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); - if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) - return; - - if (io_tlb_start) - memblock_free_early(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); - pr_warn("Cannot allocate SWIOTLB buffer"); - no_iotlb_memory = true; -} - -/* - * Systems with larger DMA zones (those that don't support ISA) can - * initialize the swiotlb later using the slab allocator if needed. - * This should be just like above, but with some error catching. 
- */ -int -swiotlb_late_init_with_default_size(size_t default_size) -{ - unsigned long bytes, req_nslabs = io_tlb_nslabs; - unsigned char *vstart = NULL; - unsigned int order; - int rc = 0; - - if (!io_tlb_nslabs) { - io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); - io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); - } - - /* - * Get IO TLB memory from the low pages - */ - order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); - io_tlb_nslabs = SLABS_PER_PAGE << order; - bytes = io_tlb_nslabs << IO_TLB_SHIFT; - - while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { - vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, - order); - if (vstart) - break; - order--; - } - - if (!vstart) { - io_tlb_nslabs = req_nslabs; - return -ENOMEM; - } - if (order != get_order(bytes)) { - printk(KERN_WARNING "Warning: only able to allocate %ld MB " - "for software IO TLB\n", (PAGE_SIZE << order) >> 20); - io_tlb_nslabs = SLABS_PER_PAGE << order; - } - rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs); - if (rc) - free_pages((unsigned long)vstart, order); - - return rc; -} - -int -swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) -{ - unsigned long i, bytes; - unsigned char *v_overflow_buffer; - - bytes = nslabs << IO_TLB_SHIFT; - - io_tlb_nslabs = nslabs; - io_tlb_start = virt_to_phys(tlb); - io_tlb_end = io_tlb_start + bytes; - - set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); - memset(tlb, 0, bytes); - - /* - * Get the overflow emergency buffer - */ - v_overflow_buffer = (void *)__get_free_pages(GFP_DMA, - get_order(io_tlb_overflow)); - if (!v_overflow_buffer) - goto cleanup2; - - set_memory_decrypted((unsigned long)v_overflow_buffer, - io_tlb_overflow >> PAGE_SHIFT); - memset(v_overflow_buffer, 0, io_tlb_overflow); - io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); - - /* - * Allocate and initialize the free list array. This array is used - * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE - * between io_tlb_start and io_tlb_end. 
- */ - io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * sizeof(int))); - if (!io_tlb_list) - goto cleanup3; - - io_tlb_orig_addr = (phys_addr_t *) - __get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * - sizeof(phys_addr_t))); - if (!io_tlb_orig_addr) - goto cleanup4; - - for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - } - io_tlb_index = 0; - - swiotlb_print_info(); - - late_alloc = 1; - - swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT); - - return 0; - -cleanup4: - free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * - sizeof(int))); - io_tlb_list = NULL; -cleanup3: - free_pages((unsigned long)v_overflow_buffer, - get_order(io_tlb_overflow)); - io_tlb_overflow_buffer = 0; -cleanup2: - io_tlb_end = 0; - io_tlb_start = 0; - io_tlb_nslabs = 0; - max_segment = 0; - return -ENOMEM; -} - -void __init swiotlb_exit(void) -{ - if (!io_tlb_orig_addr) - return; - - if (late_alloc) { - free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer), - get_order(io_tlb_overflow)); - free_pages((unsigned long)io_tlb_orig_addr, - get_order(io_tlb_nslabs * sizeof(phys_addr_t))); - free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * - sizeof(int))); - free_pages((unsigned long)phys_to_virt(io_tlb_start), - get_order(io_tlb_nslabs << IO_TLB_SHIFT)); - } else { - memblock_free_late(io_tlb_overflow_buffer, - PAGE_ALIGN(io_tlb_overflow)); - memblock_free_late(__pa(io_tlb_orig_addr), - PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); - memblock_free_late(__pa(io_tlb_list), - PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); - memblock_free_late(io_tlb_start, - PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); - } - io_tlb_nslabs = 0; - max_segment = 0; -} - -int is_swiotlb_buffer(phys_addr_t paddr) -{ - return paddr >= io_tlb_start && paddr < io_tlb_end; -} - -/* - * Bounce: copy the swiotlb buffer back to the original dma location - */ -static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir) -{ - unsigned long pfn = PFN_DOWN(orig_addr); - unsigned char *vaddr = phys_to_virt(tlb_addr); - - if (PageHighMem(pfn_to_page(pfn))) { - /* The buffer does not have a mapping. Map it in and copy */ - unsigned int offset = orig_addr & ~PAGE_MASK; - char *buffer; - unsigned int sz = 0; - unsigned long flags; - - while (size) { - sz = min_t(size_t, PAGE_SIZE - offset, size); - - local_irq_save(flags); - buffer = kmap_atomic(pfn_to_page(pfn)); - if (dir == DMA_TO_DEVICE) - memcpy(vaddr, buffer + offset, sz); - else - memcpy(buffer + offset, vaddr, sz); - kunmap_atomic(buffer); - local_irq_restore(flags); - - size -= sz; - pfn++; - vaddr += sz; - offset = 0; - } - } else if (dir == DMA_TO_DEVICE) { - memcpy(vaddr, phys_to_virt(orig_addr), size); - } else { - memcpy(phys_to_virt(orig_addr), vaddr, size); - } -} - -phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, - dma_addr_t tbl_dma_addr, - phys_addr_t orig_addr, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - unsigned long flags; - phys_addr_t tlb_addr; - unsigned int nslots, stride, index, wrap; - int i; - unsigned long mask; - unsigned long offset_slots; - unsigned long max_slots; - - if (no_iotlb_memory) - panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); - - if (mem_encrypt_active()) - pr_warn_once("%s is active and system is using DMA bounce buffers\n", - sme_active() ? 
"SME" : "SEV"); - - mask = dma_get_seg_boundary(hwdev); - - tbl_dma_addr &= mask; - - offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - - /* - * Carefully handle integer overflow which can occur when mask == ~0UL. - */ - max_slots = mask + 1 - ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT - : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); - - /* - * For mappings greater than or equal to a page, we limit the stride - * (and hence alignment) to a page size. - */ - nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - if (size >= PAGE_SIZE) - stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); - else - stride = 1; - - BUG_ON(!nslots); - - /* - * Find suitable number of IO TLB entries size that will fit this - * request and allocate a buffer from that IO TLB pool. - */ - spin_lock_irqsave(&io_tlb_lock, flags); - index = ALIGN(io_tlb_index, stride); - if (index >= io_tlb_nslabs) - index = 0; - wrap = index; - - do { - while (iommu_is_span_boundary(index, nslots, offset_slots, - max_slots)) { - index += stride; - if (index >= io_tlb_nslabs) - index = 0; - if (index == wrap) - goto not_found; - } - - /* - * If we find a slot that indicates we have 'nslots' number of - * contiguous buffers, we allocate the buffers from that slot - * and mark the entries as '0' indicating unavailable. - */ - if (io_tlb_list[index] >= nslots) { - int count = 0; - - for (i = index; i < (int) (index + nslots); i++) - io_tlb_list[i] = 0; - for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; - tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT); - - /* - * Update the indices to avoid searching in the next - * round. - */ - io_tlb_index = ((index + nslots) < io_tlb_nslabs - ? (index + nslots) : 0); - - goto found; - } - index += stride; - if (index >= io_tlb_nslabs) - index = 0; - } while (index != wrap); - -not_found: - spin_unlock_irqrestore(&io_tlb_lock, flags); - if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) - dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); - return SWIOTLB_MAP_ERROR; -found: - spin_unlock_irqrestore(&io_tlb_lock, flags); - - /* - * Save away the mapping from the original address to the DMA address. - * This is needed when we sync the memory. Then we sync the buffer if - * needed. - */ - for (i = 0; i < nslots; i++) - io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); - - return tlb_addr; -} - -/* - * Allocates bounce buffer and returns its physical address. - */ -static phys_addr_t -map_single(struct device *hwdev, phys_addr_t phys, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - dma_addr_t start_dma_addr; - - if (swiotlb_force == SWIOTLB_NO_FORCE) { - dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n", - &phys); - return SWIOTLB_MAP_ERROR; - } - - start_dma_addr = __phys_to_dma(hwdev, io_tlb_start); - return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, - dir, attrs); -} - -/* - * tlb_addr is the physical address of the bounce buffer to unmap. 
- */ -void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - unsigned long flags; - int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; - phys_addr_t orig_addr = io_tlb_orig_addr[index]; - - /* - * First, sync the memory before unmapping the entry - */ - if (orig_addr != INVALID_PHYS_ADDR && - !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) - swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); - - /* - * Return the buffer to the free list by setting the corresponding - * entries to indicate the number of contiguous entries available. - * While returning the entries to the free list, we merge the entries - * with slots below and above the pool being returned. - */ - spin_lock_irqsave(&io_tlb_lock, flags); - { - count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ? - io_tlb_list[index + nslots] : 0); - /* - * Step 1: return the slots to the free list, merging the - * slots with succeeding slots - */ - for (i = index + nslots - 1; i >= index; i--) { - io_tlb_list[i] = ++count; - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - } - /* - * Step 2: merge the returned slots with the preceding slots, - * if available (non-zero) - */ - for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; - } - spin_unlock_irqrestore(&io_tlb_lock, flags); -} - -void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target) -{ - int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; - phys_addr_t orig_addr = io_tlb_orig_addr[index]; - - if (orig_addr == INVALID_PHYS_ADDR) - return; - orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1); - - switch (target) { - case SYNC_FOR_CPU: - if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, - size, DMA_FROM_DEVICE); - else - BUG_ON(dir != DMA_TO_DEVICE); - break; - case SYNC_FOR_DEVICE: - if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, - size, DMA_TO_DEVICE); - else - BUG_ON(dir != DMA_FROM_DEVICE); - break; - default: - BUG(); - } -} - -static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, - size_t size) -{ - u64 mask = DMA_BIT_MASK(32); - - if (dev && dev->coherent_dma_mask) - mask = dev->coherent_dma_mask; - return addr + size - 1 <= mask; -} - -static void * -swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned long attrs) -{ - phys_addr_t phys_addr; - - if (swiotlb_force == SWIOTLB_NO_FORCE) - goto out_warn; - - phys_addr = swiotlb_tbl_map_single(dev, - __phys_to_dma(dev, io_tlb_start), - 0, size, DMA_FROM_DEVICE, attrs); - if (phys_addr == SWIOTLB_MAP_ERROR) - goto out_warn; - - *dma_handle = __phys_to_dma(dev, phys_addr); - if (!dma_coherent_ok(dev, *dma_handle, size)) - goto out_unmap; - - memset(phys_to_virt(phys_addr), 0, size); - return phys_to_virt(phys_addr); - -out_unmap: - dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", - (unsigned long long)dev->coherent_dma_mask, - (unsigned long long)*dma_handle); - - /* - * DMA_TO_DEVICE to avoid memcpy in unmap_single. - * DMA_ATTR_SKIP_CPU_SYNC is optional. 
- */ - swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); -out_warn: - if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { - dev_warn(dev, - "swiotlb: coherent allocation failed, size=%zu\n", - size); - dump_stack(); - } - return NULL; -} - -static bool swiotlb_free_buffer(struct device *dev, size_t size, - dma_addr_t dma_addr) -{ - phys_addr_t phys_addr = dma_to_phys(dev, dma_addr); - - WARN_ON_ONCE(irqs_disabled()); - - if (!is_swiotlb_buffer(phys_addr)) - return false; - - /* - * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single. - * DMA_ATTR_SKIP_CPU_SYNC is optional. - */ - swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - return true; -} - -static void -swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, - int do_panic) -{ - if (swiotlb_force == SWIOTLB_NO_FORCE) - return; - - /* - * Ran out of IOMMU space for this operation. This is very bad. - * Unfortunately the drivers cannot handle this operation properly - * unless they check for dma_mapping_error (most don't). - * When the mapping is small enough return a static buffer to limit - * the damage, or panic when the transfer is too big. - */ - dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n", - size); - - if (size <= io_tlb_overflow || !do_panic) - return; - - if (dir == DMA_BIDIRECTIONAL) - panic("DMA: Random memory could be DMA accessed\n"); - if (dir == DMA_FROM_DEVICE) - panic("DMA: Random memory could be DMA written\n"); - if (dir == DMA_TO_DEVICE) - panic("DMA: Random memory could be DMA read\n"); -} - -/* - * Map a single buffer of the indicated size for DMA in streaming mode. The - * physical address to use is returned. - * - * Once the device is given the dma address, the device owns this memory until - * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed. - */ -dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - phys_addr_t map, phys = page_to_phys(page) + offset; - dma_addr_t dev_addr = phys_to_dma(dev, phys); - - BUG_ON(dir == DMA_NONE); - /* - * If the address happens to be in the device's DMA window, - * we can safely return the device addr and not worry about bounce - * buffering it. - */ - if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE) - return dev_addr; - - trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); - - /* Oh well, have to allocate and map a bounce buffer. */ - map = map_single(dev, phys, size, dir, attrs); - if (map == SWIOTLB_MAP_ERROR) { - swiotlb_full(dev, size, dir, 1); - return __phys_to_dma(dev, io_tlb_overflow_buffer); - } - - dev_addr = __phys_to_dma(dev, map); - - /* Ensure that the address returned is DMA'ble */ - if (dma_capable(dev, dev_addr, size)) - return dev_addr; - - attrs |= DMA_ATTR_SKIP_CPU_SYNC; - swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); - - return __phys_to_dma(dev, io_tlb_overflow_buffer); -} - -/* - * Unmap a single streaming mode DMA translation. The dma_addr and size must - * match what was provided in a previous swiotlb_map_page call. All - * other usages are undefined. - * - * After this call, reads by the cpu to the buffer are guaranteed to see - * whatever the device wrote there. 
- */ -static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); - - BUG_ON(dir == DMA_NONE); - - if (is_swiotlb_buffer(paddr)) { - swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); - return; - } - - if (dir != DMA_FROM_DEVICE) - return; - - /* - * phys_to_virt doesn't work with highmem page but we could - * call dma_mark_clean() with highmem page here. However, we - * are fine since dma_mark_clean() is null on POWERPC. We can - * make dma_mark_clean() take a physical address if necessary. - */ - dma_mark_clean(phys_to_virt(paddr), size); -} - -void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - unmap_single(hwdev, dev_addr, size, dir, attrs); -} - -/* - * Make physical memory consistent for a single streaming mode DMA translation - * after a transfer. - * - * If you perform a swiotlb_map_page() but wish to interrogate the buffer - * using the cpu, yet do not wish to tear down the dma mapping, you must - * call this function before doing so. At the next point you give the dma - * address back to the card, you must first perform a - * swiotlb_dma_sync_for_device, and then the device again owns the buffer. - */ -static void -swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - enum dma_sync_target target) -{ - phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); - - BUG_ON(dir == DMA_NONE); - - if (is_swiotlb_buffer(paddr)) { - swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); - return; - } - - if (dir != DMA_FROM_DEVICE) - return; - - dma_mark_clean(phys_to_virt(paddr), size); -} - -void -swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) -{ - swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); -} - -void -swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir) -{ - swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); -} - -/* - * Map a set of buffers described by scatterlist in streaming mode for DMA. - * This is the scatter-gather version of the above swiotlb_map_page - * interface. Here the scatter-gather list elements are each tagged with the - * appropriate dma address and length. They are obtained via - * sg_dma_{address,length}(SG). - * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * - * Device ownership issues as mentioned above for swiotlb_map_page are the - * same here. - */ -int -swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - BUG_ON(dir == DMA_NONE); - - for_each_sg(sgl, sg, nelems, i) { - phys_addr_t paddr = sg_phys(sg); - dma_addr_t dev_addr = phys_to_dma(hwdev, paddr); - - if (swiotlb_force == SWIOTLB_FORCE || - !dma_capable(hwdev, dev_addr, sg->length)) { - phys_addr_t map = map_single(hwdev, sg_phys(sg), - sg->length, dir, attrs); - if (map == SWIOTLB_MAP_ERROR) { - /* Don't panic here, we expect map_sg users - to do proper error handling. 
*/ - swiotlb_full(hwdev, sg->length, dir, 0); - attrs |= DMA_ATTR_SKIP_CPU_SYNC; - swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, - attrs); - sg_dma_len(sgl) = 0; - return 0; - } - sg->dma_address = __phys_to_dma(hwdev, map); - } else - sg->dma_address = dev_addr; - sg_dma_len(sg) = sg->length; - } - return nelems; -} - -/* - * Unmap a set of streaming mode DMA translations. Again, cpu read rules - * concerning calls here are the same as for swiotlb_unmap_page() above. - */ -void -swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - BUG_ON(dir == DMA_NONE); - - for_each_sg(sgl, sg, nelems, i) - unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, - attrs); -} - -/* - * Make physical memory consistent for a set of streaming mode DMA translations - * after a transfer. - * - * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules - * and usage. - */ -static void -swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - enum dma_sync_target target) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nelems, i) - swiotlb_sync_single(hwdev, sg->dma_address, - sg_dma_len(sg), dir, target); -} - -void -swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); -} - -void -swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); -} - -int -swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) -{ - return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer)); -} - -/* - * Return whether the given device DMA address mask can be supported - * properly. For example, if your device can only drive the low 24-bits - * during bus mastering, then you would pass 0x00ffffff as the mask to - * this function. - */ -int -swiotlb_dma_supported(struct device *hwdev, u64 mask) -{ - return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask; -} - -void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - void *vaddr; - - /* temporary workaround: */ - if (gfp & __GFP_NOWARN) - attrs |= DMA_ATTR_NO_WARN; - - /* - * Don't print a warning when the first allocation attempt fails. - * swiotlb_alloc_coherent() will print a warning when the DMA memory - * allocation ultimately failed. 
- */ - gfp |= __GFP_NOWARN; - - vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); - if (!vaddr) - vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs); - return vaddr; -} - -void swiotlb_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, unsigned long attrs) -{ - if (!swiotlb_free_buffer(dev, size, dma_addr)) - dma_direct_free(dev, size, vaddr, dma_addr, attrs); -} - -const struct dma_map_ops swiotlb_dma_ops = { - .mapping_error = swiotlb_dma_mapping_error, - .alloc = swiotlb_alloc, - .free = swiotlb_free, - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, - .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = swiotlb_sync_sg_for_device, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .dma_supported = dma_direct_supported, -}; -- cgit v1.2.3 From 9f3cb9b71bc3166f27ceffe0b7295c464c638151 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 5 Jun 2018 09:22:37 -0700 Subject: MAINTAINERS: Corrected Broadcom Northstar2 entry While moving the Northstar 2 DTS into a dedicated directory, the corresponding MAINTAINERS file entry was not updated accordingly; fix that. Fixes: 63a913c157f5 ("arm64: dts: move ns2 into northstar2 directory") Signed-off-by: Florian Fainelli --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9d5eeff51b5f..67b014ea81f2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2971,7 +2971,7 @@ N: bcm585* N: bcm586* N: bcm88312 N: hr2 -F: arch/arm64/boot/dts/broadcom/ns2* +F: arch/arm64/boot/dts/broadcom/northstar2/* F: drivers/clk/bcm/clk-ns* F: drivers/pinctrl/bcm/pinctrl-ns* -- cgit v1.2.3 From d64324acdb7052b08b13ffc8c12af232c6a29afd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 5 Jun 2018 09:24:55 -0700 Subject: MAINTAINERS: Update Broadcom iProc entry with Stingray Update the MAINTAINERS file to cover the "stingray" pattern and a few files under arch/arm64/boot/dts/broadcom/* as well as the clock driver and binding. Signed-off-by: Florian Fainelli --- MAINTAINERS | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 67b014ea81f2..c78feb02a8c9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2971,9 +2971,13 @@ N: bcm585* N: bcm586* N: bcm88312 N: hr2 +N: stingray F: arch/arm64/boot/dts/broadcom/northstar2/* +F: arch/arm64/boot/dts/broadcom/stingray/* F: drivers/clk/bcm/clk-ns* +F: drivers/clk/bcm/clk-sr* F: drivers/pinctrl/bcm/pinctrl-ns* +F: include/dt-bindings/clock/bcm-sr* BROADCOM KONA GPIO DRIVER M: Ray Jui -- cgit v1.2.3 From 73c4b15eff163f633a86589c5baf071f41af26b1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Mon, 18 Jun 2018 14:41:36 -0700 Subject: MAINTAINERS: Add me as an x86 entry code maintainer And update my email address. Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Signed-off-by: Andy Lutomirski Signed-off-by: Linus Torvalds --- MAINTAINERS | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9d5eeff51b5f..624c3fd11d04 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15575,6 +15575,13 @@ S: Maintained F: Documentation/x86/ F: arch/x86/ +X86 ENTRY CODE +M: Andy Lutomirski +L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm +S: Maintained +F: arch/x86/entry/ + X86 MCE INFRASTRUCTURE M: Tony Luck M: Borislav Petkov @@ -15597,7 +15604,7 @@ F: drivers/platform/x86/ F: drivers/platform/olpc/ X86 VDSO -M: Andy Lutomirski +M: Andy Lutomirski L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso S: Maintained -- cgit v1.2.3 From 01a21986f8ed52911eafdc728f595d2252b71451 Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Tue, 19 Jun 2018 15:08:34 +0930 Subject: MAINTAINERS: Add Sam as the maintainer for NCSI Sam has been handling the maintenance of NCSI for a number of release cycles now. Acked-by: Samuel Mendoza-Jonas Signed-off-by: Joel Stanley Signed-off-by: David S. Miller --- MAINTAINERS | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index a5f04264ad10..ebb3168fd9e0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9756,6 +9756,11 @@ L: linux-scsi@vger.kernel.org S: Maintained F: drivers/scsi/NCR_D700.* +NCSI LIBRARY: +M: Samuel Mendoza-Jonas +S: Maintained +F: net/ncsi/ + NCT6775 HARDWARE MONITOR DRIVER M: Guenter Roeck L: linux-hwmon@vger.kernel.org -- cgit v1.2.3 From 48e315618dc4dc8904182cd221e3d395d5d97005 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 22 Jun 2018 12:08:20 +0200 Subject: MAINTAINERS: Add file patterns for x86 device tree bindings Submitters of device tree binding documentation may forget to CC the subsystem maintainer if this is missing. Signed-off-by: Geert Uytterhoeven Signed-off-by: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Rob Herring Cc: Mark Rutland Cc: devicetree@vger.kernel.org Link: https://lkml.kernel.org/r/20180622100820.29616-1-geert@linux-m68k.org --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index 9c125f705f78..60929873b900 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15366,6 +15366,7 @@ M: x86@kernel.org L: linux-kernel@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core S: Maintained +F: Documentation/devicetree/bindings/x86/ F: Documentation/x86/ F: arch/x86/ -- cgit v1.2.3 From 935c5e3eafe8186dd13e1eda24ad82ecae942852 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 22 Jun 2018 12:08:13 +0200 Subject: MAINTAINERS: Add file patterns for dsa device tree bindings Submitters of device tree binding documentation may forget to CC the subsystem maintainer if this is missing. Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index edf3cf5ea691..977f9a431f77 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9882,6 +9882,7 @@ M: Andrew Lunn M: Vivien Didelot M: Florian Fainelli S: Maintained +F: Documentation/devicetree/bindings/net/dsa/ F: net/dsa/ F: include/net/dsa.h F: include/linux/dsa/ -- cgit v1.2.3 From 0e49740c35c1d8f0c8fd84e9387b6a1007228d6f Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 27 Jun 2018 13:07:07 -0500 Subject: MAINTAINERS: Timur has a kernel.org address Timur Tabi no longer works for Qualcomm, and he now has a kernel.org email address, so update MAINTAINERS accordingly. Signed-off-by: Timur Tabi Signed-off-by: Linus Torvalds --- MAINTAINERS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index f0896c04811e..bfb8a18d4793 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5673,7 +5673,7 @@ F: drivers/crypto/caam/ F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt FREESCALE DIU FRAMEBUFFER DRIVER -M: Timur Tabi +M: Timur Tabi L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/fsl-diu-fb.* @@ -5773,7 +5773,7 @@ S: Maintained F: drivers/net/wan/fsl_ucc_hdlc* FREESCALE QUICC ENGINE UCC UART DRIVER -M: Timur Tabi +M: Timur Tabi L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/tty/serial/ucc_uart.c @@ -5797,7 +5797,7 @@ F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h FREESCALE SOC SOUND DRIVERS -M: Timur Tabi +M: Timur Tabi M: Nicolin Chen M: Xiubo Li R: Fabio Estevam @@ -11813,9 +11813,9 @@ F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt F: drivers/cpufreq/qcom-cpufreq-kryo.c QUALCOMM EMAC GIGABIT ETHERNET DRIVER -M: Timur Tabi +M: Timur Tabi L: netdev@vger.kernel.org -S: Supported +S: Maintained F: drivers/net/ethernet/qualcomm/emac/ QUALCOMM HEXAGON ARCHITECTURE -- cgit v1.2.3 From 008e682b5bf1bdd735b2eecf2f3903d7da49dd52 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 27 Jun 2018 23:26:24 -0700 Subject: proc: add Alexey to MAINTAINERS I know I'll regret it. Link: http://lkml.kernel.org/r/20180627194840.GA18113@avx2 Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'MAINTAINERS') diff --git a/MAINTAINERS b/MAINTAINERS index bfb8a18d4793..07d1576fc766 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11481,6 +11481,15 @@ W: http://wireless.kernel.org/en/users/Drivers/p54 S: Obsolete F: drivers/net/wireless/intersil/prism54/ +PROC FILESYSTEM +R: Alexey Dobriyan +L: linux-kernel@vger.kernel.org +L: linux-fsdevel@vger.kernel.org +S: Maintained +F: fs/proc/ +F: include/linux/proc_fs.h +F: tools/testing/selftests/proc/ + PROC SYSCTL M: "Luis R. Rodriguez" M: Kees Cook -- cgit v1.2.3
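[Editor's note] The slab arithmetic that recurs throughout the deleted lib/swiotlb.c above (the OFFSET() macro, IO_TLB_SHIFT slab sizing, and the per-slab contiguous-free counts in io_tlb_list) can be checked in isolation. What follows is a minimal standalone sketch, not kernel code: the demo_* names are invented for illustration, and the 2KB-slab / 128-slab-segment constants are assumed to match the IO_TLB_SHIFT and IO_TLB_SEGSIZE values the deleted file was built against.

#include <stdio.h>

/* Assumed constants mirroring the deleted swiotlb code: 2KB slabs
 * (IO_TLB_SHIFT = 11) grouped into 128-slab segments (IO_TLB_SEGSIZE). */
#define DEMO_IO_TLB_SHIFT   11
#define DEMO_IO_TLB_SEGSIZE 128

/* Same computation as the deleted OFFSET(val, align) macro:
 * the position of a slab within its segment. */
#define DEMO_OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

/* Number of slabs a mapping of 'size' bytes occupies: the
 * ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT expression from
 * swiotlb_tbl_map_single(), written as a plain round-up division. */
static unsigned long demo_nslots(unsigned long size)
{
	return (size + (1UL << DEMO_IO_TLB_SHIFT) - 1) >> DEMO_IO_TLB_SHIFT;
}

int main(void)
{
	/* A 5000-byte mapping needs three 2KB bounce slabs. */
	printf("nslots(5000) = %lu\n", demo_nslots(5000));

	/* Slab 130 sits at offset 2 in its segment, so an allocation
	 * starting there has at most 126 slabs before the boundary. */
	printf("OFFSET(130, 128) = %lu\n",
	       DEMO_OFFSET(130, DEMO_IO_TLB_SEGSIZE));
	return 0;
}

Compiled with any C compiler this prints nslots(5000) = 3 and OFFSET(130, 128) = 2, which illustrates the invariant behind the allocator's fast path: io_tlb_list[i] records how many contiguous free slabs start at index i before the next IO_TLB_SEGSIZE boundary, so swiotlb_tbl_map_single() can satisfy a request at 'index' with the single test io_tlb_list[index] >= nslots, and swiotlb_tbl_unmap_single() restores the invariant by re-counting across the freed range and the neighbouring free slots.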