Diffstat (limited to 'drivers'): 198 files changed, 3909 insertions, 1805 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 0703bff5e60e..20360e480bd8 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -65,6 +65,9 @@ static struct acpi_scan_handler pci_root_handler = { .ids = root_device_ids, .attach = acpi_pci_root_add, .detach = acpi_pci_root_remove, + .hotplug = { + .ignore = true, + }, }; static DEFINE_MUTEX(osc_lock); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 15daa21fcd05..fd39459926b1 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1772,7 +1772,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) */ list_for_each_entry(hwid, &pnp.ids, list) { handler = acpi_scan_match_handler(hwid->id, NULL); - if (handler) { + if (handler && !handler->hotplug.ignore) { acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, acpi_hotplug_notify_cb, handler); break; diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index db5293650f62..6dbc3ca45223 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -309,7 +309,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr, sprintf(table_attr->name + ACPI_NAME_SIZE, "%d", table_attr->instance); - table_attr->attr.size = 0; + table_attr->attr.size = table_header->length; table_attr->attr.read = acpi_table_show; table_attr->attr.attr.name = table_attr->name; table_attr->attr.attr.mode = 0400; @@ -354,8 +354,9 @@ static int acpi_tables_sysfs_init(void) { struct acpi_table_attr *table_attr; struct acpi_table_header *table_header = NULL; - int table_index = 0; - int result; + int table_index; + acpi_status status; + int ret; tables_kobj = kobject_create_and_add("tables", acpi_kobj); if (!tables_kobj) @@ -365,33 +366,34 @@ static int acpi_tables_sysfs_init(void) if (!dynamic_tables_kobj) goto err_dynamic_tables; - do { - result = acpi_get_table_by_index(table_index, &table_header); - if (!result) { - table_index++; - table_attr = NULL; - table_attr = - kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL); - if (!table_attr) - return -ENOMEM; - - acpi_table_attr_init(table_attr, table_header); - result = - sysfs_create_bin_file(tables_kobj, - &table_attr->attr); - if (result) { - kfree(table_attr); - return result; - } else - list_add_tail(&table_attr->node, - &acpi_table_attr_list); + for (table_index = 0;; table_index++) { + status = acpi_get_table_by_index(table_index, &table_header); + + if (status == AE_BAD_PARAMETER) + break; + + if (ACPI_FAILURE(status)) + continue; + + table_attr = NULL; + table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL); + if (!table_attr) + return -ENOMEM; + + acpi_table_attr_init(table_attr, table_header); + ret = sysfs_create_bin_file(tables_kobj, &table_attr->attr); + if (ret) { + kfree(table_attr); + return ret; } - } while (!result); + list_add_tail(&table_attr->node, &acpi_table_attr_list); + } + kobject_uevent(tables_kobj, KOBJ_ADD); kobject_uevent(dynamic_tables_kobj, KOBJ_ADD); - result = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); + status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL); - return result == AE_OK ? 0 : -EINVAL; + return ACPI_FAILURE(status) ? 
-EINVAL : 0; err_dynamic_tables: kobject_put(tables_kobj); err: diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index b5d842370cc9..ea192ec029c4 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -223,7 +223,7 @@ static void null_softirq_done_fn(struct request *rq) blk_end_request_all(rq, 0); } -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#ifdef CONFIG_SMP static void null_ipi_cmd_end_io(void *data) { @@ -260,7 +260,7 @@ static void null_cmd_end_ipi(struct nullb_cmd *cmd) put_cpu(); } -#endif /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ +#endif /* CONFIG_SMP */ static inline void null_handle_cmd(struct nullb_cmd *cmd) { @@ -270,7 +270,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) end_cmd(cmd); break; case NULL_IRQ_SOFTIRQ: -#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#ifdef CONFIG_SMP null_cmd_end_ipi(cmd); #else end_cmd(cmd); @@ -571,7 +571,7 @@ static int __init null_init(void) { unsigned int i; -#if !defined(CONFIG_SMP) || !defined(CONFIG_USE_GENERIC_SMP_HELPERS) +#if !defined(CONFIG_SMP) if (irqmode == NULL_IRQ_SOFTIRQ) { pr_warn("null_blk: softirq completions not available.\n"); pr_warn("null_blk: using direct completions.\n"); diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 94c0c74434ea..1a65838888cd 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -33,6 +33,15 @@ config TCG_TIS from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_tis. +config TCG_TIS_I2C_ATMEL + tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)" + depends on I2C + ---help--- + If you have an Atmel I2C TPM security chip say Yes and it will be + accessible from within Linux. + To compile this driver as a module, choose M here; the module will + be called tpm_tis_i2c_atmel. + config TCG_TIS_I2C_INFINEON tristate "TPM Interface Specification 1.2 Interface (I2C - Infineon)" depends on I2C @@ -42,7 +51,17 @@ config TCG_TIS_I2C_INFINEON Specification 0.20 say Yes and it will be accessible from within Linux. To compile this driver as a module, choose M here; the module - will be called tpm_tis_i2c_infineon. + will be called tpm_i2c_infineon. + +config TCG_TIS_I2C_NUVOTON + tristate "TPM Interface Specification 1.2 Interface (I2C - Nuvoton)" + depends on I2C + ---help--- + If you have a TPM security chip with an I2C interface from + Nuvoton Technology Corp. say Yes and it will be accessible + from within Linux. + To compile this driver as a module, choose M here; the module + will be called tpm_i2c_nuvoton. config TCG_NSC tristate "National Semiconductor TPM Interface" @@ -82,14 +101,14 @@ config TCG_IBMVTPM as a module, choose M here; the module will be called tpm_ibmvtpm. config TCG_ST33_I2C - tristate "STMicroelectronics ST33 I2C TPM" - depends on I2C - depends on GPIOLIB - ---help--- - If you have a TPM security chip from STMicroelectronics working with - an I2C bus say Yes and it will be accessible from within Linux. - To compile this driver as a module, choose M here; the module will be - called tpm_stm_st33_i2c. + tristate "STMicroelectronics ST33 I2C TPM" + depends on I2C + depends on GPIOLIB + ---help--- + If you have a TPM security chip from STMicroelectronics working with + an I2C bus say Yes and it will be accessible from within Linux. + To compile this driver as a module, choose M here; the module will be + called tpm_stm_st33_i2c. 
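The acpi_tables_sysfs_init() rewrite above replaces the do/while on an int result with a for loop that treats AE_BAD_PARAMETER from acpi_get_table_by_index() as "no more tables" and any other ACPI_FAILURE() as a per-index failure to skip. A minimal userspace sketch of that enumeration pattern, with a hypothetical get_table() and status values standing in for acpi_get_table_by_index() and the real acpi_status codes:

#include <stdio.h>

/* Hypothetical status codes standing in for acpi_status values. */
enum status { OK = 0, END_OF_LIST = 1, TRANSIENT_FAIL = 2 };

/* Stub lookup: pretend index 2 fails transiently and the list ends at 5. */
static enum status get_table(int index, const char **name)
{
	static const char *names[] = { "DSDT", "FACP", "SSDT", "APIC", "MCFG" };

	if (index >= 5)
		return END_OF_LIST;	/* maps to AE_BAD_PARAMETER */
	if (index == 2)
		return TRANSIENT_FAIL;	/* any other ACPI_FAILURE() */
	*name = names[index];
	return OK;
}

int main(void)
{
	const char *name;
	int index;

	for (index = 0;; index++) {
		enum status s = get_table(index, &name);

		if (s == END_OF_LIST)
			break;		/* normal termination of the scan */
		if (s != OK)
			continue;	/* skip this index, keep scanning */
		printf("registering table %d: %s\n", index, name);
	}
	return 0;
}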
config TCG_XEN tristate "XEN TPM Interface" diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index eb41ff97d0ad..b80a4000daee 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -2,17 +2,20 @@ # Makefile for the kernel tpm device drivers. # obj-$(CONFIG_TCG_TPM) += tpm.o +tpm-y := tpm-interface.o +tpm-$(CONFIG_ACPI) += tpm_ppi.o + ifdef CONFIG_ACPI - obj-$(CONFIG_TCG_TPM) += tpm_bios.o - tpm_bios-objs += tpm_eventlog.o tpm_acpi.o tpm_ppi.o + tpm-y += tpm_eventlog.o tpm_acpi.o else ifdef CONFIG_TCG_IBMVTPM - obj-$(CONFIG_TCG_TPM) += tpm_bios.o - tpm_bios-objs += tpm_eventlog.o tpm_of.o + tpm-y += tpm_eventlog.o tpm_of.o endif endif obj-$(CONFIG_TCG_TIS) += tpm_tis.o +obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o +obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o obj-$(CONFIG_TCG_NSC) += tpm_nsc.o obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm-interface.c index e3c974a6c522..6ae41d337630 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm-interface.c @@ -10,13 +10,13 @@ * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). - * Specifications at www.trustedcomputinggroup.org + * Specifications at www.trustedcomputinggroup.org * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. - * + * * Note, the TPM chip is not interrupt driven (only polling) * and can have very long timeouts (minutes!). Hence the unusual * calls to msleep. @@ -371,13 +371,14 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, return -ENODATA; if (count > bufsiz) { dev_err(chip->dev, - "invalid count value %x %zx \n", count, bufsiz); + "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } mutex_lock(&chip->tpm_mutex); - if ((rc = chip->vendor.send(chip, (u8 *) buf, count)) < 0) { + rc = chip->vendor.send(chip, (u8 *) buf, count); + if (rc < 0) { dev_err(chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; @@ -444,7 +445,7 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd, { int err; - len = tpm_transmit(chip,(u8 *) cmd, len); + len = tpm_transmit(chip, (u8 *) cmd, len); if (len < 0) return len; else if (len < TPM_HEADER_SIZE) @@ -658,7 +659,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip) return rc; } -ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_enabled(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -674,7 +675,7 @@ ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_enabled); -ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_active(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -690,7 +691,7 @@ ssize_t tpm_show_active(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_active); -ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, +ssize_t tpm_show_owned(struct device *dev, struct device_attribute *attr, char *buf) { cap_t cap; @@ -706,8 +707,8 @@ ssize_t tpm_show_owned(struct device * dev, struct device_attribute * attr, } EXPORT_SYMBOL_GPL(tpm_show_owned); 
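transmit_cmd(), touched in the tpm-interface.c hunks above, rejects any response shorter than TPM_HEADER_SIZE (10 bytes) before looking at the TCG response header fields: a 16-bit tag, a 32-bit total length, and a 32-bit return code, all big-endian on the wire. A standalone sketch of that header check, using a hypothetical struct and sample buffer rather than the driver's tpm_cmd_t:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs/ntohl for the big-endian fields */

#define TPM_HEADER_SIZE 10

/* Hypothetical host-order view of the 10-byte TCG response header. */
struct tpm_resp_header {
	uint16_t tag;
	uint32_t length;
	uint32_t return_code;
};

static int parse_header(const uint8_t *buf, size_t len, struct tpm_resp_header *hdr)
{
	if (len < TPM_HEADER_SIZE)
		return -1;	/* short read: no usable header */
	memcpy(&hdr->tag, buf, 2);
	memcpy(&hdr->length, buf + 2, 4);
	memcpy(&hdr->return_code, buf + 6, 4);
	hdr->tag = ntohs(hdr->tag);
	hdr->length = ntohl(hdr->length);
	hdr->return_code = ntohl(hdr->return_code);
	return 0;
}

int main(void)
{
	/* TPM_TAG_RSP_COMMAND (0x00C4), length 10, TPM_SUCCESS (0) */
	const uint8_t resp[TPM_HEADER_SIZE] = { 0x00, 0xC4, 0, 0, 0, 10, 0, 0, 0, 0 };
	struct tpm_resp_header hdr;

	if (parse_header(resp, sizeof(resp), &hdr) == 0)
		printf("tag=0x%04x len=%u rc=%u\n", hdr.tag, hdr.length, hdr.return_code);
	return 0;
}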
-ssize_t tpm_show_temp_deactivated(struct device * dev, - struct device_attribute * attr, char *buf) +ssize_t tpm_show_temp_deactivated(struct device *dev, + struct device_attribute *attr, char *buf) { cap_t cap; ssize_t rc; @@ -769,10 +770,10 @@ static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) /** * tpm_pcr_read - read a pcr value - * @chip_num: tpm idx # or ANY + * @chip_num: tpm idx # or ANY * @pcr_idx: pcr idx to retrieve - * @res_buf: TPM_PCR value - * size of res_buf is 20 bytes (or NULL if you don't care) + * @res_buf: TPM_PCR value + * size of res_buf is 20 bytes (or NULL if you don't care) * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing @@ -794,9 +795,9 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read); /** * tpm_pcr_extend - extend pcr value with hash - * @chip_num: tpm idx # or AN& + * @chip_num: tpm idx # or AN& * @pcr_idx: pcr idx to extend - * @hash: hash value used to extend pcr value + * @hash: hash value used to extend pcr value * * The TPM driver should be built-in, but for whatever reason it * isn't, protect against the chip disappearing, by incrementing @@ -847,8 +848,7 @@ int tpm_do_selftest(struct tpm_chip *chip) unsigned long duration; struct tpm_cmd_t cmd; - duration = tpm_calc_ordinal_duration(chip, - TPM_ORD_CONTINUE_SELFTEST); + duration = tpm_calc_ordinal_duration(chip, TPM_ORD_CONTINUE_SELFTEST); loops = jiffies_to_msecs(duration) / delay_msec; @@ -965,12 +965,12 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr, if (err) goto out; - /* + /* ignore header 10 bytes algorithm 32 bits (1 == RSA ) encscheme 16 bits sigscheme 16 bits - parameters (RSA 12->bytes: keybit, #primes, expbit) + parameters (RSA 12->bytes: keybit, #primes, expbit) keylenbytes 32 bits 256 byte modulus ignore checksum 20 bytes @@ -1020,43 +1020,33 @@ ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr, str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); - rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, - "attempting to determine the 1.1 version"); - if (rc) - return 0; - str += sprintf(str, - "TCG version: %d.%d\nFirmware version: %d.%d\n", - cap.tpm_version.Major, cap.tpm_version.Minor, - cap.tpm_version.revMajor, cap.tpm_version.revMinor); - return str - buf; -} -EXPORT_SYMBOL_GPL(tpm_show_caps); - -ssize_t tpm_show_caps_1_2(struct device * dev, - struct device_attribute * attr, char *buf) -{ - cap_t cap; - ssize_t rc; - char *str = buf; - - rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap, - "attempting to determine the manufacturer"); - if (rc) - return 0; - str += sprintf(str, "Manufacturer: 0x%x\n", - be32_to_cpu(cap.manufacturer_id)); + /* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */ rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap, "attempting to determine the 1.2 version"); - if (rc) - return 0; - str += sprintf(str, - "TCG version: %d.%d\nFirmware version: %d.%d\n", - cap.tpm_version_1_2.Major, cap.tpm_version_1_2.Minor, - cap.tpm_version_1_2.revMajor, - cap.tpm_version_1_2.revMinor); + if (!rc) { + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: %d.%d\n", + cap.tpm_version_1_2.Major, + cap.tpm_version_1_2.Minor, + cap.tpm_version_1_2.revMajor, + cap.tpm_version_1_2.revMinor); + } else { + /* Otherwise just use TPM_STRUCT_VER */ + rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version"); + if (rc) + return 0; + str += sprintf(str, + "TCG version: %d.%d\nFirmware version: 
%d.%d\n", + cap.tpm_version.Major, + cap.tpm_version.Minor, + cap.tpm_version.revMajor, + cap.tpm_version.revMinor); + } + return str - buf; } -EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); +EXPORT_SYMBOL_GPL(tpm_show_caps); ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr, char *buf) @@ -1102,8 +1092,8 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, } EXPORT_SYMBOL_GPL(tpm_store_cancel); -static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, bool check_cancel, - bool *canceled) +static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask, + bool check_cancel, bool *canceled) { u8 status = chip->vendor.status(chip); @@ -1170,38 +1160,25 @@ EXPORT_SYMBOL_GPL(wait_for_tpm_stat); */ int tpm_open(struct inode *inode, struct file *file) { - int minor = iminor(inode); - struct tpm_chip *chip = NULL, *pos; - - rcu_read_lock(); - list_for_each_entry_rcu(pos, &tpm_chip_list, list) { - if (pos->vendor.miscdev.minor == minor) { - chip = pos; - get_device(chip->dev); - break; - } - } - rcu_read_unlock(); - - if (!chip) - return -ENODEV; + struct miscdevice *misc = file->private_data; + struct tpm_chip *chip = container_of(misc, struct tpm_chip, + vendor.miscdev); if (test_and_set_bit(0, &chip->is_open)) { dev_dbg(chip->dev, "Another process owns this TPM\n"); - put_device(chip->dev); return -EBUSY; } chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); - put_device(chip->dev); return -ENOMEM; } atomic_set(&chip->data_pending, 0); file->private_data = chip; + get_device(chip->dev); return 0; } EXPORT_SYMBOL_GPL(tpm_open); @@ -1463,7 +1440,6 @@ void tpm_dev_vendor_release(struct tpm_chip *chip) chip->vendor.release(chip->dev); clear_bit(chip->dev_num, dev_mask); - kfree(chip->vendor.miscdev.name); } EXPORT_SYMBOL_GPL(tpm_dev_vendor_release); @@ -1487,7 +1463,7 @@ void tpm_dev_release(struct device *dev) EXPORT_SYMBOL_GPL(tpm_dev_release); /* - * Called from tpm_<specific>.c probe function only for devices + * Called from tpm_<specific>.c probe function only for devices * the driver has determined it should claim. 
Prior to calling * this function the specific probe function has called pci_enable_device * upon errant exit from this function specific probe function should call @@ -1496,17 +1472,13 @@ EXPORT_SYMBOL_GPL(tpm_dev_release); struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vendor_specific *entry) { -#define DEVNAME_SIZE 7 - - char *devname; struct tpm_chip *chip; /* Driver specific per-device data */ chip = kzalloc(sizeof(*chip), GFP_KERNEL); - devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL); - if (chip == NULL || devname == NULL) - goto out_free; + if (chip == NULL) + return NULL; mutex_init(&chip->buffer_mutex); mutex_init(&chip->tpm_mutex); @@ -1531,8 +1503,9 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, set_bit(chip->dev_num, dev_mask); - scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num); - chip->vendor.miscdev.name = devname; + scnprintf(chip->devname, sizeof(chip->devname), "%s%d", "tpm", + chip->dev_num); + chip->vendor.miscdev.name = chip->devname; chip->vendor.miscdev.parent = dev; chip->dev = get_device(dev); @@ -1558,7 +1531,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, goto put_device; } - chip->bios_dir = tpm_bios_log_setup(devname); + chip->bios_dir = tpm_bios_log_setup(chip->devname); /* Make chip available */ spin_lock(&driver_lock); @@ -1571,7 +1544,6 @@ put_device: put_device(chip->dev); out_free: kfree(chip); - kfree(devname); return NULL; } EXPORT_SYMBOL_GPL(tpm_register_hardware); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index a7bfc176ed43..f32847872193 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -59,8 +59,6 @@ extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr, char *); extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr, char *); -extern ssize_t tpm_show_caps_1_2(struct device *, struct device_attribute *attr, - char *); extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr, const char *, size_t); extern ssize_t tpm_show_enabled(struct device *, struct device_attribute *attr, @@ -122,6 +120,7 @@ struct tpm_chip { struct device *dev; /* Device stuff */ int dev_num; /* /dev/tpm# */ + char devname[7]; unsigned long is_open; /* only one allowed */ int time_expired; diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 99d6820c611d..c9a528d25d22 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -202,7 +202,7 @@ static int __init init_atmel(void) have_region = (atmel_request_region - (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1; + (base, region_size, "tpm_atmel0") == NULL) ? 
0 : 1; pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0); if (IS_ERR(pdev)) { diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c index 84ddc557b8f8..59f7cb28260b 100644 --- a/drivers/char/tpm/tpm_eventlog.c +++ b/drivers/char/tpm/tpm_eventlog.c @@ -406,7 +406,6 @@ out_tpm: out: return NULL; } -EXPORT_SYMBOL_GPL(tpm_bios_log_setup); void tpm_bios_log_teardown(struct dentry **lst) { @@ -415,5 +414,3 @@ void tpm_bios_log_teardown(struct dentry **lst) for (i = 0; i < 3; i++) securityfs_remove(lst[i]); } -EXPORT_SYMBOL_GPL(tpm_bios_log_teardown); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c new file mode 100644 index 000000000000..c3cd7fe481a1 --- /dev/null +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -0,0 +1,284 @@ +/* + * ATMEL I2C TPM AT97SC3204T + * + * Copyright (C) 2012 V Lab Technologies + * Teddy Reed <teddy@prosauce.org> + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe <jgunthorpe@obsidianresearch.com> + * Device driver for ATMEL I2C TPMs. + * + * Teddy Reed determined the basic I2C command flow, unlike other I2C TPM + * devices the raw TCG formatted TPM command data is written via I2C and then + * raw TCG formatted TPM command data is returned via I2C. + * + * TGC status/locality/etc functions seen in the LPC implementation do not + * seem to be present. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include "tpm.h" + +#define I2C_DRIVER_NAME "tpm_i2c_atmel" + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +#define ATMEL_STS_OK 1 + +struct priv_data { + size_t len; + /* This is the amount we read on the first try. 25 was chosen to fit a + * fair number of read responses in the buffer so a 2nd retry can be + * avoided in small message cases. 
*/ + u8 buffer[sizeof(struct tpm_output_header) + 25]; +}; + +static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + + priv->len = 0; + + if (len <= 2) + return -EIO; + + status = i2c_master_send(client, buf, len); + + dev_dbg(chip->dev, + "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, + (int)min_t(size_t, 64, len), buf, len, status); + return status; +} + +static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + struct tpm_output_header *hdr = + (struct tpm_output_header *)priv->buffer; + u32 expected_len; + int rc; + + if (priv->len == 0) + return -EIO; + + /* Get the message size from the message header, if we didn't get the + * whole message in read_status then we need to re-read the + * message. */ + expected_len = be32_to_cpu(hdr->length); + if (expected_len > count) + return -ENOMEM; + + if (priv->len >= expected_len) { + dev_dbg(chip->dev, + "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + memcpy(buf, priv->buffer, expected_len); + return expected_len; + } + + rc = i2c_master_recv(client, buf, expected_len); + dev_dbg(chip->dev, + "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, + (int)min_t(size_t, 64, expected_len), buf, count, + expected_len); + return rc; +} + +static void i2c_atmel_cancel(struct tpm_chip *chip) +{ + dev_err(chip->dev, "TPM operation cancellation was requested, but is not supported"); +} + +static u8 i2c_atmel_read_status(struct tpm_chip *chip) +{ + struct priv_data *priv = chip->vendor.priv; + struct i2c_client *client = to_i2c_client(chip->dev); + int rc; + + /* The TPM fails the I2C read until it is ready, so we do the entire + * transfer here and buffer it locally. This way the common code can + * properly handle the timeouts. */ + priv->len = 0; + memset(priv->buffer, 0, sizeof(priv->buffer)); + + + /* Once the TPM has completed the command the command remains readable + * until another command is issued. 
*/ + rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); + dev_dbg(chip->dev, + "%s: sts=%d", __func__, rc); + if (rc <= 0) + return 0; + + priv->len = rc; + + return ATMEL_STS_OK; +} + +static const struct file_operations i2c_atmel_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *i2c_atmel_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group i2c_atmel_attr_grp = { + .attrs = i2c_atmel_attrs +}; + +static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status) +{ + return 0; +} + +static const struct tpm_vendor_specific i2c_atmel = { + .status = i2c_atmel_read_status, + .recv = i2c_atmel_recv, + .send = i2c_atmel_send, + .cancel = i2c_atmel_cancel, + .req_complete_mask = ATMEL_STS_OK, + .req_complete_val = ATMEL_STS_OK, + .req_canceled = i2c_atmel_req_canceled, + .attr_group = &i2c_atmel_attr_grp, + .miscdev.fops = &i2c_atmel_ops, +}; + +static int i2c_atmel_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + chip = tpm_register_hardware(dev, &i2c_atmel); + if (!chip) { + dev_err(dev, "%s() error in tpm_register_hardware\n", __func__); + return -ENODEV; + } + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.irq = 0; + + /* There is no known way to probe for this device, and all version + * information seems to be read via TPM commands. Thus we rely on the + * TPM startup process in the common code to detect the device. 
*/ + if (tpm_get_timeouts(chip)) { + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + rc = -ENODEV; + goto out_err; + } + + return 0; + +out_err: + tpm_dev_vendor_release(chip); + tpm_remove_hardware(chip->dev); + return rc; +} + +static int i2c_atmel_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip) + tpm_dev_vendor_release(chip); + tpm_remove_hardware(dev); + kfree(chip); + return 0; +} + +static const struct i2c_device_id i2c_atmel_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_atmel_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_atmel_of_match[] = { + {.compatible = "atmel,at97sc3204t"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_atmel_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_atmel_driver = { + .id_table = i2c_atmel_id, + .probe = i2c_atmel_probe, + .remove = i2c_atmel_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_atmel_pm_ops, + .of_match_table = of_match_ptr(i2c_atmel_of_match), + }, +}; + +module_i2c_driver(i2c_atmel_driver); + +MODULE_AUTHOR("Jason Gunthorpe <jgunthorpe@obsidianresearch.com>"); +MODULE_DESCRIPTION("Atmel TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index b8735de8ce95..fefd2aa5c81e 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -581,7 +581,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); @@ -685,7 +685,6 @@ out_vendor: chip->dev->release = NULL; chip->release = NULL; tpm_dev.client = NULL; - dev_set_drvdata(chip->dev, chip); out_err: return rc; } @@ -766,7 +765,6 @@ static int tpm_tis_i2c_remove(struct i2c_client *client) chip->dev->release = NULL; chip->release = NULL; tpm_dev.client = NULL; - dev_set_drvdata(chip->dev, chip); return 0; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c new file mode 100644 index 000000000000..6276fea01ff0 --- /dev/null +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -0,0 +1,710 @@ +/****************************************************************************** + * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501, + * based on the TCG TPM Interface Spec version 1.2. + * Specifications at www.trustedcomputinggroup.org + * + * Copyright (C) 2011, Nuvoton Technology Corporation. + * Dan Morav <dan.morav@nuvoton.com> + * Copyright (C) 2013, Obsidian Research Corp. + * Jason Gunthorpe <jgunthorpe@obsidianresearch.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see http://www.gnu.org/licenses/>. + * + * Nuvoton contact information: APC.Support@nuvoton.com + *****************************************************************************/ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/i2c.h> +#include "tpm.h" + +/* I2C interface offsets */ +#define TPM_STS 0x00 +#define TPM_BURST_COUNT 0x01 +#define TPM_DATA_FIFO_W 0x20 +#define TPM_DATA_FIFO_R 0x40 +#define TPM_VID_DID_RID 0x60 +/* TPM command header size */ +#define TPM_HEADER_SIZE 10 +#define TPM_RETRY 5 +/* + * I2C bus device maximum buffer size w/o counting I2C address or command + * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data + */ +#define TPM_I2C_MAX_BUF_SIZE 32 +#define TPM_I2C_RETRY_COUNT 32 +#define TPM_I2C_BUS_DELAY 1 /* msec */ +#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ +#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ + +#define I2C_DRIVER_NAME "tpm_i2c_nuvoton" + +struct priv_data { + unsigned int intrs; +}; + +static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_read_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, + u8 *data) +{ + s32 status; + + status = i2c_smbus_write_i2c_block_data(client, offset, size, data); + dev_dbg(&client->dev, + "%s(offset=%u size=%u data=%*ph) -> sts=%d\n", __func__, + offset, size, (int)size, data, status); + return status; +} + +#define TPM_STS_VALID 0x80 +#define TPM_STS_COMMAND_READY 0x40 +#define TPM_STS_GO 0x20 +#define TPM_STS_DATA_AVAIL 0x10 +#define TPM_STS_EXPECT 0x08 +#define TPM_STS_RESPONSE_RETRY 0x02 +#define TPM_STS_ERR_VAL 0x07 /* bit2...bit0 reads always 0 */ + +#define TPM_I2C_SHORT_TIMEOUT 750 /* ms */ +#define TPM_I2C_LONG_TIMEOUT 2000 /* 2 sec */ + +/* read TPM_STS register */ +static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + u8 data; + + status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); + if (status <= 0) { + dev_err(chip->dev, "%s() error return %d\n", __func__, + status); + data = TPM_STS_ERR_VAL; + } + + return data; +} + +/* write byte to TPM_STS register */ +static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) +{ + s32 status; + int i; + + /* this causes the current command to be aborted */ + for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) { + status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data); + msleep(TPM_I2C_BUS_DELAY); + } + return status; +} + +/* write commandReady to TPM_STS register */ +static void i2c_nuvoton_ready(struct tpm_chip *chip) +{ + struct i2c_client *client = to_i2c_client(chip->dev); + s32 status; + + /* this causes the current command to be aborted */ + status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); + if (status < 0) + 
dev_err(chip->dev, + "%s() fail to write TPM_STS.commandReady\n", __func__); +} + +/* read burstCount field from TPM_STS register + * return -1 on fail to read */ +static int i2c_nuvoton_get_burstcount(struct i2c_client *client, + struct tpm_chip *chip) +{ + unsigned long stop = jiffies + chip->vendor.timeout_d; + s32 status; + int burst_count = -1; + u8 data; + + /* wait for burstcount to be non-zero */ + do { + /* in I2C burstCount is 1 byte */ + status = i2c_nuvoton_read_buf(client, TPM_BURST_COUNT, 1, + &data); + if (status > 0 && data > 0) { + burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data); + break; + } + msleep(TPM_I2C_BUS_DELAY); + } while (time_before(jiffies, stop)); + + return burst_count; +} + +/* + * WPCT301/NPCT501 SINT# supports only dataAvail + * any call to this function which is not waiting for dataAvail will + * set queue to NULL to avoid waiting for interrupt + */ +static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value) +{ + u8 status = i2c_nuvoton_read_status(chip); + return (status != TPM_STS_ERR_VAL) && ((status & mask) == value); +} + +static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, + u32 timeout, wait_queue_head_t *queue) +{ + if (chip->vendor.irq && queue) { + s32 rc; + DEFINE_WAIT(wait); + struct priv_data *priv = chip->vendor.priv; + unsigned int cur_intrs = priv->intrs; + + enable_irq(chip->vendor.irq); + rc = wait_event_interruptible_timeout(*queue, + cur_intrs != priv->intrs, + timeout); + if (rc > 0) + return 0; + /* At this point we know that the SINT pin is asserted, so we + * do not need to do i2c_nuvoton_check_status */ + } else { + unsigned long ten_msec, stop; + bool status_valid; + + /* check current status */ + status_valid = i2c_nuvoton_check_status(chip, mask, value); + if (status_valid) + return 0; + + /* use polling to wait for the event */ + ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); + stop = jiffies + timeout; + do { + if (time_before(jiffies, ten_msec)) + msleep(TPM_I2C_RETRY_DELAY_SHORT); + else + msleep(TPM_I2C_RETRY_DELAY_LONG); + status_valid = i2c_nuvoton_check_status(chip, mask, + value); + if (status_valid) + return 0; + } while (time_before(jiffies, stop)); + } + dev_err(chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + value); + return -ETIMEDOUT; +} + +/* wait for dataAvail field to be set in the TPM_STS register */ +static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout, + wait_queue_head_t *queue) +{ + return i2c_nuvoton_wait_for_stat(chip, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + TPM_STS_DATA_AVAIL | TPM_STS_VALID, + timeout, queue); +} + +/* Read @count bytes into @buf from TPM_RD_FIFO register */ +static int i2c_nuvoton_recv_data(struct i2c_client *client, + struct tpm_chip *chip, u8 *buf, size_t count) +{ + s32 rc; + int burst_count, bytes2read, size = 0; + + while (size < count && + i2c_nuvoton_wait_for_data_avail(chip, + chip->vendor.timeout_c, + &chip->vendor.read_queue) == 0) { + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(chip->dev, + "%s() fail to read burstCount=%d\n", __func__, + burst_count); + return -EIO; + } + bytes2read = min_t(size_t, burst_count, count - size); + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, + bytes2read, &buf[size]); + if (rc < 0) { + dev_err(chip->dev, + "%s() fail on i2c_nuvoton_read_buf()=%d\n", + __func__, rc); + return -EIO; + } + dev_dbg(chip->dev, "%s(%d):", __func__, bytes2read); + size += bytes2read; + } + + return size; +} + 
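i2c_nuvoton_recv_data() above drains the read FIFO in bursts: each pass asks the chip for burstCount (capped at TPM_I2C_MAX_BUF_SIZE, 32 bytes) and reads at most min(burstCount, remaining) bytes, repeating until the requested count has arrived. A hypothetical userspace sketch of the same chunking loop, with fake burst-count and FIFO-read helpers standing in for the SMBus block transfers:

#include <stdio.h>
#include <stdint.h>

#define MAX_BURST 32	/* mirrors TPM_I2C_MAX_BUF_SIZE in the driver */

/* Fake device: offers a varying number of bytes per pass, never more than MAX_BURST. */
static size_t fake_burstcount(size_t remaining)
{
	size_t burst = (remaining % 7) + 1;	/* pretend the chip varies */
	return burst > MAX_BURST ? MAX_BURST : burst;
}

/* Fake FIFO read: fills dst with a deterministic payload. */
static void fake_read_fifo(uint8_t *dst, size_t n, size_t offset)
{
	for (size_t i = 0; i < n; i++)
		dst[i] = (uint8_t)(offset + i);
}

static size_t recv_data(uint8_t *buf, size_t count)
{
	size_t size = 0;

	while (size < count) {
		size_t burst = fake_burstcount(count - size);
		size_t chunk = burst < count - size ? burst : count - size;

		fake_read_fifo(buf + size, chunk, size);
		size += chunk;
	}
	return size;
}

int main(void)
{
	uint8_t buf[100];
	size_t got = recv_data(buf, sizeof(buf));

	printf("read %zu bytes, last byte 0x%02x\n", got, (unsigned)buf[got - 1]);
	return 0;
}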
+/* Read TPM command results */ +static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct device *dev = chip->dev; + struct i2c_client *client = to_i2c_client(dev); + s32 rc; + int expected, status, burst_count, retries, size = 0; + + if (count < TPM_HEADER_SIZE) { + i2c_nuvoton_ready(chip); /* return to idle */ + dev_err(dev, "%s() count < header size\n", __func__); + return -EIO; + } + for (retries = 0; retries < TPM_RETRY; retries++) { + if (retries > 0) { + /* if this is not the first trial, set responseRetry */ + i2c_nuvoton_write_status(client, + TPM_STS_RESPONSE_RETRY); + } + /* + * read first available (> 10 bytes), including: + * tag, paramsize, and result + */ + status = i2c_nuvoton_wait_for_data_avail( + chip, chip->vendor.timeout_c, &chip->vendor.read_queue); + if (status != 0) { + dev_err(dev, "%s() timeout on dataAvail\n", __func__); + size = -ETIMEDOUT; + continue; + } + burst_count = i2c_nuvoton_get_burstcount(client, chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail to get burstCount\n", __func__); + size = -EIO; + continue; + } + size = i2c_nuvoton_recv_data(client, chip, buf, + burst_count); + if (size < TPM_HEADER_SIZE) { + dev_err(dev, "%s() fail to read header\n", __func__); + size = -EIO; + continue; + } + /* + * convert number of expected bytes field from big endian 32 bit + * to machine native + */ + expected = be32_to_cpu(*(__be32 *) (buf + 2)); + if (expected > count) { + dev_err(dev, "%s() expected > count\n", __func__); + size = -EIO; + continue; + } + rc = i2c_nuvoton_recv_data(client, chip, &buf[size], + expected - size); + size += rc; + if (rc < 0 || size < expected) { + dev_err(dev, "%s() fail to read remainder of result\n", + __func__); + size = -EIO; + continue; + } + if (i2c_nuvoton_wait_for_stat( + chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL, + TPM_STS_VALID, chip->vendor.timeout_c, + NULL)) { + dev_err(dev, "%s() error left over data\n", __func__); + size = -ETIMEDOUT; + continue; + } + break; + } + i2c_nuvoton_ready(chip); + dev_dbg(chip->dev, "%s() -> %d\n", __func__, size); + return size; +} + +/* + * Send TPM command. 
+ * + * If interrupts are used (signaled by an irq set in the vendor structure) + * tpm.c can skip polling for the data to be available as the interrupt is + * waited for here + */ +static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + struct device *dev = chip->dev; + struct i2c_client *client = to_i2c_client(dev); + u32 ordinal; + size_t count = 0; + int burst_count, bytes2write, retries, rc = -EIO; + + for (retries = 0; retries < TPM_RETRY; retries++) { + i2c_nuvoton_ready(chip); + if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, NULL)) { + dev_err(dev, "%s() timeout on commandReady\n", + __func__); + rc = -EIO; + continue; + } + rc = 0; + while (count < len - 1) { + burst_count = i2c_nuvoton_get_burstcount(client, + chip); + if (burst_count < 0) { + dev_err(dev, "%s() fail get burstCount\n", + __func__); + rc = -EIO; + break; + } + bytes2write = min_t(size_t, burst_count, + len - 1 - count); + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, + bytes2write, &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail i2cWriteBuf\n", + __func__); + break; + } + dev_dbg(dev, "%s(%d):", __func__, bytes2write); + count += bytes2write; + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | + TPM_STS_EXPECT, + TPM_STS_VALID | + TPM_STS_EXPECT, + chip->vendor.timeout_c, + NULL); + if (rc < 0) { + dev_err(dev, "%s() timeout on Expect\n", + __func__); + rc = -ETIMEDOUT; + break; + } + } + if (rc < 0) + continue; + + /* write last byte */ + rc = i2c_nuvoton_write_buf(client, TPM_DATA_FIFO_W, 1, + &buf[count]); + if (rc < 0) { + dev_err(dev, "%s() fail to write last byte\n", + __func__); + rc = -EIO; + continue; + } + dev_dbg(dev, "%s(last): %02x", __func__, buf[count]); + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_VALID | TPM_STS_EXPECT, + TPM_STS_VALID, + chip->vendor.timeout_c, NULL); + if (rc) { + dev_err(dev, "%s() timeout on Expect to clear\n", + __func__); + rc = -ETIMEDOUT; + continue; + } + break; + } + if (rc < 0) { + /* retries == TPM_RETRY */ + i2c_nuvoton_ready(chip); + return rc; + } + /* execute the TPM command */ + rc = i2c_nuvoton_write_status(client, TPM_STS_GO); + if (rc < 0) { + dev_err(dev, "%s() fail to write Go\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + rc = i2c_nuvoton_wait_for_data_avail(chip, + tpm_calc_ordinal_duration(chip, + ordinal), + &chip->vendor.read_queue); + if (rc) { + dev_err(dev, "%s() timeout command duration\n", __func__); + i2c_nuvoton_ready(chip); + return rc; + } + + dev_dbg(dev, "%s() -> %zd\n", __func__, len); + return len; +} + +static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) +{ + return (status == TPM_STS_COMMAND_READY); +} + +static const struct file_operations i2c_nuvoton_ops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .open = tpm_open, + .read = tpm_read, + .write = tpm_write, + .release = tpm_release, +}; + +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); +static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(durations, S_IRUGO, 
tpm_show_durations, NULL); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); + +static struct attribute *i2c_nuvoton_attrs[] = { + &dev_attr_pubek.attr, + &dev_attr_pcrs.attr, + &dev_attr_enabled.attr, + &dev_attr_active.attr, + &dev_attr_owned.attr, + &dev_attr_temp_deactivated.attr, + &dev_attr_caps.attr, + &dev_attr_cancel.attr, + &dev_attr_durations.attr, + &dev_attr_timeouts.attr, + NULL, +}; + +static struct attribute_group i2c_nuvoton_attr_grp = { + .attrs = i2c_nuvoton_attrs +}; + +static const struct tpm_vendor_specific tpm_i2c = { + .status = i2c_nuvoton_read_status, + .recv = i2c_nuvoton_recv, + .send = i2c_nuvoton_send, + .cancel = i2c_nuvoton_ready, + .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, + .req_canceled = i2c_nuvoton_req_canceled, + .attr_group = &i2c_nuvoton_attr_grp, + .miscdev.fops = &i2c_nuvoton_ops, +}; + +/* The only purpose for the handler is to signal to any waiting threads that + * the interrupt is currently being asserted. The driver does not do any + * processing triggered by interrupts, and the chip provides no way to mask at + * the source (plus that would be slow over I2C). Run the IRQ as a one-shot, + * this means it cannot be shared. */ +static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id) +{ + struct tpm_chip *chip = dev_id; + struct priv_data *priv = chip->vendor.priv; + + priv->intrs++; + wake_up(&chip->vendor.read_queue); + disable_irq_nosync(chip->vendor.irq); + return IRQ_HANDLED; +} + +static int get_vid(struct i2c_client *client, u32 *res) +{ + static const u8 vid_did_rid_value[] = { 0x50, 0x10, 0xfe }; + u32 temp; + s32 rc; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + rc = i2c_nuvoton_read_buf(client, TPM_VID_DID_RID, 4, (u8 *)&temp); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, sizeof(vid_did_rid_value))) { + /* + * f/w rev 2.81 has an issue where the VID_DID_RID is not + * reporting the right value. so give it another chance at + * offset 0x20 (FIFO_W). 
+ */ + rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_W, 4, + (u8 *) (&temp)); + if (rc < 0) + return rc; + + /* check WPCT301 values - ignore RID */ + if (memcmp(&temp, vid_did_rid_value, + sizeof(vid_did_rid_value))) + return -ENODEV; + } + + *res = temp; + return 0; +} + +static int i2c_nuvoton_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int rc; + struct tpm_chip *chip; + struct device *dev = &client->dev; + u32 vid = 0; + + rc = get_vid(client, &vid); + if (rc) + return rc; + + dev_info(dev, "VID: %04X DID: %02X RID: %02X\n", (u16) vid, + (u8) (vid >> 16), (u8) (vid >> 24)); + + chip = tpm_register_hardware(dev, &tpm_i2c); + if (!chip) { + dev_err(dev, "%s() error in tpm_register_hardware\n", __func__); + return -ENODEV; + } + + chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), + GFP_KERNEL); + init_waitqueue_head(&chip->vendor.read_queue); + init_waitqueue_head(&chip->vendor.int_queue); + + /* Default timeouts */ + chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT); + chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); + + /* + * I2C intfcaps (interrupt capabilitieis) in the chip are hard coded to: + * TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT + * The IRQ should be set in the i2c_board_info (which is done + * automatically in of_i2c_register_devices, for device tree users */ + chip->vendor.irq = client->irq; + + if (chip->vendor.irq) { + dev_dbg(dev, "%s() chip-vendor.irq\n", __func__); + rc = devm_request_irq(dev, chip->vendor.irq, + i2c_nuvoton_int_handler, + IRQF_TRIGGER_LOW, + chip->vendor.miscdev.name, + chip); + if (rc) { + dev_err(dev, "%s() Unable to request irq: %d for use\n", + __func__, chip->vendor.irq); + chip->vendor.irq = 0; + } else { + /* Clear any pending interrupt */ + i2c_nuvoton_ready(chip); + /* - wait for TPM_STS==0xA0 (stsValid, commandReady) */ + rc = i2c_nuvoton_wait_for_stat(chip, + TPM_STS_COMMAND_READY, + TPM_STS_COMMAND_READY, + chip->vendor.timeout_b, + NULL); + if (rc == 0) { + /* + * TIS is in ready state + * write dummy byte to enter reception state + * TPM_DATA_FIFO_W <- rc (0) + */ + rc = i2c_nuvoton_write_buf(client, + TPM_DATA_FIFO_W, + 1, (u8 *) (&rc)); + if (rc < 0) + goto out_err; + /* TPM_STS <- 0x40 (commandReady) */ + i2c_nuvoton_ready(chip); + } else { + /* + * timeout_b reached - command was + * aborted. 
TIS should now be in idle state - + * only TPM_STS_VALID should be set + */ + if (i2c_nuvoton_read_status(chip) != + TPM_STS_VALID) { + rc = -EIO; + goto out_err; + } + } + } + } + + if (tpm_get_timeouts(chip)) { + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + rc = -ENODEV; + goto out_err; + } + + return 0; + +out_err: + tpm_dev_vendor_release(chip); + tpm_remove_hardware(chip->dev); + return rc; +} + +static int i2c_nuvoton_remove(struct i2c_client *client) +{ + struct device *dev = &(client->dev); + struct tpm_chip *chip = dev_get_drvdata(dev); + + if (chip) + tpm_dev_vendor_release(chip); + tpm_remove_hardware(dev); + kfree(chip); + return 0; +} + + +static const struct i2c_device_id i2c_nuvoton_id[] = { + {I2C_DRIVER_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id); + +#ifdef CONFIG_OF +static const struct of_device_id i2c_nuvoton_of_match[] = { + {.compatible = "nuvoton,npct501"}, + {.compatible = "winbond,wpct301"}, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume); + +static struct i2c_driver i2c_nuvoton_driver = { + .id_table = i2c_nuvoton_id, + .probe = i2c_nuvoton_probe, + .remove = i2c_nuvoton_remove, + .driver = { + .name = I2C_DRIVER_NAME, + .owner = THIS_MODULE, + .pm = &i2c_nuvoton_pm_ops, + .of_match_table = of_match_ptr(i2c_nuvoton_of_match), + }, +}; + +module_i2c_driver(i2c_nuvoton_driver); + +MODULE_AUTHOR("Dan Morav (dan.morav@nuvoton.com)"); +MODULE_DESCRIPTION("Nuvoton TPM I2C Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c index 5bb8e2ddd3b3..a0d6ceb5d005 100644 --- a/drivers/char/tpm/tpm_i2c_stm_st33.c +++ b/drivers/char/tpm/tpm_i2c_stm_st33.c @@ -584,7 +584,7 @@ static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static struct attribute *stm_tpm_attrs[] = { @@ -746,8 +746,6 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) tpm_get_timeouts(chip); - i2c_set_clientdata(client, chip); - dev_info(chip->dev, "TPM I2C Initialized\n"); return 0; _irq_set: @@ -807,24 +805,18 @@ static int tpm_st33_i2c_remove(struct i2c_client *client) #ifdef CONFIG_PM_SLEEP /* * tpm_st33_i2c_pm_suspend suspend the TPM device - * Added: Work around when suspend and no tpm application is running, suspend - * may fail because chip->data_buffer is not set (only set in tpm_open in Linux - * TPM core) * @param: client, the i2c_client drescription (TPM I2C description). * @param: mesg, the power management message. * @return: 0 in case of success. 
*/ static int tpm_st33_i2c_pm_suspend(struct device *dev) { - struct tpm_chip *chip = dev_get_drvdata(dev); struct st33zp24_platform_data *pin_infos = dev->platform_data; int ret = 0; if (power_mgt) { gpio_set_value(pin_infos->io_lpcpd, 0); } else { - if (chip->data_buffer == NULL) - chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_suspend(dev); } return ret; @@ -849,8 +841,6 @@ static int tpm_st33_i2c_pm_resume(struct device *dev) TPM_STS_VALID) == TPM_STS_VALID, chip->vendor.timeout_b); } else { - if (chip->data_buffer == NULL) - chip->data_buffer = pin_infos->tpm_i2c_buffer[0]; ret = tpm_pm_resume(dev); if (!ret) tpm_do_selftest(chip); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 56b07c35a13e..2783a42aa732 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -98,7 +98,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) if (count < len) { dev_err(ibmvtpm->dev, - "Invalid size in recv: count=%ld, crq_size=%d\n", + "Invalid size in recv: count=%zd, crq_size=%d\n", count, len); return -EIO; } @@ -136,7 +136,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) if (count > ibmvtpm->rtce_size) { dev_err(ibmvtpm->dev, - "Invalid size in send: count=%ld, rtce_size=%d\n", + "Invalid size in send: count=%zd, rtce_size=%d\n", count, ibmvtpm->rtce_size); return -EIO; } @@ -419,7 +419,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 2168d15bc728..8e562dc65601 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -452,12 +452,8 @@ int tpm_add_ppi(struct kobject *parent) { return sysfs_create_group(parent, &ppi_attr_grp); } -EXPORT_SYMBOL_GPL(tpm_add_ppi); void tpm_remove_ppi(struct kobject *parent) { sysfs_remove_group(parent, &ppi_attr_grp); } -EXPORT_SYMBOL_GPL(tpm_remove_ppi); - -MODULE_LICENSE("GPL"); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 5796d0157ce0..1b74459c0723 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -448,7 +448,7 @@ static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); -static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index 94c280d36e8b..c8ff4df81779 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -351,8 +351,6 @@ static int tpmfront_probe(struct xenbus_device *dev, tpm_get_timeouts(priv->chip); - dev_set_drvdata(&dev->dev, priv->chip); - return rv; } diff --git a/drivers/gpu/drm/drm_sysfs.c 
b/drivers/gpu/drm/drm_sysfs.c index 1a35ea53106b..bd2bca395792 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -489,6 +489,11 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) } EXPORT_SYMBOL(drm_sysfs_hotplug_event); +static void drm_sysfs_release(struct device *dev) +{ + kfree(dev); +} + /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver * @dev: DRM device to be added @@ -501,6 +506,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event); int drm_sysfs_device_add(struct drm_minor *minor) { char *minor_str; + int r; if (minor->type == DRM_MINOR_CONTROL) minor_str = "controlD%d"; @@ -509,14 +515,34 @@ int drm_sysfs_device_add(struct drm_minor *minor) else minor_str = "card%d"; - minor->kdev = device_create(drm_class, minor->dev->dev, - MKDEV(DRM_MAJOR, minor->index), - minor, minor_str, minor->index); - if (IS_ERR(minor->kdev)) { - DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev)); - return PTR_ERR(minor->kdev); + minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL); + if (!minor->dev) { + r = -ENOMEM; + goto error; } + + device_initialize(minor->kdev); + minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index); + minor->kdev->class = drm_class; + minor->kdev->type = &drm_sysfs_device_minor; + minor->kdev->parent = minor->dev->dev; + minor->kdev->release = drm_sysfs_release; + dev_set_drvdata(minor->kdev, minor); + + r = dev_set_name(minor->kdev, minor_str, minor->index); + if (r < 0) + goto error; + + r = device_add(minor->kdev); + if (r < 0) + goto error; + return 0; + +error: + DRM_ERROR("device create failed %d\n", r); + put_device(minor->kdev); + return r; } /** @@ -529,7 +555,7 @@ int drm_sysfs_device_add(struct drm_minor *minor) void drm_sysfs_device_remove(struct drm_minor *minor) { if (minor->kdev) - device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index)); + device_unregister(minor->kdev); minor->kdev = NULL; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3271fd4b1724..7bccedca487a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -383,6 +383,8 @@ out: g2d_userptr->npages, g2d_userptr->vma); + exynos_gem_put_vma(g2d_userptr->vma); + if (!g2d_userptr->out_of_list) list_del_init(&g2d_userptr->list); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8600c315b4c4..ccdbecca070d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1816,6 +1816,7 @@ struct drm_i915_file_private { #define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) +#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */ #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 6dd622d733b9..e4fba39631a5 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -790,7 +790,12 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) /* Default to using SSC */ dev_priv->vbt.lvds_use_ssc = 1; - dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); + /* + * Core/SandyBridge/IvyBridge use alternative (120MHz) reference + * clock for LVDS. 
+ */ + dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, + !HAS_PCH_SPLIT(dev)); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); for (port = PORT_A; port < I915_MAX_PORTS; port++) { diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 1591576a6101..330077bcd0bd 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1406,6 +1406,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder, default: break; } + + if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. + */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); + dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + } } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3cddd508d110..7ec8b488bb1d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6518,6 +6518,9 @@ static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) void hsw_enable_package_c8(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + mutex_lock(&dev_priv->pc8.lock); __hsw_enable_package_c8(dev_priv); mutex_unlock(&dev_priv->pc8.lock); @@ -6525,6 +6528,9 @@ void hsw_enable_package_c8(struct drm_i915_private *dev_priv) void hsw_disable_package_c8(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + mutex_lock(&dev_priv->pc8.lock); __hsw_disable_package_c8(dev_priv); mutex_unlock(&dev_priv->pc8.lock); @@ -6562,6 +6568,9 @@ static void hsw_update_package_c8(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; bool allow; + if (!HAS_PC8(dev_priv->dev)) + return; + if (!i915_enable_pc8) return; @@ -6585,18 +6594,28 @@ done: static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + + mutex_lock(&dev_priv->pc8.lock); if (!dev_priv->pc8.gpu_idle) { dev_priv->pc8.gpu_idle = true; - hsw_enable_package_c8(dev_priv); + __hsw_enable_package_c8(dev_priv); } + mutex_unlock(&dev_priv->pc8.lock); } static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) { + if (!HAS_PC8(dev_priv->dev)) + return; + + mutex_lock(&dev_priv->pc8.lock); if (dev_priv->pc8.gpu_idle) { dev_priv->pc8.gpu_idle = false; - hsw_disable_package_c8(dev_priv); + __hsw_disable_package_c8(dev_priv); } + mutex_unlock(&dev_priv->pc8.lock); } #define for_each_power_domain(domain, mask) \ @@ -7184,7 +7203,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ + POSTING_READ(CURCNTR(pipe)); I915_WRITE(CURBASE(pipe), base); + POSTING_READ(CURBASE(pipe)); } static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) @@ -7213,7 +7234,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) 
intel_crtc->cursor_visible = visible; } /* and commit changes on next vblank */ + POSTING_READ(CURCNTR_IVB(pipe)); I915_WRITE(CURBASE_IVB(pipe), base); + POSTING_READ(CURBASE_IVB(pipe)); } /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ @@ -9248,8 +9271,7 @@ check_crtc_state(struct drm_device *dev) enum pipe pipe; if (encoder->base.crtc != &crtc->base) continue; - if (encoder->get_config && - encoder->get_hw_state(encoder, &pipe)) + if (encoder->get_hw_state(encoder, &pipe)) encoder->get_config(encoder, &pipe_config); } @@ -10909,8 +10931,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (encoder->get_hw_state(encoder, &pipe)) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); encoder->base.crtc = &crtc->base; - if (encoder->get_config) - encoder->get_config(encoder, &crtc->config); + encoder->get_config(encoder, &crtc->config); } else { encoder->base.crtc = NULL; } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index eb8139da9763..0b2e842fef01 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1774,7 +1774,7 @@ static void intel_disable_dp(struct intel_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); ironlake_edp_panel_off(intel_dp); /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0a07d7c9cafc..caf2ee4e5441 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1625,7 +1625,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) &to_intel_crtc(enabled)->config.adjusted_mode; int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->htotal; - int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; + int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w; int pixel_size = enabled->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; @@ -3888,7 +3888,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 18c406246a2d..22cf0f4ba248 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -902,6 +902,13 @@ intel_tv_mode_valid(struct drm_connector *connector, } +static void +intel_tv_get_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) +{ + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; +} + static bool intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) @@ -1621,6 +1628,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_ENCODER_TVDAC); intel_encoder->compute_config = intel_tv_compute_config; + intel_encoder->get_config = intel_tv_get_config; intel_encoder->mode_set = intel_tv_mode_set; intel_encoder->enable = intel_enable_tv; intel_encoder->disable = intel_disable_tv; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f9883ceff946..0b02078a0b84 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ 
b/drivers/gpu/drm/i915/intel_uncore.c @@ -217,6 +217,19 @@ static void gen6_force_wake_work(struct work_struct *work) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } +static void intel_uncore_forcewake_reset(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + vlv_force_wake_reset(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + __gen6_gt_force_wake_reset(dev_priv); + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + __gen6_gt_force_wake_mt_reset(dev_priv); + } +} + void intel_uncore_early_sanitize(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -234,19 +247,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev) dev_priv->ellc_size = 128; DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); } -} -static void intel_uncore_forcewake_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen >= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } + intel_uncore_forcewake_reset(dev); } void intel_uncore_sanitize(struct drm_device *dev) diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index deaf98cdca3a..0652ee0a2098 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, return -EINVAL; } args.ucRegIndex = buf[0]; - if (num > 1) - memcpy(&out, &buf[1], num - 1); + if (num > 1) { + num--; + memcpy(&out, &buf[1], num); + } args.lpI2CDataOut = cpu_to_le16(out); } else { if (num > ATOM_MAX_HW_I2C_READ) { diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ae92aa041c6a..b43a3a3c9067 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev) * cik_mm_rdoorbell - read a doorbell dword * * @rdev: radeon_device pointer - * @offset: byte offset into the aperture + * @index: doorbell index * * Returns the value in the doorbell aperture at the - * requested offset (CIK). + * requested doorbell index (CIK). */ -u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset) +u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index) { - if (offset < rdev->doorbell.size) { - return readl(((void __iomem *)rdev->doorbell.ptr) + offset); + if (index < rdev->doorbell.num_doorbells) { + return readl(rdev->doorbell.ptr + index); } else { - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset); + DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); return 0; } } @@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset) * cik_mm_wdoorbell - write a doorbell dword * * @rdev: radeon_device pointer - * @offset: byte offset into the aperture + * @index: doorbell index * @v: value to write * * Writes @v to the doorbell aperture at the - * requested offset (CIK). + * requested doorbell index (CIK). 
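
cik_mm_rdoorbell()/cik_mm_wdoorbell() above stop treating the doorbell BAR as a byte-addressed blob and index it as an array of 32-bit doorbells, bounds-checked against num_doorbells. A minimal standalone model of that access pattern follows; a static array stands in for the ioremap()ed aperture and plain volatile accesses stand in for readl()/writel().

#include <stdio.h>
#include <stdint.h>

#define NUM_DOORBELLS 16

/* Stand-in for the ioremap()ed doorbell BAR; one u32 per doorbell. */
static volatile uint32_t doorbell_aperture[NUM_DOORBELLS];

static uint32_t doorbell_read(uint32_t index)
{
	if (index < NUM_DOORBELLS)
		return doorbell_aperture[index];	/* dword index, not byte offset */
	fprintf(stderr, "reading beyond doorbell aperture: 0x%08x!\n", (unsigned)index);
	return 0;
}

static void doorbell_write(uint32_t index, uint32_t v)
{
	if (index < NUM_DOORBELLS)
		doorbell_aperture[index] = v;
	else
		fprintf(stderr, "writing beyond doorbell aperture: 0x%08x!\n", (unsigned)index);
}

int main(void)
{
	doorbell_write(3, 0xdeadbeef);
	printf("doorbell[3] = 0x%08x\n", (unsigned)doorbell_read(3));
	doorbell_write(NUM_DOORBELLS, 1);		/* rejected by the bounds check */
	return 0;
}
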
*/ -void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v) +void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v) { - if (offset < rdev->doorbell.size) { - writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset); + if (index < rdev->doorbell.num_doorbells) { + writel(v, rdev->doorbell.ptr + index); } else { - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset); + DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); } } @@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 4) { @@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 2) { @@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else @@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, 0); } -void cik_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) { +/* TODO: figure out why semaphore cause lockups */ +#if 0 uint64_t addr = semaphore->gpu_addr; unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); radeon_ring_write(ring, addr & 0xffffffff); radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); + + return true; +#else + return false; +#endif } /** @@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; @@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) { rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr); - WDOORBELL32(ring->doorbell_offset, ring->wptr); + WDOORBELL32(ring->doorbell_index, ring->wptr); } /** @@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) return r; } - /* doorbell offset */ - rdev->ring[idx].doorbell_offset = - (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0; - /* init the mqd struct */ memset(buf, 0, sizeof(struct bonaire_mqd)); @@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) RREG32(CP_HQD_PQ_DOORBELL_CONTROL); mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK; mqd->queue_state.cp_hqd_pq_doorbell_control |= - DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4); + DOORBELL_OFFSET(rdev->ring[idx].doorbell_index); mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN; mqd->queue_state.cp_hqd_pq_doorbell_control &= ~(DOORBELL_SOURCE | DOORBELL_HIT); @@ -7839,14 +7840,14 @@ int 
cik_init(struct radeon_device *rdev) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); + r = radeon_doorbell_get(rdev, &ring->doorbell_index); if (r) return r; ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); - r = radeon_doorbell_get(rdev, &ring->doorbell_page_num); + r = radeon_doorbell_get(rdev, &ring->doorbell_index); if (r) return r; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 9c9529de20ee..0300727a4f70 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev, * Add a DMA semaphore packet to the ring wait on or signal * other rings (CIK). */ -void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits)); radeon_ring_write(ring, addr & 0xfffffff8); radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff); + + return true; } /** @@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 91bb470de0a3..920e1e4a52c5 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev) static int cypress_pcie_performance_request(struct radeon_device *rdev, u8 perf_req, bool advertise) { +#if defined(CONFIG_ACPI) struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); +#endif u32 tmp; udelay(10); diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 6a0656d00ed0..a37b54436382 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index f26339028154..cdc003085a76 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev, static int ni_pcie_performance_request(struct radeon_device *rdev, u8 perf_req, bool advertise) { +#if defined(CONFIG_ACPI) struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); -#if defined(CONFIG_ACPI) if ((perf_req == 
PCIE_PERF_REQ_PECI_GEN1) || (perf_req == PCIE_PERF_REQ_PECI_GEN2)) { if (eg_pi->pcie_performance_request_registered == false) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 784983d78158..10abc4d5a6cc 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, RADEON_SW_INT_FIRE); } -void r100_semaphore_ring_emit(struct radeon_device *rdev, +bool r100_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) { /* Unused on older asics, since we don't have semaphores or multiple rings */ BUG(); + return false; } int r100_copy_blit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4e609e8a8d2b..9ad06732a78b 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev, } } -void r600_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); radeon_ring_write(ring, addr & 0xffffffff); radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); + + return true; } /** @@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 3b317456512a..7844d15c139f 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev, * Add a DMA semaphore packet to the ring wait on or signal * other rings (r6xx-SI). 
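
Across the radeon hunks above and below, every *_semaphore_ring_emit() implementation is converted from void to bool, so a ring that has no hardware semaphores (r100) or has them disabled (the CIK gfx ring, pending the lockup investigation) can report that and let the caller fall back to waiting for the fence on the CPU. The sketch below models only that control flow; emit_semaphore(), wait_fence_on_cpu() and sync_rings() are invented stand-ins, not radeon functions.

#include <stdbool.h>
#include <stdio.h>

/* Pretend rings 0 and 2 support hardware semaphores and ring 1 does not. */
static bool emit_semaphore(int ring, unsigned long long addr, bool emit_wait)
{
	if (ring == 1)
		return false;	/* e.g. r100: no semaphores; CIK gfx: disabled for now */
	printf("ring %d: emitted %s for semaphore at %#llx\n",
	       ring, emit_wait ? "wait" : "signal", addr);
	return true;
}

static void wait_fence_on_cpu(int ring)
{
	printf("ring %d: falling back to a CPU-side fence wait\n", ring);
}

static void sync_rings(int waiting_ring, int signalling_ring, unsigned long long sem_addr)
{
	/* If either half of the handshake cannot be emitted, wait manually instead. */
	if (!emit_semaphore(signalling_ring, sem_addr, false) ||
	    !emit_semaphore(waiting_ring, sem_addr, true))
		wait_fence_on_cpu(signalling_ring);
}

int main(void)
{
	sync_rings(2, 0, 0x1000);	/* both emits succeed: pure GPU-side sync */
	sync_rings(2, 1, 0x2000);	/* signal refused: CPU wait fallback */
	return 0;
}
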
*/ -void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); radeon_ring_write(ring, addr & 0xfffffffc); radeon_ring_write(ring, upper_32_bits(addr) & 0xff); + + return true; } /** @@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b9ee99258602..ecf2a3960c07 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, i void radeon_fence_process(struct radeon_device *rdev, int ring); bool radeon_fence_signaled(struct radeon_fence *fence); int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); +int radeon_fence_wait_locked(struct radeon_fence *fence); int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); int radeon_fence_wait_any(struct radeon_device *rdev, @@ -548,17 +549,20 @@ struct radeon_semaphore { struct radeon_sa_bo *sa_bo; signed waiters; uint64_t gpu_addr; + struct radeon_fence *sync_to[RADEON_NUM_RINGS]; }; int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore); -void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); -void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); +void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, + struct radeon_fence *fence); int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, - int signaler, int waiter); + int waiting_ring); void radeon_semaphore_free(struct radeon_device *rdev, struct radeon_semaphore **semaphore, struct radeon_fence *fence); @@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); /* * GPU doorbell structures, functions & helpers */ +#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */ + struct radeon_doorbell { - u32 num_pages; - bool free[1024]; /* doorbell mmio */ - resource_size_t base; - resource_size_t size; - void __iomem *ptr; + resource_size_t base; + resource_size_t size; + u32 __iomem *ptr; + u32 num_doorbells; /* Number of doorbells actually reserved for radeon. 
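
With the radeon.h changes above, the list of fences an IB must wait for moves from the IB into the semaphore object (sync_to[RADEON_NUM_RINGS]), and radeon_semaphore_sync_to() only ever keeps the most recent fence per ring. Treating fences as bare sequence numbers, the bookkeeping reduces to a per-ring maximum, roughly as in this standalone sketch (not driver code):

#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 4

struct semaphore_sync {
	uint64_t sync_to[NUM_RINGS];	/* 0 means nothing to wait for on that ring */
};

/* Remember a fence to wait for; a newer fence on a ring implies all older ones. */
static void sync_to(struct semaphore_sync *s, int ring, uint64_t seq)
{
	if (seq > s->sync_to[ring])
		s->sync_to[ring] = seq;
}

int main(void)
{
	struct semaphore_sync s = { { 0 } };
	int i;

	sync_to(&s, 1, 10);
	sync_to(&s, 1, 7);	/* dropped: waiting for seq 10 already covers seq 7 */
	sync_to(&s, 3, 42);

	for (i = 0; i < NUM_RINGS; i++)
		if (s.sync_to[i])
			printf("ring %d: wait for fence seq %llu before executing\n",
			       i, (unsigned long long)s.sync_to[i]);
	return 0;
}
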
*/ + unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)]; }; int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); @@ -765,7 +771,6 @@ struct radeon_ib { struct radeon_fence *fence; struct radeon_vm *vm; bool is_const_ib; - struct radeon_fence *sync_to[RADEON_NUM_RINGS]; struct radeon_semaphore *semaphore; }; @@ -799,8 +804,7 @@ struct radeon_ring { u32 pipe; u32 queue; struct radeon_bo *mqd_obj; - u32 doorbell_page_num; - u32 doorbell_offset; + u32 doorbell_index; unsigned wptr_offs; }; @@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); -void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib); int radeon_ib_pool_init(struct radeon_device *rdev); @@ -1638,7 +1641,7 @@ struct radeon_asic_ring { /* command emmit functions */ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); - void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, + bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); @@ -1979,6 +1982,7 @@ struct cik_asic { unsigned tile_config; uint32_t tile_mode_array[32]; + uint32_t macrotile_mode_array[16]; }; union radeon_asic_config { @@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); -u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset); -void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); +u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index); +void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v); /* * Cast helper @@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v); #define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) -#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset)) -#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v)) +#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index)) +#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) /* * Indirect registers accessor diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 50853c0cb49d..e354ce94cdd1 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = { .bandwidth_update = &dce8_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, + .set_backlight_level = &atombios_set_backlight_level, + .get_backlight_level = &atombios_get_backlight_level, .hdmi_enable = &evergreen_hdmi_enable, .hdmi_setmode = &evergreen_hdmi_setmode, }, @@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = { .bandwidth_update = &dce8_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, + .set_backlight_level = &atombios_set_backlight_level, + .get_backlight_level = &atombios_get_backlight_level, .hdmi_enable = 
&evergreen_hdmi_enable, .hdmi_setmode = &evergreen_hdmi_setmode, }, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f2833ee3a613..c9fd97b58076 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev); void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void r100_semaphore_ring_emit(struct radeon_device *rdev, +bool r100_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); @@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p); int r600_dma_cs_parse(struct radeon_cs_parser *p); void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void r600_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); void r600_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, +bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); @@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev, */ void cayman_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void cayman_uvd_semaphore_emit(struct radeon_device *rdev, - struct radeon_ring *ring, - struct radeon_semaphore *semaphore, - bool emit_wait); void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev); int cayman_init(struct radeon_device *rdev); void cayman_fini(struct radeon_device *rdev); @@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); void cik_sdma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); @@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); void cik_fence_compute_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); -void cik_semaphore_ring_emit(struct radeon_device *rdev, +bool cik_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *cp, struct radeon_semaphore *semaphore, bool emit_wait); @@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev); int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); -void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, +bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); @@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); /* uvd v3.1 */ -void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, +bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 26ca223d12d6..f41594b2eeac 100644 --- 
a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p) if (!p->relocs[i].robj) continue; - radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj); + radeon_semaphore_sync_to(p->ib.semaphore, + p->relocs[i].robj->tbo.sync_obj); } } @@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, goto out; } radeon_cs_sync_rings(parser); - radeon_ib_sync_to(&parser->ib, vm->fence); - radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id( - rdev, vm, parser->ring)); + radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); + radeon_semaphore_sync_to(parser->ib.semaphore, + radeon_vm_grab_id(rdev, vm, parser->ring)); if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b9234c43f43d..39b033b441d2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) */ int radeon_doorbell_init(struct radeon_device *rdev) { - int i; - /* doorbell bar mapping */ rdev->doorbell.base = pci_resource_start(rdev->pdev, 2); rdev->doorbell.size = pci_resource_len(rdev->pdev, 2); - /* limit to 4 MB for now */ - if (rdev->doorbell.size > (4 * 1024 * 1024)) - rdev->doorbell.size = 4 * 1024 * 1024; + rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS); + if (rdev->doorbell.num_doorbells == 0) + return -EINVAL; - rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size); + rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32)); if (rdev->doorbell.ptr == NULL) { return -ENOMEM; } DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base); DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size); - rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE; + memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used)); - for (i = 0; i < rdev->doorbell.num_pages; i++) { - rdev->doorbell.free[i] = true; - } return 0; } @@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev) } /** - * radeon_doorbell_get - Allocate a doorbell page + * radeon_doorbell_get - Allocate a doorbell entry * * @rdev: radeon_device pointer - * @doorbell: doorbell page number + * @doorbell: doorbell index * - * Allocate a doorbell page for use by the driver (all asics). + * Allocate a doorbell for use by the driver (all asics). * Returns 0 on success or -EINVAL on failure. 
*/ int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell) { - int i; - - for (i = 0; i < rdev->doorbell.num_pages; i++) { - if (rdev->doorbell.free[i]) { - rdev->doorbell.free[i] = false; - *doorbell = i; - return 0; - } + unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells); + if (offset < rdev->doorbell.num_doorbells) { + __set_bit(offset, rdev->doorbell.used); + *doorbell = offset; + return 0; + } else { + return -EINVAL; } - return -EINVAL; } /** - * radeon_doorbell_free - Free a doorbell page + * radeon_doorbell_free - Free a doorbell entry * * @rdev: radeon_device pointer - * @doorbell: doorbell page number + * @doorbell: doorbell index * - * Free a doorbell page allocated for use by the driver (all asics) + * Free a doorbell allocated for use by the driver (all asics) */ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell) { - if (doorbell < rdev->doorbell.num_pages) - rdev->doorbell.free[doorbell] = true; + if (doorbell < rdev->doorbell.num_doorbells) + __clear_bit(doorbell, rdev->doorbell.used); } /* diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 1aee32213f66..9f5ff28864f6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -76,9 +76,10 @@ * 2.32.0 - new info request for rings working * 2.33.0 - Add SI tiling mode array query * 2.34.0 - Add CIK tiling mode array query + * 2.35.0 - Add CIK macrotile mode array query */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 34 +#define KMS_DRIVER_MINOR 35 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 281d14c22a47..d3a86e43c012 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -472,6 +472,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev, } /** + * radeon_fence_wait_locked - wait for a fence to signal + * + * @fence: radeon fence object + * + * Wait for the requested fence to signal (all asics). + * Returns 0 if the fence has passed, error for all other cases. 
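
radeon_doorbell_get()/radeon_doorbell_free() above replace the old bool free[1024] array with a bitmap scanned by find_first_zero_bit() and updated with __set_bit()/__clear_bit(). The same allocator can be modelled in portable C as below; the bit handling here is a simplified bit-by-bit stand-in for the kernel bitmap helpers.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define MAX_DOORBELLS	1024
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long used[(MAX_DOORBELLS + BITS_PER_WORD - 1) / BITS_PER_WORD];

static int doorbell_get(uint32_t *doorbell)
{
	uint32_t i;

	for (i = 0; i < MAX_DOORBELLS; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_WORD);

		if (!(used[i / BITS_PER_WORD] & mask)) {
			used[i / BITS_PER_WORD] |= mask;	/* like __set_bit() */
			*doorbell = i;
			return 0;
		}
	}
	return -1;	/* every slot taken */
}

static void doorbell_free(uint32_t doorbell)
{
	if (doorbell < MAX_DOORBELLS)
		used[doorbell / BITS_PER_WORD] &= ~(1UL << (doorbell % BITS_PER_WORD));
}

int main(void)
{
	uint32_t a, b, c;

	doorbell_get(&a);	/* 0 */
	doorbell_get(&b);	/* 1 */
	doorbell_free(a);	/* slot 0 is reusable again */
	doorbell_get(&c);	/* 0: the lowest free index wins */
	printf("a=%u b=%u c=%u\n", (unsigned)a, (unsigned)b, (unsigned)c);
	return 0;
}
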
+ */ +int radeon_fence_wait_locked(struct radeon_fence *fence) +{ + uint64_t seq[RADEON_NUM_RINGS] = {}; + int r; + + if (fence == NULL) { + WARN(1, "Querying an invalid fence : %p !\n", fence); + return -EINVAL; + } + + seq[fence->ring] = fence->seq; + if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) + return 0; + + r = radeon_fence_wait_seq(fence->rdev, seq, false, false); + if (r) + return r; + + fence->seq = RADEON_FENCE_SIGNALED_SEQ; + return 0; +} + +/** * radeon_fence_wait_next_locked - wait for the next fence to signal * * @rdev: radeon device pointer diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 8a83b89d4709..3044e504f4ec 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -651,7 +651,7 @@ retry: radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr, 0, pd_entries, 0, 0); - radeon_ib_sync_to(&ib, vm->fence); + radeon_semaphore_sync_to(ib.semaphore, vm->fence); r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { radeon_ib_free(rdev, &ib); @@ -1209,6 +1209,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, return -ENOMEM; r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4); + if (r) + return r; ib.length_dw = 0; r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); @@ -1220,7 +1222,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, addr, radeon_vm_page_flags(bo_va->flags)); - radeon_ib_sync_to(&ib, vm->fence); + radeon_semaphore_sync_to(ib.semaphore, vm->fence); r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { radeon_ib_free(rdev, &ib); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index bb8710531a1b..55d0b474bd37 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) break; case RADEON_INFO_BACKEND_MAP: if (rdev->family >= CHIP_BONAIRE) - return -EINVAL; + *value = rdev->config.cik.backend_map; else if (rdev->family >= CHIP_TAHITI) *value = rdev->config.si.backend_map; else if (rdev->family >= CHIP_CAYMAN) @@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } break; + case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: + if (rdev->family >= CHIP_BONAIRE) { + value = rdev->config.cik.macrotile_mode_array; + value_size = sizeof(uint32_t)*16; + } else { + DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n"); + return -EINVAL; + } + break; case RADEON_INFO_SI_CP_DMA_COMPUTE: *value = 1; break; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 0c7b8c66301b..0b158f98d287 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, /* Pin framebuffer & get tilling informations */ obj = radeon_fb->obj; rbo = gem_to_radeon_bo(obj); +retry: r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) return r; @@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, &base); if (unlikely(r != 0)) { radeon_bo_unreserve(rbo); + + /* On old GPU like RN50 with little vram pining can fails because + * current fb is taking all space needed. So instead of unpining + * the old buffer after pining the new one, first unpin old one + * and then retry pining new one. 
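
radeon_fence_wait_locked(), added above, waits for a single fence and then caches the outcome by overwriting fence->seq with the signalled marker, so repeated waits return immediately. The standalone sketch below models only that caching; the loop that advances last_signaled is a crude stand-in for the real blocking wait done by radeon_fence_wait_seq().

#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS	4
#define SIGNALED_SEQ	0ULL	/* stand-in for RADEON_FENCE_SIGNALED_SEQ */

struct fence {
	int ring;
	uint64_t seq;		/* overwritten with SIGNALED_SEQ once known to have passed */
};

/* Last sequence each ring has retired; the driver reads this from writeback memory. */
static uint64_t last_signaled[NUM_RINGS];

static int fence_wait_locked(struct fence *fence)
{
	if (!fence)
		return -1;
	if (fence->seq == SIGNALED_SEQ)
		return 0;				/* cached result, nothing to do */
	while (last_signaled[fence->ring] < fence->seq)
		last_signaled[fence->ring]++;		/* crude stand-in for the blocking wait */
	fence->seq = SIGNALED_SEQ;			/* cache success for later callers */
	return 0;
}

int main(void)
{
	struct fence f = { 0, 3 };

	printf("first wait:  %d (seq is now %llu)\n",
	       fence_wait_locked(&f), (unsigned long long)f.seq);
	printf("second wait: %d (short-circuits on the cached marker)\n",
	       fence_wait_locked(&f));
	return 0;
}
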
+ * + * As only master can set mode only master can pin and it is + * unlikely the master client will race with itself especialy + * on those old gpu with single crtc. + * + * We don't shutdown the display controller because new buffer + * will end up in same spot. + */ + if (!atomic && fb && fb != crtc->fb) { + struct radeon_bo *old_rbo; + unsigned long nsize, osize; + + old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj); + osize = radeon_bo_size(old_rbo); + nsize = radeon_bo_size(rbo); + if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) { + radeon_bo_unpin(old_rbo); + radeon_bo_unreserve(old_rbo); + fb = NULL; + goto retry; + } + } return -EINVAL; } radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 866ace070b91..d1385ccc672c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1252,7 +1252,6 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_RS780: case CHIP_RS880: case CHIP_CAYMAN: - case CHIP_ARUBA: case CHIP_BONAIRE: case CHIP_KABINI: case CHIP_KAVERI: @@ -1284,6 +1283,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: + case CHIP_ARUBA: case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 18254e1c3e71..9214403ae173 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib *ib, struct radeon_vm *vm, unsigned size) { - int i, r; + int r; r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true); if (r) { @@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); } ib->is_const_ib = false; - for (i = 0; i < RADEON_NUM_RINGS; ++i) - ib->sync_to[i] = NULL; return 0; } @@ -109,25 +107,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) } /** - * radeon_ib_sync_to - sync to fence before executing the IB - * - * @ib: IB object to add fence to - * @fence: fence to sync to - * - * Sync to the fence before executing the IB - */ -void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence) -{ - struct radeon_fence *other; - - if (!fence) - return; - - other = ib->sync_to[fence->ring]; - ib->sync_to[fence->ring] = radeon_fence_later(fence, other); -} - -/** * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring * * @rdev: radeon_device pointer @@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; - bool need_sync = false; - int i, r = 0; + int r = 0; if (!ib->length_dw || !ring->ready) { /* TODO: Nothings in the ib we should report. 
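
The radeon_legacy_crtc hunk above copes with pin failures on small-VRAM parts such as RN50 by unpinning the outgoing framebuffer first and retrying the new pin once, provided the new buffer is no larger than the old one. A hedged standalone sketch of that fallback, with a fake fixed-size VRAM pool in place of the real pin accounting:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

static size_t vram_total = 64;	/* a tiny pool, like an RN50 with little VRAM */
static size_t vram_used;

struct fb {
	const char *name;
	size_t size;
	bool pinned;
};

static bool pin(struct fb *fb)
{
	if (vram_used + fb->size > vram_total)
		return false;
	vram_used += fb->size;
	fb->pinned = true;
	return true;
}

static void unpin(struct fb *fb)
{
	if (fb->pinned) {
		vram_used -= fb->size;
		fb->pinned = false;
	}
}

static int set_base(struct fb *new, struct fb *old)
{
	if (pin(new))
		goto done;
	/* Pin failed: if the outgoing buffer is at least as big, drop it and retry once. */
	if (old && old->pinned && new->size <= old->size) {
		unpin(old);
		if (pin(new))
			goto done;
	}
	return -1;
done:
	if (old && old != new)
		unpin(old);	/* normal order: release the previous scanout buffer last */
	printf("scanning out %s (%zu/%zu VRAM used)\n", new->name, vram_used, vram_total);
	return 0;
}

int main(void)
{
	struct fb a = { "fb-a", 48, false };
	struct fb b = { "fb-b", 40, false };

	set_base(&a, NULL);	/* fits on its own */
	set_base(&b, &a);	/* does not fit next to fb-a: unpin-then-retry path */
	return 0;
}
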
*/ @@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; } - for (i = 0; i < RADEON_NUM_RINGS; ++i) { - struct radeon_fence *fence = ib->sync_to[i]; - if (radeon_fence_need_sync(fence, ib->ring)) { - need_sync = true; - radeon_semaphore_sync_rings(rdev, ib->semaphore, - fence->ring, ib->ring); - radeon_fence_note_sync(fence, ib->ring); - } - } - /* immediately free semaphore when we don't need to sync */ - if (!need_sync) { - radeon_semaphore_free(rdev, &ib->semaphore, NULL); + + /* sync with other rings */ + r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); + if (r) { + dev_err(rdev->dev, "failed to sync rings (%d)\n", r); + radeon_ring_unlock_undo(rdev, ring); + return r; } + /* if we can't remember our last VM flush then flush now! */ /* XXX figure out why we have to flush for every IB */ if (ib->vm /*&& !ib->vm->last_flush*/) { diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 8dcc20f53d73..2b42aa1914f2 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -29,12 +29,12 @@ */ #include <drm/drmP.h> #include "radeon.h" - +#include "radeon_trace.h" int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { - int r; + int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); if (*semaphore == NULL) { @@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev, (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; + + for (i = 0; i < RADEON_NUM_RINGS; ++i) + (*semaphore)->sync_to[i] = NULL; + return 0; } -void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx, struct radeon_semaphore *semaphore) { - --semaphore->waiters; - radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false); + struct radeon_ring *ring = &rdev->ring[ridx]; + + trace_radeon_semaphore_signale(ridx, semaphore); + + if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) { + --semaphore->waiters; + + /* for debugging lockup only, used by sysfs debug files */ + ring->last_semaphore_signal_addr = semaphore->gpu_addr; + return true; + } + return false; } -void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, +bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx, struct radeon_semaphore *semaphore) { - ++semaphore->waiters; - radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); + struct radeon_ring *ring = &rdev->ring[ridx]; + + trace_radeon_semaphore_wait(ridx, semaphore); + + if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) { + ++semaphore->waiters; + + /* for debugging lockup only, used by sysfs debug files */ + ring->last_semaphore_wait_addr = semaphore->gpu_addr; + return true; + } + return false; +} + +/** + * radeon_semaphore_sync_to - use the semaphore to sync to a fence + * + * @semaphore: semaphore object to add fence to + * @fence: fence to sync to + * + * Sync to the fence using this semaphore object + */ +void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, + struct radeon_fence *fence) +{ + struct radeon_fence *other; + + if (!fence) + return; + + other = semaphore->sync_to[fence->ring]; + 
semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other); } -/* caller must hold ring lock */ +/** + * radeon_semaphore_sync_rings - sync ring to all registered fences + * + * @rdev: radeon_device pointer + * @semaphore: semaphore object to use for sync + * @ring: ring that needs sync + * + * Ensure that all registered fences are signaled before letting + * the ring continue. The caller must hold the ring lock. + */ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, - int signaler, int waiter) + int ring) { - int r; + int i, r; - /* no need to signal and wait on the same ring */ - if (signaler == waiter) { - return 0; - } + for (i = 0; i < RADEON_NUM_RINGS; ++i) { + struct radeon_fence *fence = semaphore->sync_to[i]; - /* prevent GPU deadlocks */ - if (!rdev->ring[signaler].ready) { - dev_err(rdev->dev, "Trying to sync to a disabled ring!"); - return -EINVAL; - } + /* check if we really need to sync */ + if (!radeon_fence_need_sync(fence, ring)) + continue; - r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8); - if (r) { - return r; - } - radeon_semaphore_emit_signal(rdev, signaler, semaphore); - radeon_ring_commit(rdev, &rdev->ring[signaler]); + /* prevent GPU deadlocks */ + if (!rdev->ring[i].ready) { + dev_err(rdev->dev, "Syncing to a disabled ring!"); + return -EINVAL; + } - /* we assume caller has already allocated space on waiters ring */ - radeon_semaphore_emit_wait(rdev, waiter, semaphore); + /* allocate enough space for sync command */ + r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); + if (r) { + return r; + } - /* for debugging lockup only, used by sysfs debug files */ - rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr; - rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr; + /* emit the signal semaphore */ + if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { + /* signaling wasn't successful wait manually */ + radeon_ring_undo(&rdev->ring[i]); + radeon_fence_wait_locked(fence); + continue; + } + + /* we assume caller has already allocated space on waiters ring */ + if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { + /* waiting wasn't successful wait manually */ + radeon_ring_undo(&rdev->ring[i]); + radeon_fence_wait_locked(fence); + continue; + } + + radeon_ring_commit(rdev, &rdev->ring[i]); + radeon_fence_note_sync(fence, ring); + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index 811bca691b36..9f0e18172b6e 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -111,6 +111,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, TP_ARGS(dev, seqno) ); +DECLARE_EVENT_CLASS(radeon_semaphore_request, + + TP_PROTO(int ring, struct radeon_semaphore *sem), + + TP_ARGS(ring, sem), + + TP_STRUCT__entry( + __field(int, ring) + __field(signed, waiters) + __field(uint64_t, gpu_addr) + ), + + TP_fast_assign( + __entry->ring = ring; + __entry->waiters = sem->waiters; + __entry->gpu_addr = sem->gpu_addr; + ), + + TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring, + __entry->waiters, __entry->gpu_addr) +); + +DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale, + + TP_PROTO(int ring, struct radeon_semaphore *sem), + + TP_ARGS(ring, sem) +); + +DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait, + + TP_PROTO(int ring, struct radeon_semaphore *sem), + + TP_ARGS(ring, sem) +); + #endif /* This part must be outside protection */ diff --git 
a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index f9b02e3d6830..aca8cbe8a335 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 8e8f46133532..59be2cfcbb47 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev, return r; } - if (radeon_fence_need_sync(*fence, ring->idx)) { - radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, - ring->idx); - radeon_fence_note_sync(*fence, ring->idx); - } else { - radeon_semaphore_free(rdev, &sem, NULL); - } + radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { cur_size_in_bytes = size_in_bytes; diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 9364129ba292..d700698a1f22 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev) pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; pi->enable_gfx_clock_gating = true; - pi->enable_mg_clock_gating = true; - pi->enable_gfx_dynamic_mgpg = true; /* ??? */ - pi->override_dynamic_mgpg = true; + pi->enable_mg_clock_gating = false; + pi->enable_gfx_dynamic_mgpg = false; + pi->override_dynamic_mgpg = false; pi->enable_auto_thermal_throttling = true; pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */ pi->uvd_dpm = true; /* ??? */ diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 7266805d9786..d4a68af1a279 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) * * Emit a semaphore command (either wait or signal) to the UVD ring. */ -void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, +bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); radeon_ring_write(ring, emit_wait ? 1 : 0); + + return true; } /** diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c index 5b6fa1f62d4e..d722db2cf340 100644 --- a/drivers/gpu/drm/radeon/uvd_v3_1.c +++ b/drivers/gpu/drm/radeon/uvd_v3_1.c @@ -37,7 +37,7 @@ * * Emit a semaphore command (either wait or signal) to the UVD ring. */ -void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, +bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) @@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev, radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); radeon_ring_write(ring, 0x80 | (emit_wait ? 
1 : 0)); + + return true; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 8d5a646ebe6a..07e02c4bf5a8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref) atomic_dec(&bo->glob->bo_count); if (bo->resv == &bo->ttm_resv) reservation_object_fini(&bo->ttm_resv); - + mutex_destroy(&bo->wu_mutex); if (bo->destroy) bo->destroy(bo); else { @@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); INIT_LIST_HEAD(&bo->io_reserve_lru); + mutex_init(&bo->wu_mutex); bo->bdev = bdev; bo->glob = bdev->glob; bo->type = type; @@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev) ; } EXPORT_SYMBOL(ttm_bo_swapout_all); + +/** + * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become + * unreserved + * + * @bo: Pointer to buffer + */ +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) +{ + int ret; + + /* + * In the absense of a wait_unlocked API, + * Use the bo::wu_mutex to avoid triggering livelocks due to + * concurrent use of this function. Note that this use of + * bo::wu_mutex can go away if we change locking order to + * mmap_sem -> bo::reserve. + */ + ret = mutex_lock_interruptible(&bo->wu_mutex); + if (unlikely(ret != 0)) + return -ERESTARTSYS; + if (!ww_mutex_is_locked(&bo->resv->lock)) + goto out_unlock; + ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL); + if (unlikely(ret != 0)) + goto out_unlock; + ww_mutex_unlock(&bo->resv->lock); + +out_unlock: + mutex_unlock(&bo->wu_mutex); + return ret; +} diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 4834c463c38b..15b86a94949d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -350,10 +350,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, goto out2; /* - * Move nonexistent data. NOP. + * Don't move nonexistent data. Clear destination instead. */ - if (old_iomap == NULL && ttm == NULL) + if (old_iomap == NULL && + (ttm == NULL || ttm->state == tt_unpopulated)) { + memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE); goto out2; + } /* * TTM might be null for moves within the same region. diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index ac617f3ecd0c..b249ab9b1eb2 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* * Work around locking order reversal in fault / nopfn * between mmap_sem and bo_reserve: Perform a trylock operation - * for reserve, and if it fails, retry the fault after scheduling. + * for reserve, and if it fails, retry the fault after waiting + * for the buffer to become unreserved. */ - - ret = ttm_bo_reserve(bo, true, true, false, 0); + ret = ttm_bo_reserve(bo, true, true, false, NULL); if (unlikely(ret != 0)) { - if (ret == -EBUSY) - set_need_resched(); + if (ret != -EBUSY) + return VM_FAULT_NOPAGE; + + if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) { + if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { + up_read(&vma->vm_mm->mmap_sem); + (void) ttm_bo_wait_unreserved(bo); + } + + return VM_FAULT_RETRY; + } + + /* + * If we'd want to change locking order to + * mmap_sem -> bo::reserve, we'd use a blocking reserve here + * instead of retrying the fault... 
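
The ttm_bo_vm_fault() change above keeps the trylock on the buffer reservation, but on contention it now drops mmap_sem, sleeps in ttm_bo_wait_unreserved() and returns VM_FAULT_RETRY instead of poking set_need_resched(), avoiding both the mmap_sem versus bo::reserve inversion and the old busy retry. The pthreads sketch below shows only the generic pattern (trylock under an outer lock, and on failure release the outer lock, block on the inner one, then retry); it is not TTM code.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;	/* outer lock, taken first */
static pthread_mutex_t bo_reserve = PTHREAD_MUTEX_INITIALIZER;	/* inner lock */

static void *fault_handler(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&mmap_sem);
		if (pthread_mutex_trylock(&bo_reserve) == 0) {
			printf("fault: got the reservation, handling the fault\n");
			pthread_mutex_unlock(&bo_reserve);
			pthread_mutex_unlock(&mmap_sem);
			return NULL;
		}
		/* Contended: drop the outer lock, wait for the inner one, retry the fault. */
		pthread_mutex_unlock(&mmap_sem);
		printf("fault: reservation busy, waiting and retrying\n");
		pthread_mutex_lock(&bo_reserve);	/* models ttm_bo_wait_unreserved() */
		pthread_mutex_unlock(&bo_reserve);
	}
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&bo_reserve);	/* someone else holds the reservation */
	pthread_create(&t, NULL, fault_handler, NULL);
	sleep(1);
	pthread_mutex_unlock(&bo_reserve);	/* release it; the retried fault now succeeds */
	pthread_join(t, NULL);
	return 0;
}
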
+ */ return VM_FAULT_NOPAGE; } @@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) case 0: break; case -EBUSY: - set_need_resched(); case -ERESTARTSYS: retval = VM_FAULT_NOPAGE; goto out_unlock; diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 6c911789ae5c..479e9418e3d7 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,8 +32,7 @@ #include <linux/sched.h> #include <linux/module.h> -static void ttm_eu_backoff_reservation_locked(struct list_head *list, - struct ww_acquire_ctx *ticket) +static void ttm_eu_backoff_reservation_locked(struct list_head *list) { struct ttm_validate_buffer *entry; @@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list, ticket); - ww_acquire_fini(ticket); + ttm_eu_backoff_reservation_locked(list); + if (ticket) + ww_acquire_fini(ticket); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_eu_backoff_reservation); @@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; - ww_acquire_init(ticket, &reservation_ww_class); + if (ticket) + ww_acquire_init(ticket, &reservation_ww_class); retry: list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; @@ -139,16 +140,17 @@ retry: if (entry->reserved) continue; - - ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); + ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true, + ticket); if (ret == -EDEADLK) { /* uh oh, we lost out, drop every reservation and try * to only reserve this buffer, then start over if * this succeeds. */ + BUG_ON(ticket == NULL); spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list, ticket); + ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, @@ -175,7 +177,8 @@ retry: } } - ww_acquire_done(ticket); + if (ticket) + ww_acquire_done(ticket); spin_lock(&glob->lru_lock); ttm_eu_del_from_lru_locked(list); spin_unlock(&glob->lru_lock); @@ -184,12 +187,14 @@ retry: err: spin_lock(&glob->lru_lock); - ttm_eu_backoff_reservation_locked(list, ticket); + ttm_eu_backoff_reservation_locked(list); spin_unlock(&glob->lru_lock); ttm_eu_list_ref_sub(list); err_fini: - ww_acquire_done(ticket); - ww_acquire_fini(ticket); + if (ticket) { + ww_acquire_done(ticket); + ww_acquire_fini(ticket); + } return ret; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); @@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, } spin_unlock(&bdev->fence_lock); spin_unlock(&glob->lru_lock); - ww_acquire_fini(ticket); + if (ticket) + ww_acquire_fini(ticket); list_for_each_entry(entry, list, head) { if (entry->old_sync_obj) diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index a868176c258a..6fe7b92a82d1 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA + * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a @@ -26,6 +26,12 @@ **************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + * + * While no substantial code is shared, the prime code is inspired by + * drm_prime.c, with + * Authors: + * Dave Airlie <airlied@redhat.com> + * Rob Clark <rob.clark@linaro.org> */ /** @file ttm_ref_object.c * @@ -34,6 +40,7 @@ * and release on file close. */ + /** * struct ttm_object_file * @@ -84,6 +91,9 @@ struct ttm_object_device { struct drm_open_hash object_hash; atomic_t object_count; struct ttm_mem_global *mem_glob; + struct dma_buf_ops ops; + void (*dmabuf_release)(struct dma_buf *dma_buf); + size_t dma_buf_size; }; /** @@ -116,6 +126,8 @@ struct ttm_ref_object { struct ttm_object_file *tfile; }; +static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf); + static inline struct ttm_object_file * ttm_object_file_ref(struct ttm_object_file *tfile) { @@ -416,9 +428,10 @@ out_err: } EXPORT_SYMBOL(ttm_object_file_init); -struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global - *mem_glob, - unsigned int hash_order) +struct ttm_object_device * +ttm_object_device_init(struct ttm_mem_global *mem_glob, + unsigned int hash_order, + const struct dma_buf_ops *ops) { struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); int ret; @@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global spin_lock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); ret = drm_ht_create(&tdev->object_hash, hash_order); + if (ret != 0) + goto out_no_object_hash; - if (likely(ret == 0)) - return tdev; + tdev->ops = *ops; + tdev->dmabuf_release = tdev->ops.release; + tdev->ops.release = ttm_prime_dmabuf_release; + tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + + ttm_round_pot(sizeof(struct file)); + return tdev; +out_no_object_hash: kfree(tdev); return NULL; } @@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) kfree(tdev); } EXPORT_SYMBOL(ttm_object_device_release); + +/** + * get_dma_buf_unless_doomed - get a dma_buf reference if possible. + * + * @dma_buf: Non-refcounted pointer to a struct dma-buf. + * + * Obtain a file reference from a lookup structure that doesn't refcount + * the file, but synchronizes with its release method to make sure it has + * not been freed yet. See for example kref_get_unless_zero documentation. + * Returns true if refcounting succeeds, false otherwise. + * + * Nobody really wants this as a public API yet, so let it mature here + * for some time... + */ +static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf) +{ + return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L; +} + +/** + * ttm_prime_refcount_release - refcount release method for a prime object. + * + * @p_base: Pointer to ttm_base_object pointer. + * + * This is a wrapper that calls the refcount_release founction of the + * underlying object. At the same time it cleans up the prime object. + * This function is called when all references to the base object we + * derive from are gone. 
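get_dma_buf_unless_doomed() above leans on the same idea the comment points at with kref_get_unless_zero(): the lookup structure holds no reference of its own, so a reference is taken only if the count has not already dropped to zero, synchronizing with the release path. A stand-alone C11 sketch of that idiom (invented names and plain atomics; the real count in this patch is the dma-buf's file->f_count):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_long refcount;   /* 0 means the object is being torn down */
};

/* Take a reference only if the object is not already doomed. */
static bool get_unless_zero(struct obj *o)
{
        long cur = atomic_load(&o->refcount);

        while (cur != 0) {
                if (atomic_compare_exchange_weak(&o->refcount, &cur, cur + 1))
                        return true;
                /* The failed CAS reloaded cur; loop and re-check for zero. */
        }
        return false;
}

static void put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                puts("last reference gone; release path runs");
}

int main(void)
{
        struct obj o;

        atomic_init(&o.refcount, 1);    /* initial owner */

        if (get_unless_zero(&o))        /* lookup succeeds: 1 -> 2 */
                put(&o);                /* 2 -> 1 */
        put(&o);                        /* 1 -> 0: object is now doomed */

        return get_unless_zero(&o) ? 1 : 0;    /* must fail from now on */
}

In ttm_prime_handle_to_fd() below, the cached prime->dma_buf is probed this way under prime->mutex; when the probe fails, a fresh dma-buf is exported and accounted instead.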
+ */ +static void ttm_prime_refcount_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct ttm_prime_object *prime; + + *p_base = NULL; + prime = container_of(base, struct ttm_prime_object, base); + BUG_ON(prime->dma_buf != NULL); + mutex_destroy(&prime->mutex); + if (prime->refcount_release) + prime->refcount_release(&base); +} + +/** + * ttm_prime_dmabuf_release - Release method for the dma-bufs we export + * + * @dma_buf: + * + * This function first calls the dma_buf release method the driver + * provides. Then it cleans up our dma_buf pointer used for lookup, + * and finally releases the reference the dma_buf has on our base + * object. + */ +static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) +{ + struct ttm_prime_object *prime = + (struct ttm_prime_object *) dma_buf->priv; + struct ttm_base_object *base = &prime->base; + struct ttm_object_device *tdev = base->tfile->tdev; + + if (tdev->dmabuf_release) + tdev->dmabuf_release(dma_buf); + mutex_lock(&prime->mutex); + if (prime->dma_buf == dma_buf) + prime->dma_buf = NULL; + mutex_unlock(&prime->mutex); + ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size); + ttm_base_object_unref(&base); +} + +/** + * ttm_prime_fd_to_handle - Get a base object handle from a prime fd + * + * @tfile: A struct ttm_object_file identifying the caller. + * @fd: The prime / dmabuf fd. + * @handle: The returned handle. + * + * This function returns a handle to an object that previously exported + * a dma-buf. Note that we don't handle imports yet, because we simply + * have no consumers of that implementation. + */ +int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, + int fd, u32 *handle) +{ + struct ttm_object_device *tdev = tfile->tdev; + struct dma_buf *dma_buf; + struct ttm_prime_object *prime; + struct ttm_base_object *base; + int ret; + + dma_buf = dma_buf_get(fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + + if (dma_buf->ops != &tdev->ops) + return -ENOSYS; + + prime = (struct ttm_prime_object *) dma_buf->priv; + base = &prime->base; + *handle = base->hash.key; + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + + dma_buf_put(dma_buf); + + return ret; +} +EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle); + +/** + * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object + * + * @tfile: Struct ttm_object_file identifying the caller. + * @handle: Handle to the object we're exporting from. + * @flags: flags for dma-buf creation. We just pass them on. + * @prime_fd: The returned file descriptor. + * + */ +int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, + uint32_t handle, uint32_t flags, + int *prime_fd) +{ + struct ttm_object_device *tdev = tfile->tdev; + struct ttm_base_object *base; + struct dma_buf *dma_buf; + struct ttm_prime_object *prime; + int ret; + + base = ttm_base_object_lookup(tfile, handle); + if (unlikely(base == NULL || + base->object_type != ttm_prime_type)) { + ret = -ENOENT; + goto out_unref; + } + + prime = container_of(base, struct ttm_prime_object, base); + if (unlikely(!base->shareable)) { + ret = -EPERM; + goto out_unref; + } + + ret = mutex_lock_interruptible(&prime->mutex); + if (unlikely(ret != 0)) { + ret = -ERESTARTSYS; + goto out_unref; + } + + dma_buf = prime->dma_buf; + if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { + + /* + * Need to create a new dma_buf, with memory accounting. 
+ */ + ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size, + false, true); + if (unlikely(ret != 0)) { + mutex_unlock(&prime->mutex); + goto out_unref; + } + + dma_buf = dma_buf_export(prime, &tdev->ops, + prime->size, flags); + if (IS_ERR(dma_buf)) { + ret = PTR_ERR(dma_buf); + ttm_mem_global_free(tdev->mem_glob, + tdev->dma_buf_size); + mutex_unlock(&prime->mutex); + goto out_unref; + } + + /* + * dma_buf has taken the base object reference + */ + base = NULL; + prime->dma_buf = dma_buf; + } + mutex_unlock(&prime->mutex); + + ret = dma_buf_fd(dma_buf, flags); + if (ret >= 0) { + *prime_fd = ret; + ret = 0; + } else + dma_buf_put(dma_buf); + +out_unref: + if (base) + ttm_base_object_unref(&base); + return ret; +} +EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd); + +/** + * ttm_prime_object_init - Initialize a ttm_prime_object + * + * @tfile: struct ttm_object_file identifying the caller + * @size: The size of the dma_bufs we export. + * @prime: The object to be initialized. + * @shareable: See ttm_base_object_init + * @type: See ttm_base_object_init + * @refcount_release: See ttm_base_object_init + * @ref_obj_release: See ttm_base_object_init + * + * Initializes an object which is compatible with the drm_prime model + * for data sharing between processes and devices. + */ +int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, + struct ttm_prime_object *prime, bool shareable, + enum ttm_object_type type, + void (*refcount_release) (struct ttm_base_object **), + void (*ref_obj_release) (struct ttm_base_object *, + enum ttm_ref_type ref_type)) +{ + mutex_init(&prime->mutex); + prime->size = PAGE_ALIGN(size); + prime->real_type = type; + prime->dma_buf = NULL; + prime->refcount_release = refcount_release; + return ttm_base_object_init(tfile, &prime->base, shareable, + ttm_prime_type, + ttm_prime_refcount_release, + ref_obj_release); +} +EXPORT_SYMBOL(ttm_prime_object_init); diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 2cc6cd91ac11..9f8b690bcf52 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ - vmwgfx_surface.o + vmwgfx_surface.o vmwgfx_prime.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 20d5485eaf98..c7a549694e59 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } dev_priv->tdev = ttm_object_device_init - (dev_priv->mem_global_ref.object, 12); + (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { DRM_ERROR("Unable to initialize TTM object management.\n"); @@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = { static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | - DRIVER_MODESET, + DRIVER_MODESET | DRIVER_PRIME, .load = vmw_driver_load, .unload = vmw_driver_unload, .lastclose = vmw_lastclose, @@ -1235,6 +1235,9 @@ static struct drm_driver driver = { .dumb_map_offset = vmw_dumb_map_offset, .dumb_destroy = vmw_dumb_destroy, + .prime_fd_to_handle = vmw_prime_fd_to_handle, + .prime_handle_to_fd = 
vmw_prime_handle_to_fd, + .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e401d5dbcb96..db85985c7086 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -819,6 +819,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func; /** + * Prime - vmwgfx_prime.c + */ + +extern const struct dma_buf_ops vmw_prime_dmabuf_ops; +extern int vmw_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, + int fd, u32 *handle); +extern int vmw_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t handle, uint32_t flags, + int *prime_fd); + + +/** * Inline helper functions */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c new file mode 100644 index 000000000000..31fe32d8d65a --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c @@ -0,0 +1,137 @@ +/************************************************************************** + * + * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: + * Thomas Hellstrom <thellstrom@vmware.com> + * + */ + +#include "vmwgfx_drv.h" +#include <linux/dma-buf.h> +#include <drm/ttm/ttm_object.h> + +/* + * DMA-BUF attach- and mapping methods. No need to implement + * these until we have other virtual devices use them. 
+ */ + +static int vmw_prime_map_attach(struct dma_buf *dma_buf, + struct device *target_dev, + struct dma_buf_attachment *attach) +{ + return -ENOSYS; +} + +static void vmw_prime_map_detach(struct dma_buf *dma_buf, + struct dma_buf_attachment *attach) +{ +} + +static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + return ERR_PTR(-ENOSYS); +} + +static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgb, + enum dma_data_direction dir) +{ +} + +static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf) +{ + return NULL; +} + +static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ +} + +static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf, + unsigned long page_num) +{ + return NULL; +} + +static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, + unsigned long page_num, void *addr) +{ + +} +static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf, + unsigned long page_num) +{ + return NULL; +} + +static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf, + unsigned long page_num, void *addr) +{ + +} + +static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf, + struct vm_area_struct *vma) +{ + WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n"); + return -ENOSYS; +} + +const struct dma_buf_ops vmw_prime_dmabuf_ops = { + .attach = vmw_prime_map_attach, + .detach = vmw_prime_map_detach, + .map_dma_buf = vmw_prime_map_dma_buf, + .unmap_dma_buf = vmw_prime_unmap_dma_buf, + .release = NULL, + .kmap = vmw_prime_dmabuf_kmap, + .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, + .kunmap = vmw_prime_dmabuf_kunmap, + .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic, + .mmap = vmw_prime_dmabuf_mmap, + .vmap = vmw_prime_dmabuf_vmap, + .vunmap = vmw_prime_dmabuf_vunmap, +}; + +int vmw_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, + int fd, u32 *handle) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_prime_fd_to_handle(tfile, fd, handle); +} + +int vmw_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, + uint32_t handle, uint32_t flags, + int *prime_fd) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 252501a54def..efe2b74c5eb1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -35,7 +35,7 @@ #define VMW_RES_EVICT_ERR_COUNT 10 struct vmw_user_dma_buffer { - struct ttm_base_object base; + struct ttm_prime_object prime; struct vmw_dma_buffer dma; }; @@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, if (unlikely(base == NULL)) return -EINVAL; - if (unlikely(base->object_type != converter->object_type)) + if (unlikely(ttm_base_object_type(base) != converter->object_type)) goto out_bad_resource; res = converter->base_obj_to_res(base); @@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) { struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); - ttm_base_object_kfree(vmw_user_bo, base); + ttm_prime_object_kfree(vmw_user_bo, prime); } static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) @@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) if (unlikely(base == NULL)) return; - vmw_user_bo = container_of(base, struct 
vmw_user_dma_buffer, base); + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, + prime.base); bo = &vmw_user_bo->dma.base; ttm_bo_unref(&bo); } @@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, return ret; tmp = ttm_bo_reference(&user_bo->dma.base); - ret = ttm_base_object_init(tfile, - &user_bo->base, - shareable, - ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + ret = ttm_prime_object_init(tfile, + size, + &user_bo->prime, + shareable, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); if (unlikely(ret != 0)) { ttm_bo_unref(&tmp); goto out_no_base_object; } *p_dma_buf = &user_bo->dma; - *handle = user_bo->base.hash.key; + *handle = user_bo->prime.base.hash.key; out_no_base_object: return ret; @@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, return -EPERM; vmw_user_bo = vmw_user_dma_buffer(bo); - return (vmw_user_bo->base.tfile == tfile || - vmw_user_bo->base.shareable) ? 0 : -EPERM; + return (vmw_user_bo->prime.base.tfile == tfile || + vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; } int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, @@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, return -ESRCH; } - if (unlikely(base->object_type != ttm_buffer_type)) { + if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { ttm_base_object_unref(&base); printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", (unsigned long)handle); return -EINVAL; } - vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); + vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, + prime.base); (void)ttm_bo_reference(&vmw_user_bo->dma.base); ttm_base_object_unref(&base); *out = &vmw_user_bo->dma; @@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, return -EINVAL; user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); - return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL); + return ttm_ref_object_add(tfile, &user_bo->prime.base, + TTM_REF_USAGE, NULL); } /* @@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv, goto out_no_dmabuf; tmp = ttm_bo_reference(&vmw_user_bo->dma.base); - ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, - &vmw_user_bo->base, - false, - ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile, + args->size, + &vmw_user_bo->prime, + false, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); if (unlikely(ret != 0)) goto out_no_base_object; - args->handle = vmw_user_bo->base.hash.key; + args->handle = vmw_user_bo->prime.base.hash.key; out_no_base_object: ttm_bo_unref(&tmp); @@ -994,7 +999,6 @@ void vmw_resource_unreserve(struct vmw_resource *res, */ static int vmw_resource_check_buffer(struct vmw_resource *res, - struct ww_acquire_ctx *ticket, bool interruptible, struct ttm_validate_buffer *val_buf) { @@ -1011,7 +1015,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, INIT_LIST_HEAD(&val_list); val_buf->bo = ttm_bo_reference(&res->backup->base); list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(ticket, &val_list); + ret = ttm_eu_reserve_buffers(NULL, &val_list); if (unlikely(ret != 0)) goto out_no_reserve; @@ -1029,7 +1033,7 @@ vmw_resource_check_buffer(struct vmw_resource *res, return 0; out_no_validate: - ttm_eu_backoff_reservation(ticket, &val_list); + ttm_eu_backoff_reservation(NULL, &val_list); out_no_reserve: ttm_bo_unref(&val_buf->bo); if (backup_dirty) @@ -1074,8 +1078,7 @@ int 
vmw_resource_reserve(struct vmw_resource *res, bool no_backup) * @val_buf: Backup buffer information. */ static void -vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, - struct ttm_validate_buffer *val_buf) +vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf) { struct list_head val_list; @@ -1084,7 +1087,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, INIT_LIST_HEAD(&val_list); list_add_tail(&val_buf->head, &val_list); - ttm_eu_backoff_reservation(ticket, &val_list); + ttm_eu_backoff_reservation(NULL, &val_list); ttm_bo_unref(&val_buf->bo); } @@ -1099,14 +1102,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) { struct ttm_validate_buffer val_buf; const struct vmw_res_func *func = res->func; - struct ww_acquire_ctx ticket; int ret; BUG_ON(!func->may_evict); val_buf.bo = NULL; - ret = vmw_resource_check_buffer(res, &ticket, interruptible, - &val_buf); + ret = vmw_resource_check_buffer(res, interruptible, &val_buf); if (unlikely(ret != 0)) return ret; @@ -1121,7 +1122,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) res->backup_dirty = true; res->res_dirty = false; out_no_unbind: - vmw_resource_backoff_reservation(&ticket, &val_buf); + vmw_resource_backoff_reservation(&val_buf); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 582814339748..7de2ea8bd553 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -38,7 +38,7 @@ * @size: TTM accounting size for the surface. */ struct vmw_user_surface { - struct ttm_base_object base; + struct ttm_prime_object prime; struct vmw_surface srf; uint32_t size; uint32_t backup_handle; @@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv, static struct vmw_resource * vmw_user_surface_base_to_res(struct ttm_base_object *base) { - return &(container_of(base, struct vmw_user_surface, base)->srf.res); + return &(container_of(base, struct vmw_user_surface, + prime.base)->srf.res); } /** @@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) kfree(srf->offsets); kfree(srf->sizes); kfree(srf->snooper.image); - ttm_base_object_kfree(user_srf, base); + ttm_prime_object_kfree(user_srf, prime); ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } @@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) { struct ttm_base_object *base = *p_base; struct vmw_user_surface *user_srf = - container_of(base, struct vmw_user_surface, base); + container_of(base, struct vmw_user_surface, prime.base); struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; @@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, } srf->snooper.crtc = NULL; - user_srf->base.shareable = false; - user_srf->base.tfile = NULL; + user_srf->prime.base.shareable = false; + user_srf->prime.base.tfile = NULL; /** * From this point, the generic resource management functions @@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, goto out_unlock; tmp = vmw_resource_reference(&srf->res); - ret = ttm_base_object_init(tfile, &user_srf->base, - req->shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); + ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, + req->shareable, VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -813,7 +814,7 @@ int 
vmw_surface_define_ioctl(struct drm_device *dev, void *data, goto out_unlock; } - rep->sid = user_srf->base.hash.key; + rep->sid = user_srf->prime.base.hash.key; vmw_resource_unreference(&res); ttm_read_unlock(&vmaster->lock); @@ -823,7 +824,7 @@ out_no_copy: out_no_offsets: kfree(srf->sizes); out_no_sizes: - ttm_base_object_kfree(user_srf, base); + ttm_prime_object_kfree(user_srf, prime); out_no_user_srf: ttm_mem_global_free(vmw_mem_glob(dev_priv), size); out_unlock: @@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (unlikely(base->object_type != VMW_RES_SURFACE)) + if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) goto out_bad_resource; - user_srf = container_of(base, struct vmw_user_surface, base); + user_srf = container_of(base, struct vmw_user_surface, prime.base); srf = &user_srf->srf; - ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, &user_srf->prime.base, + TTM_REF_USAGE, NULL); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_no_reference; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index b3ab9d43bb3e..52d548f1dc1d 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -656,6 +656,7 @@ config SENSORS_LM75 - Analog Devices ADT75 - Dallas Semiconductor DS75, DS1775 and DS7505 + - Global Mixed-mode Technology (GMT) G751 - Maxim MAX6625 and MAX6626 - Microchip MCP980x - National Semiconductor LM75, LM75A diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 8d40da314a8e..6a34f7f48eb9 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -602,9 +602,8 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource) /* Create a symlink to domain objects */ resource->domain_devices[i] = NULL; - status = acpi_bus_get_device(element->reference.handle, - &resource->domain_devices[i]); - if (ACPI_FAILURE(status)) + if (acpi_bus_get_device(element->reference.handle, + &resource->domain_devices[i])) continue; obj = resource->domain_devices[i]; diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index c03b490bba81..7e3ef134f1d2 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */ ds1775, ds75, ds7505, + g751, lm75, lm75a, max6625, @@ -208,6 +209,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) data->resolution = 12; data->sample_time = HZ / 4; break; + case g751: case lm75: case lm75a: data->resolution = 9; @@ -296,6 +298,7 @@ static const struct i2c_device_id lm75_ids[] = { { "ds1775", ds1775, }, { "ds75", ds75, }, { "ds7505", ds7505, }, + { "g751", g751, }, { "lm75", lm75, }, { "lm75a", lm75a, }, { "max6625", max6625, }, diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index d17325db0ea3..cf811c1a1475 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -274,6 +274,8 @@ static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; static const u16 NCT6775_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; +static const u16 NCT6775_REG_TEMP_MON[] = { 0x73, 0x75, 0x77 }; + static const u16 NCT6775_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = { 0, 0x152, 0x252, 0x628, 0x629, 0x62A }; static const u16 NCT6775_REG_TEMP_HYST[ARRAY_SIZE(NCT6775_REG_TEMP)] = { @@ -454,6 +456,7 @@ static const u16 NCT6779_REG_CRITICAL_PWM[] = { 0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 }; 
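One convention the nct6775 hunks in this patch rely on is worth spelling out: each chip is described by per-channel arrays of register addresses, and an entry left at 0 means that register does not exist there, so the consumers added here (the REG_WEIGHT_TEMP_SEL and reg_temp_mon checks further down) test for 0 before reading. A tiny self-contained sketch of that convention, with the table modelled on the NCT6791 weight-select registers from this patch and a made-up read helper:

#include <stdio.h>

/* Per-channel register map: 0 means "not implemented on this channel". */
static const unsigned short reg_weight_temp_sel[6] = { 0, 0x239 };

/* Stand-in for nct6775_read_value(); pretends every read returns 0x1f. */
static int read_value(unsigned short reg)
{
        (void)reg;
        return 0x1f;
}

int main(void)
{
        for (int i = 0; i < 6; i++) {
                if (!reg_weight_temp_sel[i])
                        continue;       /* skip channels without the register */
                printf("channel %d: weight temp sel = 0x%02x\n",
                       i, read_value(reg_weight_temp_sel[i]) & 0x1f);
        }
        return 0;
}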
static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 }; +static const u16 NCT6779_REG_TEMP_MON[] = { 0x73, 0x75, 0x77, 0x79, 0x7b }; static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = { 0x18, 0x152 }; static const u16 NCT6779_REG_TEMP_HYST[ARRAY_SIZE(NCT6779_REG_TEMP)] = { @@ -507,6 +510,13 @@ static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1] #define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28 +static const u16 NCT6791_REG_WEIGHT_TEMP_SEL[6] = { 0, 0x239 }; +static const u16 NCT6791_REG_WEIGHT_TEMP_STEP[6] = { 0, 0x23a }; +static const u16 NCT6791_REG_WEIGHT_TEMP_STEP_TOL[6] = { 0, 0x23b }; +static const u16 NCT6791_REG_WEIGHT_DUTY_STEP[6] = { 0, 0x23c }; +static const u16 NCT6791_REG_WEIGHT_TEMP_BASE[6] = { 0, 0x23d }; +static const u16 NCT6791_REG_WEIGHT_DUTY_BASE[6] = { 0, 0x23e }; + static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = { 0x459, 0x45A, 0x45B, 0x568, 0x45D }; @@ -534,6 +544,7 @@ static const u16 NCT6106_REG_IN[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 }; static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 }; +static const u16 NCT6106_REG_TEMP_MON[] = { 0x18, 0x19, 0x1a }; static const u16 NCT6106_REG_TEMP_HYST[] = { 0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 }; static const u16 NCT6106_REG_TEMP_OVER[] = { @@ -1307,6 +1318,9 @@ static void nct6775_update_pwm(struct device *dev) if (reg & 0x80) data->pwm[2][i] = 0; + if (!data->REG_WEIGHT_TEMP_SEL[i]) + continue; + reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); data->pwm_weight_temp_sel[i] = reg & 0x1f; /* If weight is disabled, report weight source as 0 */ @@ -2852,6 +2866,9 @@ static umode_t nct6775_pwm_is_visible(struct kobject *kobj, if (!(data->has_pwm & (1 << pwm))) return 0; + if ((nr >= 14 && nr <= 18) || nr == 21) /* weight */ + if (!data->REG_WEIGHT_TEMP_SEL[pwm]) + return 0; if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */ return 0; if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */ @@ -2945,11 +2962,11 @@ static struct sensor_device_template *nct6775_attributes_pwm_template[] = { &sensor_dev_template_pwm_step_down_time, &sensor_dev_template_pwm_start, &sensor_dev_template_pwm_floor, - &sensor_dev_template_pwm_weight_temp_sel, + &sensor_dev_template_pwm_weight_temp_sel, /* 14 */ &sensor_dev_template_pwm_weight_temp_step, &sensor_dev_template_pwm_weight_temp_step_tol, &sensor_dev_template_pwm_weight_temp_step_base, - &sensor_dev_template_pwm_weight_duty_step, + &sensor_dev_template_pwm_weight_duty_step, /* 18 */ &sensor_dev_template_pwm_max, /* 19 */ &sensor_dev_template_pwm_step, /* 20 */ &sensor_dev_template_pwm_weight_duty_base, /* 21 */ @@ -3253,9 +3270,9 @@ static int nct6775_probe(struct platform_device *pdev) int i, s, err = 0; int src, mask, available; const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config; - const u16 *reg_temp_alternate, *reg_temp_crit; + const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit; const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL; - int num_reg_temp; + int num_reg_temp, num_reg_temp_mon; u8 cr2a; struct attribute_group *group; struct device *hwmon_dev; @@ -3338,7 +3355,9 @@ static int nct6775_probe(struct platform_device *pdev) data->BEEP_BITS = NCT6106_BEEP_BITS; reg_temp = NCT6106_REG_TEMP; + reg_temp_mon = NCT6106_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6106_REG_TEMP_MON); reg_temp_over = NCT6106_REG_TEMP_OVER; reg_temp_hyst = NCT6106_REG_TEMP_HYST; reg_temp_config = 
NCT6106_REG_TEMP_CONFIG; @@ -3410,7 +3429,9 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_BEEP = NCT6775_REG_BEEP; reg_temp = NCT6775_REG_TEMP; + reg_temp_mon = NCT6775_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON); reg_temp_over = NCT6775_REG_TEMP_OVER; reg_temp_hyst = NCT6775_REG_TEMP_HYST; reg_temp_config = NCT6775_REG_TEMP_CONFIG; @@ -3480,7 +3501,9 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_BEEP = NCT6776_REG_BEEP; reg_temp = NCT6775_REG_TEMP; + reg_temp_mon = NCT6775_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6775_REG_TEMP_MON); reg_temp_over = NCT6775_REG_TEMP_OVER; reg_temp_hyst = NCT6775_REG_TEMP_HYST; reg_temp_config = NCT6776_REG_TEMP_CONFIG; @@ -3554,7 +3577,9 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_BEEP = NCT6776_REG_BEEP; reg_temp = NCT6779_REG_TEMP; + reg_temp_mon = NCT6779_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON); reg_temp_over = NCT6779_REG_TEMP_OVER; reg_temp_hyst = NCT6779_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; @@ -3603,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_PWM[0] = NCT6775_REG_PWM; data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT; data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP; - data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE; + data->REG_PWM[5] = NCT6791_REG_WEIGHT_DUTY_STEP; + data->REG_PWM[6] = NCT6791_REG_WEIGHT_DUTY_BASE; data->REG_PWM_READ = NCT6775_REG_PWM_READ; data->REG_PWM_MODE = NCT6776_REG_PWM_MODE; data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK; @@ -3620,15 +3645,17 @@ static int nct6775_probe(struct platform_device *pdev) data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET; data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE; data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL; - data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL; - data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP; - data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL; - data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE; + data->REG_WEIGHT_TEMP_SEL = NCT6791_REG_WEIGHT_TEMP_SEL; + data->REG_WEIGHT_TEMP[0] = NCT6791_REG_WEIGHT_TEMP_STEP; + data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL; + data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE; data->REG_ALARM = NCT6791_REG_ALARM; data->REG_BEEP = NCT6776_REG_BEEP; reg_temp = NCT6779_REG_TEMP; + reg_temp_mon = NCT6779_REG_TEMP_MON; num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP); + num_reg_temp_mon = ARRAY_SIZE(NCT6779_REG_TEMP_MON); reg_temp_over = NCT6779_REG_TEMP_OVER; reg_temp_hyst = NCT6779_REG_TEMP_HYST; reg_temp_config = NCT6779_REG_TEMP_CONFIG; @@ -3729,6 +3756,50 @@ static int nct6775_probe(struct platform_device *pdev) s++; } + /* + * Repeat with temperatures used for fan control. + * This set of registers does not support limits. 
+ */ + for (i = 0; i < num_reg_temp_mon; i++) { + if (reg_temp_mon[i] == 0) + continue; + + src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f; + if (!src || (mask & (1 << src))) + continue; + + if (src >= data->temp_label_num || + !strlen(data->temp_label[src])) { + dev_info(dev, + "Invalid temperature source %d at index %d, source register 0x%x, temp register 0x%x\n", + src, i, data->REG_TEMP_SEL[i], + reg_temp_mon[i]); + continue; + } + + mask |= 1 << src; + + /* Use fixed index for SYSTIN(1), CPUTIN(2), AUXTIN(3) */ + if (src <= data->temp_fixed_num) { + if (data->have_temp & (1 << (src - 1))) + continue; + data->have_temp |= 1 << (src - 1); + data->have_temp_fixed |= 1 << (src - 1); + data->reg_temp[0][src - 1] = reg_temp_mon[i]; + data->temp_src[src - 1] = src; + continue; + } + + if (s >= NUM_TEMP) + continue; + + /* Use dynamic index for other sources */ + data->have_temp |= 1 << s; + data->reg_temp[0][s] = reg_temp_mon[i]; + data->temp_src[s] = src; + s++; + } + #ifdef USE_ALTERNATE /* * Go through the list of alternate temp registers and enable diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 6df23502059a..6be57c38638d 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -22,6 +22,7 @@ #include <linux/socket.h> #include <linux/in.h> #include <linux/in6.h> +#include <linux/llist.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> #include <target/target_core_base.h> @@ -489,6 +490,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) kref_init(&isert_conn->conn_kref); kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); + mutex_init(&isert_conn->conn_comp_mutex); spin_lock_init(&isert_conn->conn_lock); cma_id->context = isert_conn; @@ -843,14 +845,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn, } static void -isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr) +isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, + struct ib_send_wr *send_wr, bool coalesce) { + struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; + isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; send_wr->opcode = IB_WR_SEND; - send_wr->send_flags = IB_SEND_SIGNALED; - send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0]; + send_wr->sg_list = &tx_desc->tx_sg[0]; send_wr->num_sge = isert_cmd->tx_desc.num_sge; + /* + * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED + * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. 
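The coalescing this comment describes — only every ISERT_COMP_BATCH_COUNT-th send requests a completion, and the unsignaled descriptors queued in between are collected on a list that the signaled one later drains — can be modelled with a plain mutex and linked list standing in for conn_comp_mutex and the llist. A single-threaded userspace sketch (invented names, not the driver code):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define COMP_BATCH_COUNT 8

struct tx_desc {
        struct tx_desc *comp_next;   /* link in the unsignaled backlog */
        struct tx_desc *comp_batch;  /* backlog adopted by a signaled desc */
        int id;
};

static pthread_mutex_t comp_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct tx_desc *pending_list; /* descriptors still awaiting completion */
static int pending_count;

/* Returns 1 when this post should be "signaled" (request a completion). */
static int post_send(struct tx_desc *d)
{
        int signaled = 0;

        pthread_mutex_lock(&comp_mutex);
        if (++pending_count < COMP_BATCH_COUNT) {
                d->comp_next = pending_list;    /* queue silently */
                pending_list = d;
        } else {
                pending_count = 0;
                d->comp_batch = pending_list;   /* adopt the backlog */
                pending_list = NULL;
                signaled = 1;
        }
        pthread_mutex_unlock(&comp_mutex);
        return signaled;
}

/* A signaled completion drains its adopted backlog, then itself. */
static void send_completion(struct tx_desc *d)
{
        for (struct tx_desc *t = d->comp_batch; t; t = t->comp_next)
                printf("complete tx_desc %d\n", t->id);
        printf("complete tx_desc %d (signaled)\n", d->id);
}

int main(void)
{
        struct tx_desc d[2 * COMP_BATCH_COUNT] = { { 0 } };

        for (int i = 0; i < 2 * COMP_BATCH_COUNT; i++) {
                d[i].id = i;
                if (post_send(&d[i]))
                        send_completion(&d[i]);
        }
        return 0;
}

In the patch only SCSI responses and data-in opt into this (coalesce = true); NOP-In, logout, task-management, reject and text responses pass false so they always signal immediately.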
+ */ + mutex_lock(&isert_conn->conn_comp_mutex); + if (coalesce && + ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { + llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); + mutex_unlock(&isert_conn->conn_comp_mutex); + return; + } + isert_conn->conn_comp_batch = 0; + tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); + mutex_unlock(&isert_conn->conn_comp_mutex); + + send_wr->send_flags = IB_SEND_SIGNALED; } static int @@ -1582,8 +1602,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc, } static void -isert_send_completion(struct iser_tx_desc *tx_desc, - struct isert_conn *isert_conn) +__isert_send_completion(struct iser_tx_desc *tx_desc, + struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; struct isert_cmd *isert_cmd = tx_desc->isert_cmd; @@ -1624,6 +1644,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } static void +isert_send_completion(struct iser_tx_desc *tx_desc, + struct isert_conn *isert_conn) +{ + struct llist_node *llnode = tx_desc->comp_llnode_batch; + struct iser_tx_desc *t; + /* + * Drain coalesced completion llist starting from comp_llnode_batch + * setup in isert_init_send_wr(), and then complete trailing tx_desc. + */ + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + __isert_send_completion(t, isert_conn); + } + __isert_send_completion(tx_desc, isert_conn); +} + +static void isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; @@ -1793,7 +1831,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1813,7 +1851,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn, &isert_cmd->tx_desc.iscsi_header, nopout_response); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1831,7 +1869,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1849,7 +1887,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1881,7 +1919,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn) tx_dsg->lkey = isert_conn->conn_mr->lkey; isert_cmd->tx_desc.num_sge = 2; - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1921,7 +1959,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) tx_dsg->lkey = 
isert_conn->conn_mr->lkey; isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_cmd, send_wr); + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1991,8 +2029,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { data_left = se_cmd->data_length; - iscsit_increment_maxcmdsn(cmd, conn->sess); - cmd->stat_sn = conn->stat_sn++; } else { sg_off = cmd->write_data_done / PAGE_SIZE; data_left = se_cmd->data_length - cmd->write_data_done; @@ -2204,8 +2240,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { data_left = se_cmd->data_length; - iscsit_increment_maxcmdsn(cmd, conn->sess); - cmd->stat_sn = conn->stat_sn++; } else { sg_off = cmd->write_data_done / PAGE_SIZE; data_left = se_cmd->data_length - cmd->write_data_done; @@ -2259,18 +2293,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, data_len = min(data_left, rdma_write_max); wr->cur_rdma_length = data_len; - spin_lock_irqsave(&isert_conn->conn_lock, flags); - fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, - struct fast_reg_descriptor, list); - list_del(&fr_desc->list); - spin_unlock_irqrestore(&isert_conn->conn_lock, flags); - wr->fr_desc = fr_desc; + /* if there is a single dma entry, dma mr is sufficient */ + if (count == 1) { + ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]); + ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]); + ib_sge->lkey = isert_conn->conn_mr->lkey; + wr->fr_desc = NULL; + } else { + spin_lock_irqsave(&isert_conn->conn_lock, flags); + fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, + struct fast_reg_descriptor, list); + list_del(&fr_desc->list); + spin_unlock_irqrestore(&isert_conn->conn_lock, flags); + wr->fr_desc = fr_desc; - ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, - ib_sge, offset, data_len); - if (ret) { - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); - goto unmap_sg; + ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, + ib_sge, offset, data_len); + if (ret) { + list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); + goto unmap_sg; + } } return 0; @@ -2306,10 +2348,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) * Build isert_conn->tx_desc for iSCSI response PDU and attach */ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); - iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *) + iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *) &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); - isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr); + isert_init_send_wr(isert_conn, isert_cmd, + &isert_cmd->tx_desc.send_wr, true); atomic_inc(&isert_conn->post_send_buf_count); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 631f2090f0b8..691f90ff2d83 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -43,6 +43,8 @@ struct iser_tx_desc { struct ib_sge tx_sg[2]; int num_sge; struct isert_cmd *isert_cmd; + struct llist_node *comp_llnode_batch; + struct llist_node comp_llnode; struct ib_send_wr send_wr; } __packed; @@ -121,6 +123,10 @@ struct isert_conn { int conn_frwr_pool_size; /* lock to protect frwr_pool */ spinlock_t conn_lock; +#define ISERT_COMP_BATCH_COUNT 8 + int conn_comp_batch; + struct llist_head 
conn_comp_llist; + struct mutex conn_comp_mutex; }; #define ISERT_MAX_CQ 64 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 6c923c7039a1..520a7e5a490b 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) /* XXX(hch): this is a horrible layering violation.. */ spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); - ioctx->cmd.transport_state |= CMD_T_LUN_STOP; ioctx->cmd.transport_state &= ~CMD_T_ACTIVE; spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); - - complete(&ioctx->cmd.transport_lun_stop_comp); break; case SRPT_STATE_CMD_RSP_SENT: /* @@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) * not been received in time. */ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); - spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); - ioctx->cmd.transport_state |= CMD_T_LUN_STOP; - spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); break; case SRPT_STATE_MGMT_RSP_SENT: @@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, { struct se_cmd *cmd; enum srpt_command_state state; - unsigned long flags; cmd = &ioctx->cmd; state = srpt_get_cmd_state(ioctx); @@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, __func__, __LINE__, state); break; case SRPT_RDMA_WRITE_LAST: - spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags); - ioctx->cmd.transport_state |= CMD_T_LUN_STOP; - spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags); break; default: printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__, diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index e47dcb9d1e91..5cefb479c707 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, { struct sk_buff *skb; struct sock *sk = sock->sk; - struct sockaddr_mISDN *maddr; int copied, err; @@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (!skb) return err; - if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { - msg->msg_namelen = sizeof(struct sockaddr_mISDN); - maddr = (struct sockaddr_mISDN *)msg->msg_name; + if (msg->msg_name) { + struct sockaddr_mISDN *maddr = msg->msg_name; + maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || @@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock, maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } - } else { - if (msg->msg_namelen) - printk(KERN_WARNING "%s: too small namelen %d\n", - __func__, msg->msg_namelen); - msg->msg_namelen = 0; + msg->msg_namelen = sizeof(*maddr); } copied = skb->len + MISDN_HEADER_LEN; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 74630e94fa3b..d6447b3f7409 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -697,7 +697,7 @@ static int genphy_config_advert(struct phy_device *phydev) * to the values in phydev. Assumes that the values are valid. * Please see phy_sanitize_settings(). 
*/ -static int genphy_setup_forced(struct phy_device *phydev) +int genphy_setup_forced(struct phy_device *phydev) { int err; int ctl = 0; @@ -716,7 +716,7 @@ static int genphy_setup_forced(struct phy_device *phydev) return err; } - +EXPORT_SYMBOL(genphy_setup_forced); /** * genphy_restart_aneg - Enable and Restart Autonegotiation diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 69b482bce7d2..508e4359338b 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -3,7 +3,7 @@ * * Author: Kriston Carson * - * Copyright (c) 2005, 2009 Freescale Semiconductor, Inc. + * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -18,6 +18,11 @@ #include <linux/ethtool.h> #include <linux/phy.h> +/* Vitesse Extended Page Magic Register(s) */ +#define MII_VSC82X4_EXT_PAGE_16E 0x10 +#define MII_VSC82X4_EXT_PAGE_17E 0x11 +#define MII_VSC82X4_EXT_PAGE_18E 0x12 + /* Vitesse Extended Control Register 1 */ #define MII_VSC8244_EXT_CON1 0x17 #define MII_VSC8244_EXTCON1_INIT 0x0000 @@ -54,7 +59,13 @@ #define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */ #define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004 +/* Vitesse Extended Page Access Register */ +#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f + +#define PHY_ID_VSC8234 0x000fc620 #define PHY_ID_VSC8244 0x000fc6c0 +#define PHY_ID_VSC8574 0x000704a0 +#define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -118,7 +129,9 @@ static int vsc82xx_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) err = phy_write(phydev, MII_VSC8244_IMASK, - phydev->drv->phy_id == PHY_ID_VSC8244 ? + (phydev->drv->phy_id == PHY_ID_VSC8234 || + phydev->drv->phy_id == PHY_ID_VSC8244 || + phydev->drv->phy_id == PHY_ID_VSC8574) ? MII_VSC8244_IMASK_MASK : MII_VSC8221_IMASK_MASK); else { @@ -149,21 +162,114 @@ static int vsc8221_config_init(struct phy_device *phydev) */ } -/* Vitesse 824x */ +/* vsc82x4_config_autocross_enable - Enable auto MDI/MDI-X for forced links + * @phydev: target phy_device struct + * + * Enable auto MDI/MDI-X when in 10/100 forced link speeds by writing + * special values in the VSC8234/VSC8244 extended reserved registers + */ +static int vsc82x4_config_autocross_enable(struct phy_device *phydev) +{ + int ret; + + if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed > SPEED_100) + return 0; + + /* map extended registers set 0x10 - 0x1e */ + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x52b5); + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_18E, 0x0012); + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_17E, 0x2803); + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_16E, 0x87fa); + /* map standard registers set 0x10 - 0x1e */ + if (ret >= 0) + ret = phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000); + else + phy_write(phydev, MII_VSC82X4_EXT_PAGE_ACCESS, 0x0000); + + return ret; +} + +/* vsc82x4_config_aneg - restart auto-negotiation or write BMCR + * @phydev: target phy_device struct + * + * Description: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. 
If it is not + * enabled, then we write the BMCR and also start the auto + * MDI/MDI-X feature + */ +static int vsc82x4_config_aneg(struct phy_device *phydev) +{ + int ret; + + /* Enable auto MDI/MDI-X when in 10/100 forced link speeds by + * writing special values in the VSC8234 extended reserved registers + */ + if (phydev->autoneg != AUTONEG_ENABLE && phydev->speed <= SPEED_100) { + ret = genphy_setup_forced(phydev); + + if (ret < 0) /* error */ + return ret; + + return vsc82x4_config_autocross_enable(phydev); + } + + return genphy_config_aneg(phydev); +} + +/* Vitesse 82xx */ static struct phy_driver vsc82xx_driver[] = { { + .phy_id = PHY_ID_VSC8234, + .name = "Vitesse VSC8234", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_VSC8244, .name = "Vitesse VSC8244", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, - .config_aneg = &genphy_config_aneg, + .config_aneg = &vsc82x4_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_VSC8574, + .name = "Vitesse VSC8574", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = PHY_ID_VSC8662, + .name = "Vitesse VSC8662", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { /* Vitesse 8221 */ .phy_id = PHY_ID_VSC8221, .phy_id_mask = 0x000ffff0, @@ -207,7 +313,10 @@ module_init(vsc82xx_init); module_exit(vsc82xx_exit); static struct mdio_device_id __maybe_unused vitesse_tbl[] = { + { PHY_ID_VSC8234, 0x000ffff0 }, { PHY_ID_VSC8244, 0x000fffc0 }, + { PHY_ID_VSC8574, 0x000ffff0 }, + { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, { } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 5f66e30d9823..82ee6ed954cb 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, if (error < 0) goto end; - m->msg_namelen = 0; - if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f3fce412c0c1..51073721e224 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -24,7 +24,7 @@ #include <linux/ipv6.h> /* Version Information */ -#define DRIVER_VERSION "v1.01.0 (2013/08/12)" +#define DRIVER_VERSION "v1.02.0 (2013/10/28)" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" #define MODULENAME "r8152" @@ 
-307,22 +307,22 @@ enum rtl8152_flags { #define MCU_TYPE_USB 0x0000 struct rx_desc { - u32 opts1; + __le32 opts1; #define RX_LEN_MASK 0x7fff - u32 opts2; - u32 opts3; - u32 opts4; - u32 opts5; - u32 opts6; + __le32 opts2; + __le32 opts3; + __le32 opts4; + __le32 opts5; + __le32 opts6; }; struct tx_desc { - u32 opts1; + __le32 opts1; #define TX_FS (1 << 31) /* First segment of a packet */ #define TX_LS (1 << 30) /* Final segment of a packet */ #define TX_LEN_MASK 0x3ffff - u32 opts2; + __le32 opts2; #define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */ #define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */ #define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */ @@ -365,6 +365,7 @@ struct r8152 { struct mii_if_info mii; int intr_interval; u32 msg_enable; + u32 tx_qlen; u16 ocp_base; u8 *intr_buff; u8 version; @@ -876,7 +877,7 @@ static void write_bulk_callback(struct urb *urb) static void intr_callback(struct urb *urb) { struct r8152 *tp; - __u16 *d; + __le16 *d; int status = urb->status; int res; @@ -1136,14 +1137,14 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb) static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) { - u32 remain; + int remain; u8 *tx_data; tx_data = agg->head; agg->skb_num = agg->skb_len = 0; - remain = rx_buf_sz - sizeof(struct tx_desc); + remain = rx_buf_sz; - while (remain >= ETH_ZLEN) { + while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; struct sk_buff *skb; unsigned int len; @@ -1152,12 +1153,14 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) if (!skb) break; + remain -= sizeof(*tx_desc); len = skb->len; if (remain < len) { skb_queue_head(&tp->tx_queue, skb); break; } + tx_data = tx_agg_align(tx_data); tx_desc = (struct tx_desc *)tx_data; tx_data += sizeof(*tx_desc); @@ -1167,11 +1170,18 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) agg->skb_len += len; dev_kfree_skb_any(skb); - tx_data = tx_agg_align(tx_data + len); - remain = rx_buf_sz - sizeof(*tx_desc) - - (u32)((void *)tx_data - agg->head); + tx_data += len; + remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); } + netif_tx_lock(tp->netdev); + + if (netif_queue_stopped(tp->netdev) && + skb_queue_len(&tp->tx_queue) < tp->tx_qlen) + netif_wake_queue(tp->netdev); + + netif_tx_unlock(tp->netdev); + usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), agg->head, (int)(tx_data - (u8 *)agg->head), (usb_complete_t)write_bulk_callback, agg); @@ -1188,7 +1198,6 @@ static void rx_bottom(struct r8152 *tp) list_for_each_safe(cursor, next, &tp->rx_done) { struct rx_desc *rx_desc; struct rx_agg *agg; - unsigned pkt_len; int len_used = 0; struct urb *urb; u8 *rx_data; @@ -1204,17 +1213,22 @@ static void rx_bottom(struct r8152 *tp) rx_desc = agg->head; rx_data = agg->head; - pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; - len_used += sizeof(struct rx_desc) + pkt_len; + len_used += sizeof(struct rx_desc); - while (urb->actual_length >= len_used) { + while (urb->actual_length > len_used) { struct net_device *netdev = tp->netdev; struct net_device_stats *stats; + unsigned int pkt_len; struct sk_buff *skb; + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; if (pkt_len < ETH_ZLEN) break; + len_used += pkt_len; + if (urb->actual_length < len_used) + break; + stats = rtl8152_get_stats(netdev); pkt_len -= 4; /* CRC */ @@ -1234,9 +1248,8 @@ static void rx_bottom(struct r8152 *tp) rx_data = rx_agg_align(rx_data + pkt_len + 4); rx_desc = (struct rx_desc *)rx_data; - pkt_len = 
le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; len_used = (int)(rx_data - (u8 *)agg->head); - len_used += sizeof(struct rx_desc) + pkt_len; + len_used += sizeof(struct rx_desc); } submit: @@ -1384,53 +1397,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - struct net_device_stats *stats = rtl8152_get_stats(netdev); - unsigned long flags; - struct tx_agg *agg = NULL; - struct tx_desc *tx_desc; - unsigned int len; - u8 *tx_data; - int res; skb_tx_timestamp(skb); - /* If tx_queue is not empty, it means at least one previous packt */ - /* is waiting for sending. Don't send current one before it. */ - if (skb_queue_empty(&tp->tx_queue)) - agg = r8152_get_tx_agg(tp); - - if (!agg) { - skb_queue_tail(&tp->tx_queue, skb); - return NETDEV_TX_OK; - } + skb_queue_tail(&tp->tx_queue, skb); - tx_desc = (struct tx_desc *)agg->head; - tx_data = agg->head + sizeof(*tx_desc); - agg->skb_num = agg->skb_len = 0; + if (list_empty(&tp->tx_free) && + skb_queue_len(&tp->tx_queue) > tp->tx_qlen) + netif_stop_queue(netdev); - len = skb->len; - r8152_tx_csum(tp, tx_desc, skb); - memcpy(tx_data, skb->data, len); - dev_kfree_skb_any(skb); - agg->skb_num++; - agg->skb_len += len; - usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), - agg->head, len + sizeof(*tx_desc), - (usb_complete_t)write_bulk_callback, agg); - res = usb_submit_urb(agg->urb, GFP_ATOMIC); - if (res) { - /* Can we get/handle EPIPE here? */ - if (res == -ENODEV) { - netif_device_detach(tp->netdev); - } else { - netif_warn(tp, tx_err, netdev, - "failed tx_urb %d\n", res); - stats->tx_dropped++; - spin_lock_irqsave(&tp->tx_lock, flags); - list_add_tail(&agg->list, &tp->tx_free); - spin_unlock_irqrestore(&tp->tx_lock, flags); - } - } + if (!list_empty(&tp->tx_free)) + tasklet_schedule(&tp->tl); return NETDEV_TX_OK; } @@ -1459,6 +1436,14 @@ static void rtl8152_nic_reset(struct r8152 *tp) } } +static void set_tx_qlen(struct r8152 *tp) +{ + struct net_device *netdev = tp->netdev; + + tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + + sizeof(struct tx_desc)); +} + static inline u8 rtl8152_get_speed(struct r8152 *tp) { return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); @@ -1470,6 +1455,7 @@ static int rtl8152_enable(struct r8152 *tp) int i, ret; u8 speed; + set_tx_qlen(tp); speed = rtl8152_get_speed(tp); if (speed & _10bps) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 11f53589a3f3..d39b79f5e841 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -701,6 +701,54 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah, return ret; } +static void ar9003_doubler_fix(struct ath_hw *ah) +{ + if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9550(ah)) { + REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0); + REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0); + REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S, 0); + + udelay(200); + + REG_CLR_BIT(ah, AR_PHY_65NM_CH0_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK); + REG_CLR_BIT(ah, AR_PHY_65NM_CH1_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK); + REG_CLR_BIT(ah, 
AR_PHY_65NM_CH2_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK); + + udelay(1); + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1); + REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX2, + AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK, 1); + + udelay(200); + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH12, + AR_PHY_65NM_CH0_SYNTH12_VREFMUL3, 0xf); + + REG_RMW(ah, AR_PHY_65NM_CH0_RXTX2, 0, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S); + REG_RMW(ah, AR_PHY_65NM_CH1_RXTX2, 0, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S); + REG_RMW(ah, AR_PHY_65NM_CH2_RXTX2, 0, + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S | + 1 << AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S); + } +} + static int ar9003_hw_process_ini(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -726,6 +774,8 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, modesIndex); } + ar9003_doubler_fix(ah); + /* * RXGAIN initvals. */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index fca624322dc8..2af667beb273 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -656,13 +656,24 @@ #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x00000001 : 0x00000002) #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0 : 1) #define AR_PHY_65NM_CH0_SYNTH7 0x16098 +#define AR_PHY_65NM_CH0_SYNTH12 0x160ac #define AR_PHY_65NM_CH0_BIAS1 0x160c0 #define AR_PHY_65NM_CH0_BIAS2 0x160c4 #define AR_PHY_65NM_CH0_BIAS4 0x160cc +#define AR_PHY_65NM_CH0_RXTX2 0x16104 +#define AR_PHY_65NM_CH1_RXTX2 0x16504 +#define AR_PHY_65NM_CH2_RXTX2 0x16904 #define AR_PHY_65NM_CH0_RXTX4 0x1610c #define AR_PHY_65NM_CH1_RXTX4 0x1650c #define AR_PHY_65NM_CH2_RXTX4 0x1690c +#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3 0x00780000 +#define AR_PHY_65NM_CH0_SYNTH12_VREFMUL3_S 19 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK 0x00000004 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHON_MASK_S 2 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008 +#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3 + #define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \ (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280))) #define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 
0x3c0 : 0x300) diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h index 4dbc294df7e3..57fc5f459d0a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h @@ -361,7 +361,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = { {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, @@ -400,7 +400,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = { {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, }; @@ -472,7 +472,7 @@ static const u32 ar9462_2p1_radio_postamble[][5] = { static const u32 ar9462_2p1_soc_preamble[][2] = { /* Addr allmodes */ - {0x000040a4, 0x00a0c1c9}, + {0x000040a4, 0x00a0c9c9}, {0x00007020, 0x00000000}, {0x00007034, 0x00000002}, {0x00007038, 0x000004c2}, diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index c00687e05688..1217c52ab28e 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -362,7 +362,8 @@ static int __ath_reg_dyn_country(struct wiphy *wiphy, { u16 country_code; - if (!ath_is_world_regd(reg)) + if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + !ath_is_world_regd(reg)) return -EINVAL; country_code = ath_regd_find_country_by_name(request->alpha2); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 5b5b952d47b1..4a2293041821 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -823,6 +823,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, } err = brcmf_p2p_escan(p2p, num_nodfs, chanspecs, search_state, action, P2PAPI_BSSCFG_DEVICE); + kfree(chanspecs); } exit: if (err) diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index fbad00a5abc8..aeaea0e3b4c4 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -2210,8 +2210,10 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->bss_started = 0; priv->bss_num = 0; - if (mwifiex_cfg80211_init_p2p_client(priv)) - return ERR_PTR(-EFAULT); + if (mwifiex_cfg80211_init_p2p_client(priv)) { + wdev = ERR_PTR(-EFAULT); + goto done; + } break; default: @@ -2224,7 +2226,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, if (!dev) { wiphy_err(wiphy, "no memory available for netdevice\n"); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-ENOMEM); + wdev = ERR_PTR(-ENOMEM); + goto done; } mwifiex_init_priv_params(priv, dev); @@ -2264,7 +2267,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, wiphy_err(wiphy, "cannot register virtual network 
device\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-EFAULT); + priv->netdev = NULL; + wdev = ERR_PTR(-EFAULT); + goto done; } sema_init(&priv->async_sem, 1); @@ -2274,6 +2279,13 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_init(priv); #endif + +done: + if (IS_ERR(wdev)) { + kfree(priv->wdev); + priv->wdev = NULL; + } + return wdev; } EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); @@ -2298,7 +2310,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) unregister_netdevice(wdev->netdev); /* Clear the priv in adapter */ + priv->netdev->ieee80211_ptr = NULL; priv->netdev = NULL; + kfree(wdev); + priv->wdev = NULL; priv->media_connected = false; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 9d7c9d354d34..78e8a6666cc6 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -411,13 +411,14 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) */ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) { - int ret, i; + int ret; char fmt[64]; struct mwifiex_private *priv; struct mwifiex_adapter *adapter = context; struct mwifiex_fw_image fw; struct semaphore *sem = adapter->card_sem; bool init_failed = false; + struct wireless_dev *wdev; if (!firmware) { dev_err(adapter->dev, @@ -469,14 +470,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; if (mwifiex_register_cfg80211(adapter)) { dev_err(adapter->dev, "cannot register with cfg80211\n"); - goto err_register_cfg80211; + goto err_init_fw; } rtnl_lock(); /* Create station interface by default */ - if (!mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", - NL80211_IFTYPE_STATION, NULL, NULL)) { + wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", + NL80211_IFTYPE_STATION, NULL, NULL); + if (IS_ERR(wdev)) { dev_err(adapter->dev, "cannot create default STA interface\n"); + rtnl_unlock(); goto err_add_intf; } rtnl_unlock(); @@ -486,17 +489,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) goto done; err_add_intf: - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - - if (!priv) - continue; - - if (priv->wdev && priv->netdev) - mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev); - } - rtnl_unlock(); -err_register_cfg80211: wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: @@ -1006,12 +998,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) wiphy_unregister(priv->wdev->wiphy); wiphy_free(priv->wdev->wiphy); - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (priv) - kfree(priv->wdev); - } - mwifiex_terminate_workqueue(adapter); /* Unregister device */ diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 33fa9432b241..03688aa14e8a 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -232,7 +232,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) } mwifiex_remove_card(card->adapter, &add_remove_card_sem); - kfree(card); } static void mwifiex_pcie_shutdown(struct pci_dev *pdev) @@ -2313,6 +2312,7 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) pci_release_region(pdev, 0); pci_set_drvdata(pdev, NULL); } + kfree(card); } /* diff --git a/drivers/net/wireless/mwifiex/sdio.c 
b/drivers/net/wireless/mwifiex/sdio.c index 9bf8898743ab..b44a31523461 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -196,7 +196,6 @@ mwifiex_sdio_remove(struct sdio_func *func) } mwifiex_remove_card(card->adapter, &add_remove_card_sem); - kfree(card); } /* @@ -1745,7 +1744,6 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter) sdio_claim_host(card->func); sdio_disable_func(card->func); sdio_release_host(card->func); - sdio_set_drvdata(card->func, NULL); } } @@ -1773,7 +1771,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return ret; } - sdio_set_drvdata(func, card); adapter->dev = &func->dev; @@ -1801,6 +1798,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) int ret; u8 sdio_ireg; + sdio_set_drvdata(card->func, card); + /* * Read the HOST_INT_STATUS_REG for ACK the first interrupt got * from the bootloader. If we don't do this we get a interrupt @@ -1883,6 +1882,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter) kfree(card->mpa_rx.len_arr); kfree(card->mpa_tx.buf); kfree(card->mpa_rx.buf); + sdio_set_drvdata(card->func, NULL); + kfree(card); } /* diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 1c70b8d09227..edf5b7a24900 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -350,7 +350,6 @@ static int mwifiex_usb_probe(struct usb_interface *intf, card->udev = udev; card->intf = intf; - usb_card = card; pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n", udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, @@ -525,25 +524,28 @@ static int mwifiex_usb_resume(struct usb_interface *intf) static void mwifiex_usb_disconnect(struct usb_interface *intf) { struct usb_card_rec *card = usb_get_intfdata(intf); - struct mwifiex_adapter *adapter; - if (!card || !card->adapter) { - pr_err("%s: card or card->adapter is NULL\n", __func__); + if (!card) { + pr_err("%s: card is NULL\n", __func__); return; } - adapter = card->adapter; - if (!adapter->priv_num) - return; - mwifiex_usb_free(card); - dev_dbg(adapter->dev, "%s: removing card\n", __func__); - mwifiex_remove_card(adapter, &add_remove_card_sem); + if (card->adapter) { + struct mwifiex_adapter *adapter = card->adapter; + + if (!adapter->priv_num) + return; + + dev_dbg(adapter->dev, "%s: removing card\n", __func__); + mwifiex_remove_card(adapter, &add_remove_card_sem); + } usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); kfree(card); + usb_card = NULL; return; } @@ -754,6 +756,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) card->adapter = adapter; adapter->dev = &card->udev->dev; strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME); + usb_card = card; return 0; } @@ -762,7 +765,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; - usb_set_intfdata(card->intf, NULL); + card->adapter = NULL; } static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, @@ -1004,7 +1007,7 @@ static void mwifiex_usb_cleanup_module(void) if (!down_interruptible(&add_remove_card_sem)) up(&add_remove_card_sem); - if (usb_card) { + if (usb_card && usb_card->adapter) { struct mwifiex_adapter *adapter = usb_card->adapter; int i; diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 080b1fcae5fa..9dd92a700442 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ 
b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -181,6 +181,7 @@ static void rt2x00lib_autowakeup(struct work_struct *work) static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { + struct ieee80211_tx_control control = {}; struct rt2x00_dev *rt2x00dev = data; struct sk_buff *skb; @@ -195,7 +196,7 @@ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, */ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); while (skb) { - rt2x00mac_tx(rt2x00dev->hw, NULL, skb); + rt2x00mac_tx(rt2x00dev->hw, &control, skb); skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); } } diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 393685390f3e..e26312fb4356 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -769,7 +769,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw, static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats *pstats, - struct rx_desc_92c *pdesc, + struct rx_desc_92c *p_desc, struct rx_fwinfo_92c *p_drvinfo, bool packet_match_bssid, bool packet_toself, @@ -784,11 +784,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, u32 rssi, total_rssi = 0; bool in_powersavemode = false; bool is_cck_rate; + u8 *pdesc = (u8 *)p_desc; - is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); + is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc); pstats->packet_matchbssid = packet_match_bssid; pstats->packet_toself = packet_toself; - pstats->is_cck = is_cck_rate; pstats->packet_beacon = packet_beacon; pstats->is_cck = is_cck_rate; pstats->RX_SIGQ[0] = -1; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c index b0c346a9e4b8..1bc21ccfa71b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -303,10 +303,10 @@ out: bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, - u8 *p_desc, struct sk_buff *skb) + u8 *pdesc, struct sk_buff *skb) { struct rx_fwinfo_92c *p_drvinfo; - struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; + struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc; u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc); stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); @@ -345,7 +345,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, if (phystatus) { p_drvinfo = (struct rx_fwinfo_92c *)(skb->data + stats->rx_bufshift); - rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, + rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc, p_drvinfo); } /*rx_status->qual = stats->signal; */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index b78ee10a956a..2329cccf1fa6 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -461,6 +461,9 @@ void xenvif_disconnect(struct xenvif *vif) if (netif_carrier_ok(vif->dev)) xenvif_carrier_off(vif); + if (vif->task) + kthread_stop(vif->task); + if (vif->tx_irq) { if (vif->tx_irq == vif->rx_irq) unbind_from_irqhandler(vif->tx_irq, vif); @@ -471,9 +474,6 @@ void xenvif_disconnect(struct xenvif *vif) vif->tx_irq = 0; } - if (vif->task) - kthread_stop(vif->task); - xenvif_unmap_frontend_rings(vif); } diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 95655d7c0d0b..e52d7ffa38b9 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -410,7 +410,7 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid); * Otherwise is returns a bitmask 
with supported features. Current * features reported are: * PCI_PASID_CAP_EXEC - Execute permission supported - * PCI_PASID_CAP_PRIV - Priviledged mode supported + * PCI_PASID_CAP_PRIV - Privileged mode supported */ int pci_pasid_features(struct pci_dev *pdev) { diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 7c4f38dd42ba..0afbbbc55c81 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -249,7 +249,7 @@ struct tegra_pcie { void __iomem *afi; int irq; - struct list_head busses; + struct list_head buses; struct resource *cs; struct resource io; @@ -399,14 +399,14 @@ free: /* * Look up a virtual address mapping for the specified bus number. If no such - * mapping existis, try to create one. + * mapping exists, try to create one. */ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, unsigned int busnr) { struct tegra_pcie_bus *bus; - list_for_each_entry(bus, &pcie->busses, list) + list_for_each_entry(bus, &pcie->buses, list) if (bus->nr == busnr) return (void __iomem *)bus->area->addr; @@ -414,7 +414,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie, if (IS_ERR(bus)) return NULL; - list_add_tail(&bus->list, &pcie->busses); + list_add_tail(&bus->list, &pcie->buses); return (void __iomem *)bus->area->addr; } @@ -808,7 +808,7 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; afi_writel(pcie, value, AFI_FUSE); - /* initialze internal PHY, enable up to 16 PCIE lanes */ + /* initialize internal PHY, enable up to 16 PCIE lanes */ pads_writel(pcie, 0x0, PADS_CTL_SEL); /* override IDDQ to 1 on all 4 lanes */ @@ -1624,7 +1624,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) if (!pcie) return -ENOMEM; - INIT_LIST_HEAD(&pcie->busses); + INIT_LIST_HEAD(&pcie->buses); INIT_LIST_HEAD(&pcie->ports); pcie->soc_data = match->data; pcie->dev = &pdev->dev; diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 1e1fea4d959b..e33b68be0391 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -197,7 +197,7 @@ static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0) return -ENOSPC; /* * Check if this position is at correct offset.nvec is always a - * power of two. pos0 must be nvec bit alligned. + * power of two. pos0 must be nvec bit aligned. */ if (pos % msgvec) pos += msgvec - (pos % msgvec); diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig index 0a648af89531..df8caec59789 100644 --- a/drivers/pci/hotplug/Kconfig +++ b/drivers/pci/hotplug/Kconfig @@ -133,8 +133,8 @@ config HOTPLUG_PCI_RPA_DLPAR To compile this driver as a module, choose M here: the module will be called rpadlpar_io. - - When in doubt, say N. + + When in doubt, say N. 
config HOTPLUG_PCI_SGI tristate "SGI PCI Hotplug Support" diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 47ec8c80e16d..3e6532b945c1 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -31,7 +31,7 @@ pci_hotplug-objs += cpci_hotplug_core.o \ cpci_hotplug_pci.o endif ifdef CONFIG_ACPI -pci_hotplug-objs += acpi_pcihp.o +pci_hotplug-objs += acpi_pcihp.o endif cpqphp-objs := cpqphp_core.o \ diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 8650d39db392..dca66bc44578 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -111,7 +111,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info) * @info: must match the pointer used to register * * Description: This is used to un-register a hardware specific acpi - * driver that manipulates the attention LED. The pointer to the + * driver that manipulates the attention LED. The pointer to the * info struct must be the same as the one used to set it. */ int acpiphp_unregister_attention(struct acpiphp_attention_info *info) @@ -169,8 +169,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) * was registered with us. This allows hardware specific * ACPI implementations to blink the light for us. */ - static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) - { +static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) +{ int retval = -ENODEV; pr_debug("%s - physical_slot = %s\n", __func__, @@ -182,8 +182,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) } else attention_info = NULL; return retval; - } - +} + /** * get_power_status - get power status of a slot @@ -323,7 +323,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot, if (retval) { pr_err("pci_hp_register failed with error %d\n", retval); goto error_hpslot; - } + } pr_info("Slot [%s] registered\n", slot_name(slot)); diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 5b4e9eb0e8ff..1cf605f67673 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -325,7 +325,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data, list_add_tail(&slot->node, &bridge->slots); - /* Register slots for ejectable funtions only. */ + /* Register slots for ejectable functions only. 
*/ if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) { unsigned long long sun; int retval; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 0d64c414bf78..ecfac7e72d91 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -116,7 +116,7 @@ static struct bin_attribute ibm_apci_table_attr = { .read = ibm_read_apci_table, .write = NULL, }; -static struct acpiphp_attention_info ibm_attention_info = +static struct acpiphp_attention_info ibm_attention_info = { .set_attn = ibm_set_attention_status, .get_attn = ibm_get_attention_status, @@ -171,9 +171,9 @@ ibm_slot_done: */ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) { - union acpi_object args[2]; + union acpi_object args[2]; struct acpi_object_list params = { .pointer = args, .count = 2 }; - acpi_status stat; + acpi_status stat; unsigned long long rc; union apci_descriptor *ibm_slot; @@ -208,7 +208,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) * * Description: This method is registered with the acpiphp module as a * callback to do the device specific task of getting the LED status. - * + * * Because there is no direct method of getting the LED status directly * from an ACPI call, we read the aPCI table and parse out our * slot descriptor to read the status from that. @@ -259,7 +259,7 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context) pr_debug("%s: Received notification %02x\n", __func__, event); if (subevent == 0x80) { - pr_debug("%s: generationg bus event\n", __func__); + pr_debug("%s: generating bus event\n", __func__); acpi_bus_generate_netlink_event(note->device->pnp.device_class, dev_name(¬e->device->dev), note->event, detail); @@ -387,7 +387,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_handle *phandle = (acpi_handle *)context; - acpi_status status; + acpi_status status; struct acpi_device_info *info; int retval = 0; @@ -405,7 +405,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, info->hardware_id.string, handle); *phandle = handle; /* returning non-zero causes the search to stop - * and returns this value to the caller of + * and returns this value to the caller of * acpi_walk_namespace, but it also causes some warnings * in the acpi debug code to print... */ diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 2b4c412f94c3..00c81a3cefc9 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -46,7 +46,7 @@ do { \ if (cpci_debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index d8add34177f2..d3add9819f63 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -39,7 +39,7 @@ extern int cpci_debug; do { \ if (cpci_debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) 
printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c index a6a71c41cdf8..7536eef620b0 100644 --- a/drivers/pci/hotplug/cpcihp_generic.c +++ b/drivers/pci/hotplug/cpcihp_generic.c @@ -13,14 +13,14 @@ * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along @@ -53,9 +53,9 @@ #define dbg(format, arg...) \ do { \ - if(debug) \ + if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while(0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c index 449b4bbc8301..e8c4a7ccf578 100644 --- a/drivers/pci/hotplug/cpcihp_zt5550.c +++ b/drivers/pci/hotplug/cpcihp_zt5550.c @@ -13,14 +13,14 @@ * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along @@ -48,9 +48,9 @@ #define dbg(format, arg...) \ do { \ - if(debug) \ + if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while(0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) @@ -285,7 +285,7 @@ static struct pci_device_id zt5550_hc_pci_tbl[] = { { 0, } }; MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl); - + static struct pci_driver zt5550_hc_driver = { .name = "zt5550_hc", .id_table = zt5550_hc_pci_tbl, diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h index bebc6060a558..9a57fda5348c 100644 --- a/drivers/pci/hotplug/cpcihp_zt5550.h +++ b/drivers/pci/hotplug/cpcihp_zt5550.h @@ -13,14 +13,14 @@ * option) any later version. * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along @@ -55,7 +55,7 @@ #define HC_CMD_REG 0x0C #define ARB_CONFIG_GNT_REG 0x10 #define ARB_CONFIG_CFG_REG 0x12 -#define ARB_CONFIG_REG 0x10 +#define ARB_CONFIG_REG 0x10 #define ISOL_CONFIG_REG 0x18 #define FAULT_STATUS_REG 0x20 #define FAULT_CONFIG_REG 0x24 diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index c8eaeb43fa5d..31273e155e6c 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -862,10 +862,10 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - /* Check for the proper subsystem ID's + /* Check for the proper subsystem IDs * Intel uses a different SSID programming model than Compaq. 
* For Intel, each SSID bit identifies a PHP capability. - * Also Intel HPC's may have RID=0. + * Also Intel HPCs may have RID=0. */ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) { err(msg_HPC_not_supported); diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index d282019cda5f..11845b796799 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1231,7 +1231,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ /* Only if mode change...*/ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) || - ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) + ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz))) set_SOGO(ctrl); wait_for_ctrl_irq(ctrl); @@ -1828,7 +1828,7 @@ static void interrupt_event_handler(struct controller *ctrl) if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) { dbg("button pressed\n"); - } else if (ctrl->event_queue[loop].event_type == + } else if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) { dbg("button cancel\n"); del_timer(&p_slot->task_event); @@ -2411,11 +2411,11 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func if (rc) return rc; - /* find range of busses to use */ + /* find range of buses to use */ dbg("find ranges of buses to use\n"); bus_node = get_max_resource(&(resources->bus_head), 1); - /* If we don't have any busses to allocate, we can't continue */ + /* If we don't have any buses to allocate, we can't continue */ if (!bus_node) return -ENOMEM; @@ -2900,7 +2900,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func /* If this function needs an interrupt and we are behind * a bridge and the pin is tied to something that's - * alread mapped, set this one the same */ + * already mapped, set this one the same */ if (temp_byte && resources->irqs && (resources->irqs->valid_INT & (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) { diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 09801c6945ce..6e4a12c91adb 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -291,7 +291,7 @@ int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 s * * Reads configuration for all slots in a PCI bus and saves info. * - * Note: For non-hot plug busses, the slot # saved is the device # + * Note: For non-hot plug buses, the slot # saved is the device # * * returns 0 if success */ @@ -455,7 +455,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug) * cpqhp_save_slot_config * * Saves configuration info for all PCI devices in a given slot - * including subordinate busses. + * including subordinate buses. 
* * returns 0 if success */ @@ -1556,4 +1556,3 @@ void cpqhp_destroy_board_resources (struct pci_func * func) kfree(tres); } } - diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index 8c5b25871d02..e3e46a7b3ee7 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h @@ -59,7 +59,7 @@ extern int ibmphp_debug; /************************************************************ -* RESOURE TYPE * +* RESOURCE TYPE * ************************************************************/ #define EBDA_RSRC_TYPE_MASK 0x03 @@ -103,7 +103,7 @@ extern int ibmphp_debug; //-------------------------------------------------------------- struct rio_table_hdr { - u8 ver_num; + u8 ver_num; u8 scal_count; u8 riodev_count; u16 offset; @@ -127,7 +127,7 @@ struct scal_detail { }; //-------------------------------------------------------------- -// RIO DETAIL +// RIO DETAIL //-------------------------------------------------------------- struct rio_detail { @@ -152,7 +152,7 @@ struct opt_rio { u8 first_slot_num; u8 middle_num; struct list_head opt_rio_list; -}; +}; struct opt_rio_lo { u8 rio_type; @@ -161,7 +161,7 @@ struct opt_rio_lo { u8 middle_num; u8 pack_count; struct list_head opt_rio_lo_list; -}; +}; /**************************************************************** * HPC DESCRIPTOR NODE * @@ -574,7 +574,7 @@ void ibmphp_hpc_stop_poll_thread(void); #define HPC_CTLR_IRQ_PENDG 0x80 //---------------------------------------------------------------------------- -// HPC_CTLR_WROKING status return codes +// HPC_CTLR_WORKING status return codes //---------------------------------------------------------------------------- #define HPC_CTLR_WORKING_NO 0x00 #define HPC_CTLR_WORKING_YES 0x01 diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index cbd72d81d253..efdc13adbe41 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -58,7 +58,7 @@ MODULE_DESCRIPTION (DRIVER_DESC); struct pci_bus *ibmphp_pci_bus; static int max_slots; -static int irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS +static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS * tables don't provide default info for empty slots */ static int init_flag; @@ -71,20 +71,20 @@ static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value) return get_max_adapter_speed_1 (hs, value, 1); } */ -static inline int get_cur_bus_info(struct slot **sl) +static inline int get_cur_bus_info(struct slot **sl) { int rc = 1; struct slot * slot_cur = *sl; debug("options = %x\n", slot_cur->ctrl->options); - debug("revision = %x\n", slot_cur->ctrl->revision); + debug("revision = %x\n", slot_cur->ctrl->revision); - if (READ_BUS_STATUS(slot_cur->ctrl)) + if (READ_BUS_STATUS(slot_cur->ctrl)) rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL); - - if (rc) + + if (rc) return rc; - + slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus); if (READ_BUS_MODE(slot_cur->ctrl)) slot_cur->bus_on->current_bus_mode = @@ -96,7 +96,7 @@ static inline int get_cur_bus_info(struct slot **sl) slot_cur->busstatus, slot_cur->bus_on->current_speed, slot_cur->bus_on->current_bus_mode); - + *sl = slot_cur; return 0; } @@ -104,8 +104,8 @@ static inline int get_cur_bus_info(struct slot **sl) static inline int slot_update(struct slot **sl) { int rc; - rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); - if (rc) + rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); + if (rc) return rc; if (!init_flag) rc = get_cur_bus_info(sl); @@ -172,7 +172,7 
@@ int ibmphp_init_devno(struct slot **cur_slot) debug("(*cur_slot)->irq[3] = %x\n", (*cur_slot)->irq[3]); - debug("rtable->exlusive_irqs = %x\n", + debug("rtable->exclusive_irqs = %x\n", rtable->exclusive_irqs); debug("rtable->slots[loop].irq[0].bitmap = %x\n", rtable->slots[loop].irq[0].bitmap); @@ -271,7 +271,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value) else rc = -ENODEV; } - } else + } else rc = -ENODEV; ibmphp_unlock_operations(); @@ -288,7 +288,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value) debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); - + ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; @@ -406,14 +406,14 @@ static int get_max_bus_speed(struct slot *slot) ibmphp_lock_operations(); mode = slot->supported_bus_mode; - speed = slot->supported_speed; + speed = slot->supported_speed; ibmphp_unlock_operations(); switch (speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: - if (mode == BUS_MODE_PCIX) + if (mode == BUS_MODE_PCIX) speed += 0x01; break; case BUS_SPEED_100: @@ -515,13 +515,13 @@ static int __init init_ops(void) debug("BEFORE GETTING SLOT STATUS, slot # %x\n", slot_cur->number); - if (slot_cur->ctrl->revision == 0xFF) + if (slot_cur->ctrl->revision == 0xFF) if (get_ctrl_revision(slot_cur, &slot_cur->ctrl->revision)) return -1; - if (slot_cur->bus_on->current_speed == 0xFF) - if (get_cur_bus_info(&slot_cur)) + if (slot_cur->bus_on->current_speed == 0xFF) + if (get_cur_bus_info(&slot_cur)) return -1; get_max_bus_speed(slot_cur); @@ -539,8 +539,8 @@ static int __init init_ops(void) debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status)); debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status)); - if ((SLOT_PWRGD(slot_cur->status)) && - !(SLOT_PRESENT(slot_cur->status)) && + if ((SLOT_PWRGD(slot_cur->status)) && + !(SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) { debug("BEFORE POWER OFF COMMAND\n"); rc = power_off(slot_cur); @@ -581,13 +581,13 @@ static int validate(struct slot *slot_cur, int opn) switch (opn) { case ENABLE: - if (!(SLOT_PWRGD(slot_cur->status)) && - (SLOT_PRESENT(slot_cur->status)) && + if (!(SLOT_PWRGD(slot_cur->status)) && + (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; case DISABLE: - if ((SLOT_PWRGD(slot_cur->status)) && + if ((SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; @@ -617,7 +617,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) err("out of system memory\n"); return -ENOMEM; } - + info->power_status = SLOT_PWRGD(slot_cur->status); info->attention_status = SLOT_ATTN(slot_cur->status, slot_cur->ext_status); @@ -638,7 +638,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur) case BUS_SPEED_33: break; case BUS_SPEED_66: - if (mode == BUS_MODE_PCIX) + if (mode == BUS_MODE_PCIX) bus_speed += 0x01; else if (mode == BUS_MODE_PCI) ; @@ -654,8 +654,8 @@ int ibmphp_update_slot_info(struct slot *slot_cur) } bus->cur_bus_speed = bus_speed; - // To do: bus_names - + // To do: bus_names + rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); kfree(info); return rc; @@ -729,8 +729,8 @@ static void ibm_unconfigure_device(struct pci_func *func) } /* - * The following function is to fix kernel bug regarding - * getting bus entries, here we manually add those primary + * The following function is to fix kernel bug regarding + * getting bus entries, here we manually 
add those primary * bus entries to kernel bus structure whenever apply */ static u8 bus_structure_fixup(u8 busno) @@ -814,7 +814,7 @@ static int ibm_configure_device(struct pci_func *func) } /******************************************************* - * Returns whether the bus is empty or not + * Returns whether the bus is empty or not *******************************************************/ static int is_bus_empty(struct slot * slot_cur) { @@ -842,7 +842,7 @@ static int is_bus_empty(struct slot * slot_cur) } /*********************************************************** - * If the HPC permits and the bus currently empty, tries to set the + * If the HPC permits and the bus currently empty, tries to set the * bus speed and mode at the maximum card and bus capability * Parameters: slot * Returns: bus is set (0) or error code @@ -856,7 +856,7 @@ static int set_bus(struct slot * slot_cur) static struct pci_device_id ciobx[] = { { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, { }, - }; + }; debug("%s - entry slot # %d\n", __func__, slot_cur->number); if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) { @@ -877,7 +877,7 @@ static int set_bus(struct slot * slot_cur) else if (!SLOT_BUS_MODE(slot_cur->ext_status)) /* if max slot/bus capability is 66 pci and there's no bus mode mismatch, then - the adapter supports 66 pci */ + the adapter supports 66 pci */ cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; @@ -930,7 +930,7 @@ static int set_bus(struct slot * slot_cur) return -EIO; } } - /* This is for x440, once Brandon fixes the firmware, + /* This is for x440, once Brandon fixes the firmware, will not need this delay */ msleep(1000); debug("%s -Exit\n", __func__); @@ -938,9 +938,9 @@ static int set_bus(struct slot * slot_cur) } /* This routine checks the bus limitations that the slot is on from the BIOS. - * This is used in deciding whether or not to power up the slot. + * This is used in deciding whether or not to power up the slot. * (electrical/spec limitations. For example, >1 133 MHz or >2 66 PCI cards on - * same bus) + * same bus) * Parameters: slot * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus */ @@ -986,7 +986,7 @@ static int check_limitations(struct slot *slot_cur) static inline void print_card_capability(struct slot *slot_cur) { info("capability of the card is "); - if ((slot_cur->ext_status & CARD_INFO) == PCIX133) + if ((slot_cur->ext_status & CARD_INFO) == PCIX133) info(" 133 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCIX66) info(" 66 MHz PCI-X\n"); @@ -1020,7 +1020,7 @@ static int enable_slot(struct hotplug_slot *hs) } attn_LED_blink(slot_cur); - + rc = set_bus(slot_cur); if (rc) { err("was not able to set the bus\n"); @@ -1082,7 +1082,7 @@ static int enable_slot(struct hotplug_slot *hs) rc = slot_update(&slot_cur); if (rc) goto error_power; - + rc = -EINVAL; if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) { err("power fault occurred trying to power up...\n"); @@ -1093,7 +1093,7 @@ static int enable_slot(struct hotplug_slot *hs) "speed and card capability\n"); print_card_capability(slot_cur); goto error_power; - } + } /* Don't think this case will happen after above checks... 
* but just in case, for paranoia sake */ if (!(SLOT_POWER(slot_cur->status))) { @@ -1144,7 +1144,7 @@ static int enable_slot(struct hotplug_slot *hs) ibmphp_print_test(); rc = ibmphp_update_slot_info(slot_cur); exit: - ibmphp_unlock_operations(); + ibmphp_unlock_operations(); return rc; error_nopower: @@ -1180,7 +1180,7 @@ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; int rc; - + ibmphp_lock_operations(); rc = ibmphp_do_disable_slot(slot); ibmphp_unlock_operations(); @@ -1192,12 +1192,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) int rc; u8 flag; - debug("DISABLING SLOT...\n"); - + debug("DISABLING SLOT...\n"); + if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { return -ENODEV; } - + flag = slot_cur->flag; slot_cur->flag = 1; @@ -1210,7 +1210,7 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) attn_LED_blink(slot_cur); if (slot_cur->func == NULL) { - /* We need this for fncs's that were there on bootup */ + /* We need this for functions that were there on bootup */ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { err("out of system memory\n"); @@ -1222,12 +1222,13 @@ int ibmphp_do_disable_slot(struct slot *slot_cur) } ibm_unconfigure_device(slot_cur->func); - - /* If we got here from latch suddenly opening on operating card or - a power fault, there's no power to the card, so cannot - read from it to determine what resources it occupied. This operation - is forbidden anyhow. The best we can do is remove it from kernel - lists at least */ + + /* + * If we got here from latch suddenly opening on operating card or + * a power fault, there's no power to the card, so cannot + * read from it to determine what resources it occupied. This operation + * is forbidden anyhow. 
The best we can do is remove it from kernel + * lists at least */ if (!flag) { attn_off(slot_cur); @@ -1264,7 +1265,7 @@ error: rc = -EFAULT; goto exit; } - if (flag) + if (flag) ibmphp_update_slot_info(slot_cur); goto exit; } @@ -1339,7 +1340,7 @@ static int __init ibmphp_init(void) debug("AFTER Resource & EBDA INITIALIZATIONS\n"); max_slots = get_max_slots(); - + if ((rc = ibmphp_register_pci())) goto error; diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 9df78bc14541..bd044158b36c 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -123,7 +123,7 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void) static void __init print_bus_info (void) { struct bus_info *ptr; - + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); @@ -131,7 +131,7 @@ static void __init print_bus_info (void) debug ("%s - bus# = %x\n", __func__, ptr->busno); debug ("%s - current_speed = %x\n", __func__, ptr->current_speed); debug ("%s - controller_id = %x\n", __func__, ptr->controller_id); - + debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv); debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv); debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix); @@ -144,7 +144,7 @@ static void __init print_bus_info (void) static void print_lo_info (void) { struct rio_detail *ptr; - debug ("print_lo_info ----\n"); + debug ("print_lo_info ----\n"); list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); @@ -176,7 +176,7 @@ static void __init print_ebda_pci_rsrc (void) struct ebda_pci_rsrc *ptr; list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { - debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", + debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); } } @@ -259,7 +259,7 @@ int __init ibmphp_access_ebda (void) ebda_seg = readw (io_mem); iounmap (io_mem); debug ("returned ebda segment: %x\n", ebda_seg); - + io_mem = ioremap(ebda_seg<<4, 1); if (!io_mem) return -ENOMEM; @@ -310,7 +310,7 @@ int __init ibmphp_access_ebda (void) re = readw (io_mem + sub_addr); /* next sub blk */ sub_addr += 2; - rc_id = readw (io_mem + sub_addr); /* sub blk id */ + rc_id = readw (io_mem + sub_addr); /* sub blk id */ sub_addr += 2; if (rc_id != 0x5243) @@ -330,7 +330,7 @@ int __init ibmphp_access_ebda (void) debug ("info about hpc descriptor---\n"); debug ("hot blk format: %x\n", format); debug ("num of controller: %x\n", num_ctlrs); - debug ("offset of hpc data structure enteries: %x\n ", sub_addr); + debug ("offset of hpc data structure entries: %x\n ", sub_addr); sub_addr = base + re; /* re sub blk */ /* FIXME: rc is never used/checked */ @@ -359,7 +359,7 @@ int __init ibmphp_access_ebda (void) debug ("info about rsrc descriptor---\n"); debug ("format: %x\n", format); debug ("num of rsrc: %x\n", num_entries); - debug ("offset of rsrc data structure enteries: %x\n ", sub_addr); + debug ("offset of rsrc data structure entries: %x\n ", sub_addr); hs_complete = 1; } else { @@ -376,7 +376,7 @@ int __init ibmphp_access_ebda (void) rio_table_ptr->scal_count = readb (io_mem + offset + 1); rio_table_ptr->riodev_count = 
readb (io_mem + offset + 2); rio_table_ptr->offset = offset +3 ; - + debug("info about rio table hdr ---\n"); debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ", rio_table_ptr->ver_num, rio_table_ptr->scal_count, @@ -440,12 +440,12 @@ static int __init ebda_rio_table (void) rio_detail_ptr->chassis_num = readb (io_mem + offset + 14); // debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status); //create linked list of chassis - if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5) + if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5) list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head); - //create linked list of expansion box - else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7) + //create linked list of expansion box + else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7) list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head); - else + else // not in my concern kfree (rio_detail_ptr); offset += 15; @@ -456,7 +456,7 @@ static int __init ebda_rio_table (void) } /* - * reorganizing linked list of chassis + * reorganizing linked list of chassis */ static struct opt_rio *search_opt_vg (u8 chassis_num) { @@ -464,7 +464,7 @@ static struct opt_rio *search_opt_vg (u8 chassis_num) list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { if (ptr->chassis_num == chassis_num) return ptr; - } + } return NULL; } @@ -472,7 +472,7 @@ static int __init combine_wpg_for_chassis (void) { struct opt_rio *opt_rio_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - + list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); if (!opt_rio_ptr) { @@ -484,14 +484,14 @@ static int __init combine_wpg_for_chassis (void) opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num; opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num; list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head); - } else { + } else { opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num); opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num); - } + } } print_opt_vg (); - return 0; -} + return 0; +} /* * reorganizing linked list of expansion box @@ -502,7 +502,7 @@ static struct opt_rio_lo *search_opt_lo (u8 chassis_num) list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { if (ptr->chassis_num == chassis_num) return ptr; - } + } return NULL; } @@ -510,7 +510,7 @@ static int combine_wpg_for_expansion (void) { struct opt_rio_lo *opt_rio_lo_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - + list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); if (!opt_rio_lo_ptr) { @@ -522,22 +522,22 @@ static int combine_wpg_for_expansion (void) opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num; opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num; opt_rio_lo_ptr->pack_count = 1; - + list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head); - } else { + } else { opt_rio_lo_ptr->first_slot_num = min 
(opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num); opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num); opt_rio_lo_ptr->pack_count = 2; - } + } } - return 0; + return 0; } - + /* Since we don't know the max slot number per each chassis, hence go * through the list of all chassis to find out the range - * Arguments: slot_num, 1st slot number of the chassis we think we are on, - * var (0 = chassis, 1 = expansion box) + * Arguments: slot_num, 1st slot number of the chassis we think we are on, + * var (0 = chassis, 1 = expansion box) */ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) { @@ -547,7 +547,7 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) if (!var) { list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { - if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { + if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { rc = -ENODEV; break; } @@ -569,7 +569,7 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num) list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { //check to see if this slot_num belongs to expansion box - if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) + if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) return opt_lo_ptr; } return NULL; @@ -580,8 +580,8 @@ static struct opt_rio * find_chassis_num (u8 slot_num) struct opt_rio *opt_vg_ptr; list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { - //check to see if this slot_num belongs to chassis - if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) + //check to see if this slot_num belongs to chassis + if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) return opt_vg_ptr; } return NULL; @@ -594,13 +594,13 @@ static u8 calculate_first_slot (u8 slot_num) { u8 first_slot = 1; struct slot * slot_cur; - + list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { if (slot_cur->ctrl) { - if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) + if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) first_slot = slot_cur->ctrl->ending_slot_num; } - } + } return first_slot + 1; } @@ -622,11 +622,11 @@ static char *create_file_name (struct slot * slot_cur) err ("Structure passed is empty\n"); return NULL; } - + slot_num = slot_cur->number; memset (str, 0, sizeof(str)); - + if (rio_table_ptr) { if (rio_table_ptr->ver_num == 3) { opt_vg_ptr = find_chassis_num (slot_num); @@ -660,7 +660,7 @@ static char *create_file_name (struct slot * slot_cur) /* if both NULL and we DO have correct RIO table in BIOS */ return NULL; } - } + } if (!flag) { if (slot_cur->ctrl->ctlr_type == 4) { first_slot = calculate_first_slot (slot_num); @@ -798,7 +798,7 @@ static int __init ebda_rsrc_controller (void) slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num); slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num); - // create bus_info lined list --- if only one slot per bus: slot_min = slot_max + // create bus_info lined list --- if only one slot per bus: slot_min = slot_max bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num); if 
(!bus_info_ptr2) { @@ -814,9 +814,9 @@ static int __init ebda_rsrc_controller (void) bus_info_ptr1->index = bus_index++; bus_info_ptr1->current_speed = 0xff; bus_info_ptr1->current_bus_mode = 0xff; - + bus_info_ptr1->controller_id = hpc_ptr->ctlr_id; - + list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head); } else { @@ -851,7 +851,7 @@ static int __init ebda_rsrc_controller (void) bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv; bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix; bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix; - bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix; + bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix; } bus_ptr++; } @@ -864,7 +864,7 @@ static int __init ebda_rsrc_controller (void) hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1); hpc_ptr->irq = readb (io_mem + addr + 2); addr += 3; - debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n", + debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n", hpc_ptr->u.pci_ctlr.bus, hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq); break; @@ -932,7 +932,7 @@ static int __init ebda_rsrc_controller (void) tmp_slot->supported_speed = 2; else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX) tmp_slot->supported_speed = 1; - + if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP) tmp_slot->supported_bus_mode = 1; else @@ -1000,7 +1000,7 @@ error_no_hpc: return rc; } -/* +/* * map info (bus, devfun, start addr, end addr..) of i/o, memory, * pfm from the physical addr to a list of resource. */ @@ -1057,7 +1057,7 @@ static int __init ebda_rsrc_rsrc (void) addr += 10; debug ("rsrc from mem or pfm ---\n"); - debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", + debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr); list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head); @@ -1096,7 +1096,7 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num) struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { - if (ptr->busno == num) + if (ptr->busno == num) return ptr; } return NULL; @@ -1110,7 +1110,7 @@ int ibmphp_get_bus_index (u8 num) struct bus_info *ptr; list_for_each_entry(ptr, &bus_info_head, bus_info_list) { - if (ptr->busno == num) + if (ptr->busno == num) return ptr->index; } return -ENODEV; @@ -1168,7 +1168,7 @@ static struct pci_device_id id_table[] = { .subdevice = HPC_SUBSYSTEM_ID, .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), }, {} -}; +}; MODULE_DEVICE_TABLE(pci, id_table); @@ -1197,7 +1197,7 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) struct controller *ctrl; debug ("inside ibmphp_probe\n"); - + list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { @@ -1210,4 +1210,3 @@ static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) } return -ENODEV; } - diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index f59ed30512b5..5fc7a089f532 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c @@ -258,7 +258,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 { u8 rc; void __iomem *wpg_addr; // base addr + offset - unsigned long wpg_data; // data 
to/from WPG LOHI format + unsigned long wpg_data; // data to/from WPG LOHI format unsigned long ultemp; unsigned long data; // actual data HILO format int i; @@ -351,7 +351,7 @@ static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 } //------------------------------------------------------------ -// Read from ISA type HPC +// Read from ISA type HPC //------------------------------------------------------------ static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset) { @@ -372,7 +372,7 @@ static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data) { u16 start_address; u16 port_address; - + start_address = ctlr_ptr->u.isa_ctlr.io_start; port_address = start_address + (u16) offset; outb (data, port_address); @@ -656,11 +656,11 @@ int ibmphp_hpc_readslot (struct slot * pslot, u8 cmd, u8 * pstatus) //-------------------------------------------------------------------- // cleanup //-------------------------------------------------------------------- - + // remove physical to logical address mapping if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) iounmap (wpg_bbar); - + free_hpc_access (); debug_polling ("%s - Exit rc[%d]\n", __func__, rc); @@ -835,7 +835,7 @@ static int poll_hpc(void *data) down (&semOperations); switch (poll_state) { - case POLL_LATCH_REGISTER: + case POLL_LATCH_REGISTER: oldlatchlow = curlatchlow; ctrl_count = 0x00; list_for_each (pslotlist, &ibmphp_slot_head) { @@ -892,16 +892,16 @@ static int poll_hpc(void *data) if (kthread_should_stop()) goto out_sleep; - + down (&semOperations); - + if (poll_count >= POLL_LATCH_CNT) { poll_count = 0; poll_state = POLL_SLOTS; } else poll_state = POLL_LATCH_REGISTER; break; - } + } /* give up the hardware semaphore */ up (&semOperations); /* sleep for a short time just for good measure */ @@ -958,7 +958,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) // bit 5 - HPC_SLOT_PWRGD if ((pslot->status & 0x20) != (poldslot->status & 0x20)) // OFF -> ON: ignore, ON -> OFF: disable slot - if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) + if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) disable = 1; // bit 6 - HPC_SLOT_BUS_SPEED @@ -980,7 +980,7 @@ static int process_changeinstatus (struct slot *pslot, struct slot *poldslot) pslot->status &= ~HPC_SLOT_POWER; } } - // CLOSE -> OPEN + // CLOSE -> OPEN else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) { disable = 1; @@ -1075,7 +1075,7 @@ void __exit ibmphp_hpc_stop_poll_thread (void) debug ("before locking operations \n"); ibmphp_lock_operations (); debug ("after locking operations \n"); - + // wait for poll thread to exit debug ("before sem_exit down \n"); down (&sem_exit); diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c index c60f5f3e838d..639ea3a75e14 100644 --- a/drivers/pci/hotplug/ibmphp_pci.c +++ b/drivers/pci/hotplug/ibmphp_pci.c @@ -1,8 +1,8 @@ /* * IBM Hot Plug Controller Driver - * + * * Written By: Irene Zubarev, IBM Corporation - * + * * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001,2002 IBM Corp. * @@ -42,7 +42,7 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno); /* * NOTE..... 
If BIOS doesn't provide default routing, we assign: - * 9 for SCSI, 10 for LAN adapters, and 11 for everything else. + * 9 for SCSI, 10 for LAN adapters, and 11 for everything else. * If adapter is bridged, then we assign 11 to it and devices behind it. * We also assign the same irq numbers for multi function devices. * These are PIC mode, so shouldn't matter n.e.ways (hopefully) @@ -71,11 +71,11 @@ static void assign_alt_irq (struct pci_func * cur_func, u8 class_code) * Configures the device to be added (will allocate needed resources if it * can), the device can be a bridge or a regular pci device, can also be * multi-functional - * + * * Input: function to be added - * + * * TO DO: The error case with Multifunction device or multi function bridge, - * if there is an error, will need to go through all previous functions and + * if there is an error, will need to go through all previous functions and * unconfigure....or can add some code into unconfigure_card.... */ int ibmphp_configure_card (struct pci_func *func, u8 slotno) @@ -98,7 +98,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cur_func = func; /* We only get bus and device from IRQ routing table. So at this point, - * func->busno is correct, and func->device contains only device (at the 5 + * func->busno is correct, and func->device contains only device (at the 5 * highest bits) */ @@ -151,7 +151,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno) cur_func->device, cur_func->busno); cleanup_count = 6; goto error; - } + } cur_func->next = NULL; function = 0x8; break; @@ -339,7 +339,7 @@ error: } /* - * This function configures the pci BARs of a single device. + * This function configures the pci BARs of a single device. * Input: pointer to the pci_func * Output: configured PCI, 0, or error */ @@ -371,17 +371,17 @@ static int configure_device (struct pci_func *func) for (count = 0; address[count]; count++) { /* for 6 BARs */ - /* not sure if i need this. per scott, said maybe need smth like this + /* not sure if i need this. per scott, said maybe need * something like this if devices don't adhere 100% to the spec, so don't want to write to the reserved bits - pcibios_read_config_byte(cur_func->busno, cur_func->device, + pcibios_read_config_byte(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, &tmp); if (tmp & 0x01) // IO - pcibios_write_config_dword(cur_func->busno, cur_func->device, + pcibios_write_config_dword(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD); else // Memory - pcibios_write_config_dword(cur_func->busno, cur_func->device, + pcibios_write_config_dword(cur_func->busno, cur_func->device, PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF); */ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF); @@ -421,8 +421,8 @@ static int configure_device (struct pci_func *func) return -EIO; } pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start); - - /* _______________This is for debugging purposes only_____________________ */ + + /* _______________This is for debugging purposes only_____________________ */ debug ("b4 writing, the IO address is %x\n", func->io[count]->start); pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]); debug ("after writing.... 
the start address is %x\n", bar[count]); @@ -484,7 +484,7 @@ static int configure_device (struct pci_func *func) pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start); - /*_______________This is for debugging purposes only______________________________*/ + /*_______________This is for debugging purposes only______________________________*/ debug ("b4 writing, start address is %x\n", func->pfmem[count]->start); pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]); debug ("after writing, start address is %x\n", bar[count]); @@ -559,7 +559,7 @@ static int configure_device (struct pci_func *func) /****************************************************************************** * This routine configures a PCI-2-PCI bridge and the functions behind it * Parameters: pci_func - * Returns: + * Returns: ******************************************************************************/ static int configure_bridge (struct pci_func **func_passed, u8 slotno) { @@ -622,7 +622,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno); pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number); - + /* __________________For debugging purposes only __________________________________ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number); debug ("sec_number after write/read is %x\n", sec_number); @@ -644,7 +644,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!! + !!!!!!!!!!!!!!!NEED TO ADD!!! FAST BACK-TO-BACK ENABLE!!!!!!!!!!!!!!!!!!!! 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/ @@ -670,7 +670,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("len[count] in IO = %x\n", len[count]); bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); - + if (!bus_io[count]) { err ("out of system memory\n"); retval = -ENOMEM; @@ -735,7 +735,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) ibmphp_add_pfmem_from_mem (bus_pfmem[count]); func->pfmem[count] = bus_pfmem[count]; } else { - err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n", + err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n", func->busno, func->device, len[count]); kfree (mem_tmp); kfree (bus_pfmem[count]); @@ -805,7 +805,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) debug ("amount_needed->mem = %x\n", amount_needed->mem); debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem); - if (amount_needed->not_correct) { + if (amount_needed->not_correct) { debug ("amount_needed is not correct\n"); for (count = 0; address[count]; count++) { /* for 2 BARs */ @@ -830,7 +830,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) } else { debug ("it wants %x IO behind the bridge\n", amount_needed->io); io = kzalloc(sizeof(*io), GFP_KERNEL); - + if (!io) { err ("out of system memory\n"); retval = -ENOMEM; @@ -959,7 +959,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if (bus->noIORanges) { pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8); - pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8); + pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8); /* _______________This is for debugging purposes only ____________________ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp); @@ -980,7 +980,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) if (bus->noMemRanges) { pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16); pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16); - + /* ____________________This is for debugging purposes only ________________________ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp); debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16); @@ -1017,7 +1017,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno) pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq); if ((irq > 0x00) && (irq < 0x05)) pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]); - /* + /* pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl); pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY); pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR); @@ -1071,7 +1071,7 @@ error: * This function adds up the amount of resources needed behind the PPB bridge * and passes it to the configure_bridge function * Input: bridge function - * Ouput: amount of resources needed + * Output: amount of resources needed *****************************************************************************/ static struct res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) { @@ -1204,9 +1204,9 @@ static struct 
res_needed *scan_behind_bridge (struct pci_func * func, u8 busno) return amount; } -/* The following 3 unconfigure_boot_ routines deal with the case when we had the card - * upon bootup in the system, since we don't allocate func to such case, we need to read - * the start addresses from pci config space and then find the corresponding entries in +/* The following 3 unconfigure_boot_ routines deal with the case when we had the card + * upon bootup in the system, since we don't allocate func to such case, we need to read + * the start addresses from pci config space and then find the corresponding entries in * our resource lists. The functions return either 0, -ENODEV, or -1 (general failure) * Change: we also call these functions even if we configured the card ourselves (i.e., not * the bootup case), since it should work same way @@ -1561,8 +1561,8 @@ static int unconfigure_boot_card (struct slot *slot_cur) * unconfiguring the device * TO DO: will probably need to add some code in case there was some resource, * to remove it... this is from when we have errors in the configure_card... - * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!! - * Returns: 0, -1, -ENODEV + * !!!!!!!!!!!!!!!!!!!!!!!!!FOR BUSES!!!!!!!!!!!! + * Returns: 0, -1, -ENODEV */ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end) { @@ -1634,7 +1634,7 @@ int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end) * Input: bus and the amount of resources needed (we know we can assign those, * since they've been checked already * Output: bus added to the correct spot - * 0, -1, error + * 0, -1, error */ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno) { @@ -1650,7 +1650,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r err ("strange, cannot find bus which is supposed to be at the system... 
something is terribly wrong...\n"); return -ENODEV; } - + list_add (&bus->bus_list, &cur_bus->bus_list); } if (io) { @@ -1679,7 +1679,7 @@ static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct r } if (pfmem) { pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL); - if (!pfmem_range) { + if (!pfmem_range) { err ("out of system memory\n"); return -ENOMEM; } @@ -1726,4 +1726,3 @@ static u8 find_sec_number (u8 primary_busno, u8 slotno) return busno; return 0xff; } - diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index e2dc289f767c..a265acb2d518 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -72,7 +72,7 @@ static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc * curr, u8 static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc * curr) { struct resource_node *rs; - + if (!curr) { err ("NULL passed to allocate\n"); return NULL; @@ -128,7 +128,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node } newrange->start = curr->start_addr; newrange->end = curr->end_addr; - + if (first_bus || (!num_ranges)) newrange->rangeno = 1; else { @@ -162,7 +162,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node newbus->rangePFMem = newrange; if (first_bus) newbus->noPFMemRanges = 1; - else { + else { debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end); ++newbus->noPFMemRanges; fix_resources (newbus); @@ -190,7 +190,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node * This is the Resource Management initialization function. It will go through * the Resource list taken from EBDA and fill in this module's data structures * - * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, + * THIS IS NOT TAKING INTO CONSIDERATION IO RESTRICTIONS OF PRIMARY BUSES, * SINCE WE'RE GOING TO ASSUME FOR NOW WE DON'T HAVE THOSE ON OUR BUSES FOR NOW * * Input: ptr to the head of the resource list from EBDA @@ -382,7 +382,7 @@ int __init ibmphp_rsrc_init (void) * pci devices' resources for the appropriate resource * * Input: type of the resource, range to add, current bus - * Output: 0 or -1, bus and range ptrs + * Output: 0 or -1, bus and range ptrs ********************************************************************************/ static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur) { @@ -466,7 +466,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno) switch (type) { case MEM: - if (bus_cur->firstMem) + if (bus_cur->firstMem) res = bus_cur->firstMem; break; case PFMEM: @@ -583,7 +583,7 @@ static void fix_resources (struct bus_node *bus_cur) } /******************************************************************************* - * This routine adds a resource to the list of resources to the appropriate bus + * This routine adds a resource to the list of resources to the appropriate bus * based on their resource type and sorted by their starting addresses. It assigns * the ptrs to next and nextRange if needed. * @@ -605,11 +605,11 @@ int ibmphp_add_resource (struct resource_node *res) err ("NULL passed to add\n"); return -ENODEV; } - + bus_cur = find_bus_wprev (res->busno, NULL, 0); - + if (!bus_cur) { - /* didn't find a bus, smth's wrong!!! */ + /* didn't find a bus, something's wrong!!! 
*/ debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -ENODEV; } @@ -648,7 +648,7 @@ int ibmphp_add_resource (struct resource_node *res) if (!range_cur) { switch (res->type) { case IO: - ++bus_cur->needIOUpdate; + ++bus_cur->needIOUpdate; break; case MEM: ++bus_cur->needMemUpdate; @@ -659,13 +659,13 @@ int ibmphp_add_resource (struct resource_node *res) } res->rangeno = -1; } - + debug ("The range is %d\n", res->rangeno); if (!res_start) { /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */ switch (res->type) { case IO: - bus_cur->firstIO = res; + bus_cur->firstIO = res; break; case MEM: bus_cur->firstMem = res; @@ -673,7 +673,7 @@ int ibmphp_add_resource (struct resource_node *res) case PFMEM: bus_cur->firstPFMem = res; break; - } + } res->next = NULL; res->nextRange = NULL; } else { @@ -770,7 +770,7 @@ int ibmphp_add_resource (struct resource_node *res) * This routine will remove the resource from the list of resources * * Input: io, mem, and/or pfmem resource to be deleted - * Ouput: modified resource list + * Output: modified resource list * 0 or error code ****************************************************************************/ int ibmphp_remove_resource (struct resource_node *res) @@ -825,7 +825,7 @@ int ibmphp_remove_resource (struct resource_node *res) if (!res_cur) { if (res->type == PFMEM) { - /* + /* * case where pfmem might be in the PFMemFromMem list * so will also need to remove the corresponding mem * entry @@ -961,12 +961,12 @@ static struct range_node * find_range (struct bus_node *bus_cur, struct resource } /***************************************************************************** - * This routine will check to make sure the io/mem/pfmem->len that the device asked for + * This routine will check to make sure the io/mem/pfmem->len that the device asked for * can fit w/i our list of available IO/MEM/PFMEM resources. If cannot, returns -EINVAL, * otherwise, returns 0 * * Input: resource - * Ouput: the correct start and end address are inputted into the resource node, + * Output: the correct start and end address are inputted into the resource node, * 0 or -EINVAL *****************************************************************************/ int ibmphp_check_resource (struct resource_node *res, u8 bridge) @@ -996,7 +996,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) bus_cur = find_bus_wprev (res->busno, NULL, 0); if (!bus_cur) { - /* didn't find a bus, smth's wrong!!! */ + /* didn't find a bus, something's wrong!!! 
*/ debug ("no bus in the system, either pci_dev's wrong or allocation failed\n"); return -EINVAL; } @@ -1066,7 +1066,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) break; } } - + if (flag && len_cur == res->len) { debug ("but we are not here, right?\n"); res->start = start_cur; @@ -1118,10 +1118,10 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) if (res_prev) { if (res_prev->rangeno != res_cur->rangeno) { /* 1st device on this range */ - if ((res_cur->start != range->start) && + if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) { if ((len_tmp < len_cur) || (len_cur == 0)) { - if ((range->start % tmp_divide) == 0) { + if ((range->start % tmp_divide) == 0) { /* just perfect, starting address is divisible by length */ flag = 1; len_cur = len_tmp; @@ -1344,7 +1344,7 @@ int ibmphp_check_resource (struct resource_node *res, u8 bridge) * This routine is called from remove_card if the card contained PPB. * It will remove all the resources on the bus as well as the bus itself * Input: Bus - * Ouput: 0, -ENODEV + * Output: 0, -ENODEV ********************************************************************************/ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) { @@ -1353,7 +1353,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) struct bus_node *prev_bus; int rc; - prev_bus = find_bus_wprev (parent_busno, NULL, 0); + prev_bus = find_bus_wprev (parent_busno, NULL, 0); if (!prev_bus) { debug ("something terribly wrong. Cannot find parent bus to the one to remove\n"); @@ -1424,7 +1424,7 @@ int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno) } /****************************************************************************** - * This routine deletes the ranges from a given bus, and the entries from the + * This routine deletes the ranges from a given bus, and the entries from the * parent's bus in the resources * Input: current bus, previous bus * Output: 0, -EINVAL @@ -1453,7 +1453,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) if (bus_cur->noMemRanges) { range_cur = bus_cur->rangeMem; for (i = 0; i < bus_cur->noMemRanges; i++) { - if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0) + if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0) return -EINVAL; ibmphp_remove_resource (res); @@ -1467,7 +1467,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) if (bus_cur->noPFMemRanges) { range_cur = bus_cur->rangePFMem; for (i = 0; i < bus_cur->noPFMemRanges; i++) { - if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0) + if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0) return -EINVAL; ibmphp_remove_resource (res); @@ -1482,7 +1482,7 @@ static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev) } /* - * find the resource node in the bus + * find the resource node in the bus * Input: Resource needed, start address of the resource, type of resource */ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag) @@ -1512,7 +1512,7 @@ int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resour err ("wrong type of flag\n"); return -EINVAL; } - + while (res_cur) { if (res_cur->start == start_address) { *res = res_cur; @@ -1718,7 +1718,7 @@ static int __init once_over (void) } /* end for pfmem */ } /* end if */ } /* end list_for_each bus */ - return 0; + return 
0; } int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem) @@ -1760,9 +1760,9 @@ static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u list_for_each (tmp, &gbuses) { tmp_prev = tmp->prev; bus_cur = list_entry (tmp, struct bus_node, bus_list); - if (flag) + if (flag) *prev = list_entry (tmp_prev, struct bus_node, bus_list); - if (bus_cur->busno == bus_number) + if (bus_cur->busno == bus_number) return bus_cur; } @@ -1776,7 +1776,7 @@ void ibmphp_print_test (void) struct range_node *range; struct resource_node *res; struct list_head *tmp; - + debug_pci ("*****************START**********************\n"); if ((!list_empty(&gbuses)) && flags) { @@ -1906,7 +1906,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu return 1; range_cur = range_cur->next; } - + return 0; } @@ -1920,7 +1920,7 @@ static int range_exists_already (struct range_node * range, struct bus_node * bu * Returns: none * Note: this function doesn't take into account IO restrictions etc, * so will only work for bridges with no video/ISA devices behind them It - * also will not work for onboard PPB's that can have more than 1 *bus + * also will not work for onboard PPBs that can have more than 1 *bus * behind them All these are TO DO. * Also need to add more error checkings... (from fnc returns etc) */ @@ -1963,7 +1963,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) case PCI_HEADER_TYPE_BRIDGE: function = 0x8; case PCI_HEADER_TYPE_MULTIBRIDGE: - /* We assume here that only 1 bus behind the bridge + /* We assume here that only 1 bus behind the bridge TO DO: add functionality for several: temp = secondary; while (temp < subordinate) { @@ -1972,7 +1972,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) } */ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno); - bus_sec = find_bus_wprev (sec_busno, NULL, 0); + bus_sec = find_bus_wprev (sec_busno, NULL, 0); /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */ if (!bus_sec) { bus_sec = alloc_error_bus (NULL, sec_busno, 1); @@ -2028,7 +2028,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) io->len = io->end - io->start + 1; ibmphp_add_resource (io); } - } + } pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address); pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address); diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index ec20f74c8981..cfa92a984e62 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -131,7 +131,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf, } module_put(slot->ops->owner); -exit: +exit: if (retval) return retval; return count; @@ -177,7 +177,7 @@ static ssize_t attention_write_file(struct pci_slot *slot, const char *buf, retval = ops->set_attention_status(slot->hotplug, attention); module_put(ops->owner); -exit: +exit: if (retval) return retval; return count; @@ -247,7 +247,7 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf, retval = slot->ops->hardware_test(slot, test); module_put(slot->ops->owner); -exit: +exit: if (retval) return retval; return count; @@ -512,7 +512,7 @@ int pci_hp_deregister(struct hotplug_slot *hotplug) * @hotplug: pointer to the slot whose info has changed * @info: pointer to the info copy into the slot's info structure * - * @slot must have been 
registered with the pci + * @slot must have been registered with the pci * hotplug subsystem previously with a call to pci_hp_register(). * * Returns 0 if successful, anything else for an error. diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 541bbe6d5343..21e865ded1dc 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -180,5 +180,5 @@ static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev) { return 0; } -#endif /* CONFIG_ACPI */ +#endif /* CONFIG_ACPI */ #endif /* _PCIEHP_H */ diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c index cff7cadfc2e4..eddddd447d0d 100644 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ b/drivers/pci/hotplug/pciehp_acpi.c @@ -78,7 +78,7 @@ static int __initdata dup_slot_id; static int __initdata acpi_slot_detected; static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); -/* Dummy driver for dumplicate name detection */ +/* Dummy driver for duplicate name detection */ static int __init dummy_probe(struct pcie_device *dev) { u32 slot_cap; diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index f4a18f51a29c..bbd48bbe4e9b 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -351,8 +351,8 @@ static int __init pcied_init(void) pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); - dbg("pcie_port_service_register = %d\n", retval); - info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); + dbg("pcie_port_service_register = %d\n", retval); + info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) dbg("Failure to register service\n"); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 51f56ef4ab6f..3eea3fdd4b0b 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -92,7 +92,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec) { /* Clamp to sane value */ if ((sec <= 0) || (sec > 60)) - sec = 2; + sec = 2; ctrl->poll_timer.function = &int_poll_timeout; ctrl->poll_timer.data = (unsigned long)ctrl; @@ -194,7 +194,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n"); } else if (!NO_CMD_CMPL(ctrl)) { /* - * This controller semms to notify of command completed + * This controller seems to notify of command completed * event even though it supports none of power * controller, attention led, power led and EMI. */ @@ -926,7 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev) if (pciehp_writew(ctrl, PCI_EXP_SLTSTA, 0x1f)) goto abort_ctrl; - /* Disable sotfware notification */ + /* Disable software notification */ pcie_disable_notification(ctrl); ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c index 1f00b937f721..ac69094e4b20 100644 --- a/drivers/pci/hotplug/pcihp_skeleton.c +++ b/drivers/pci/hotplug/pcihp_skeleton.c @@ -52,7 +52,7 @@ static LIST_HEAD(slot_list); do { \ if (debug) \ printk (KERN_DEBUG "%s: " format "\n", \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg) #define info(format, arg...) 
printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg) @@ -287,7 +287,7 @@ static int __init init_slots(void) hotplug_slot->release = &release_slot; make_slot_name(slot); hotplug_slot->ops = &skel_hotplug_slot_ops; - + /* * Initialize the slot info structure with some known * good values. @@ -296,7 +296,7 @@ static int __init init_slots(void) get_attention_status(hotplug_slot, &info->attention_status); get_latch_status(hotplug_slot, &info->latch_status); get_adapter_status(hotplug_slot, &info->adapter_status); - + dbg("registering slot %d\n", i); retval = pci_hp_register(slot->hotplug_slot); if (retval) { @@ -336,7 +336,7 @@ static void __exit cleanup_slots(void) pci_hp_deregister(slot->hotplug_slot); } } - + static int __init pcihp_skel_init(void) { int retval; diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index bb7af78e4eed..e9c044d15add 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -217,7 +217,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn) if (!pcibios_find_pci_bus(dn)) return -EINVAL; - /* If pci slot is hotplugable, use hotplug to remove it */ + /* If pci slot is hotpluggable, use hotplug to remove it */ slot = find_php_slot(dn); if (slot && rpaphp_deregister_slot(slot)) { printk(KERN_ERR "%s: unable to remove hotplug slot %s\n", diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h index 3135856e5e1c..b2593e876a09 100644 --- a/drivers/pci/hotplug/rpaphp.h +++ b/drivers/pci/hotplug/rpaphp.h @@ -49,9 +49,9 @@ extern bool rpaphp_debug; #define dbg(format, arg...) \ do { \ - if (rpaphp_debug) \ + if (rpaphp_debug) \ printk(KERN_DEBUG "%s: " format, \ - MY_NAME , ## arg); \ + MY_NAME , ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) @@ -99,5 +99,5 @@ void dealloc_slot_struct(struct slot *slot); struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain); int rpaphp_register_slot(struct slot *slot); int rpaphp_deregister_slot(struct slot *slot); - + #endif /* _PPC64PHP_H */ diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 127d6e600185..b7fc5c9255a5 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -226,7 +226,7 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index, for (i = 0; i < indexes[0]; i++) { if ((unsigned int) indexes[i + 1] == *my_index) { if (drc_name) - *drc_name = name_tmp; + *drc_name = name_tmp; if (drc_type) *drc_type = type_tmp; if (drc_index) @@ -289,7 +289,7 @@ static int is_php_dn(struct device_node *dn, const int **indexes, * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem. * @dn: device node of slot * - * This subroutine will register a hotplugable slot with the + * This subroutine will register a hotpluggable slot with the * PCI hotplug infrastructure. 
This routine is typically called * during boot time, if the hotplug slots are present at boot time, * or is called later, by the dlpar add code, if the slot is @@ -328,7 +328,7 @@ int rpaphp_add_slot(struct device_node *dn) return -ENOMEM; slot->type = simple_strtoul(type, NULL, 10); - + dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n", indexes[i + 1], name, type); @@ -356,7 +356,7 @@ static void __exit cleanup_slots(void) /* * Unregister all of our slots with the pci_hotplug subsystem, * and free up all memory that we had allocated. - * memory will be freed in release_slot callback. + * memory will be freed in release_slot callback. */ list_for_each_safe(tmp, n, &rpaphp_slot_head) { diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index 513e1e282391..9243f3e7a1c9 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -44,7 +44,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state) dbg("%s: slot must be power up to get sensor-state\n", __func__); - /* some slots have to be powered up + /* some slots have to be powered up * before get-sensor will succeed. */ rc = rtas_set_power_level(slot->power_domain, POWER_ON, @@ -133,4 +133,3 @@ int rpaphp_enable_slot(struct slot *slot) return 0; } - diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index b283bbea6d24..a6082cc263f7 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -1,5 +1,5 @@ /* - * RPA Virtual I/O device functions + * RPA Virtual I/O device functions * Copyright (C) 2004 Linda Xie <lxie@us.ibm.com> * * All rights reserved. @@ -51,27 +51,27 @@ struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain) { struct slot *slot; - + slot = kzalloc(sizeof(struct slot), GFP_KERNEL); if (!slot) goto error_nomem; slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot->hotplug_slot) - goto error_slot; + goto error_slot; slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!slot->hotplug_slot->info) goto error_hpslot; slot->name = kstrdup(drc_name, GFP_KERNEL); if (!slot->name) - goto error_info; + goto error_info; slot->dn = dn; slot->index = drc_index; slot->power_domain = power_domain; slot->hotplug_slot->private = slot; slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops; slot->hotplug_slot->release = &rpaphp_release_slot; - + return (slot); error_info: @@ -91,7 +91,7 @@ static int is_registered(struct slot *slot) list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) { if (!strcmp(tmp_slot->name, slot->name)) return 1; - } + } return 0; } @@ -104,7 +104,7 @@ int rpaphp_deregister_slot(struct slot *slot) __func__, slot->name); list_del(&slot->rpaphp_slot_list); - + retval = pci_hp_deregister(php_slot); if (retval) err("Problem unregistering a slot %s\n", slot->name); @@ -120,7 +120,7 @@ int rpaphp_register_slot(struct slot *slot) int retval; int slotno; - dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", + dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn->full_name, slot->index, slot->name, slot->power_domain, slot->type); @@ -128,7 +128,7 @@ int rpaphp_register_slot(struct slot *slot) if (is_registered(slot)) { err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name); return -EAGAIN; - } + } if (slot->dn->child) slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); @@ -145,4 +145,3 @@ int 
rpaphp_register_slot(struct slot *slot) info("Slot [%s] registered\n", slot->name); return 0; } - diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index d876e4b3c6a9..61529097464d 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -216,13 +216,13 @@ struct ctrl_reg { /* offsets to the controller registers based on the above structure layout */ enum ctrl_offsets { - BASE_OFFSET = offsetof(struct ctrl_reg, base_offset), - SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1), + BASE_OFFSET = offsetof(struct ctrl_reg, base_offset), + SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1), SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2), - SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config), + SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config), SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config), MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl), - PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface), + PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface), CMD = offsetof(struct ctrl_reg, cmd), CMD_STATUS = offsetof(struct ctrl_reg, cmd_status), INTR_LOC = offsetof(struct ctrl_reg, intr_loc), diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index d3f757df691c..faf13abd5b99 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -143,11 +143,11 @@ static int init_slots(struct controller *ctrl) snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); hotplug_slot->ops = &shpchp_hotplug_slot_ops; - ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " - "hp_slot=%x sun=%x slot_device_offset=%x\n", - pci_domain_nr(ctrl->pci_dev->subordinate), - slot->bus, slot->device, slot->hp_slot, slot->number, - ctrl->slot_device_offset); + ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " + "hp_slot=%x sun=%x slot_device_offset=%x\n", + pci_domain_nr(ctrl->pci_dev->subordinate), + slot->bus, slot->device, slot->hp_slot, slot->number, + ctrl->slot_device_offset); retval = pci_hp_register(slot->hotplug_slot, ctrl->pci_dev->subordinate, slot->device, name); if (retval) { diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 75ba2311b54f..2d7f474ca0ec 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -116,7 +116,7 @@ #define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21)) /* - * SHPC Command Code definitnions + * SHPC Command Code definitions * * Slot Operation 00h - 3Fh * Set Bus Segment Speed/Mode A 40h - 47h diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 21a7182dccd4..1fe2d6fb19d5 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -610,7 +610,7 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) struct resource tmp; enum pci_bar_type type; int reg = pci_iov_resource_bar(dev, resno, &type); - + if (!reg) return 0; diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index b008cf86b9c3..6684f153ab57 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -25,7 +25,7 @@ static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) /** * pci_lost_interrupt - reports a lost PCI interrupt * @pdev: device whose interrupt is lost - * + * * The primary function of this routine is to report a lost interrupt * in a standard way which users can recognise (instead of blaming the * driver). 
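The irq.c hunk above touches the kernel-doc of pci_lost_interrupt(), a helper a driver can call when an expected interrupt never arrives, so the problem gets reported in a recognisable form instead of being blamed on the driver. The sketch below is purely illustrative: mydrv_dev and the watchdog function are hypothetical and not part of this series; only the pci_lost_interrupt() call itself is an existing kernel interface.

#include <linux/jiffies.h>
#include <linux/pci.h>

/* Illustrative per-device state; not taken from any driver in this series. */
struct mydrv_dev {
	struct pci_dev *pdev;
	unsigned long cmd_deadline;	/* jiffies by which an IRQ was expected */
};

/* Hypothetical watchdog path: called when a command appears to have timed out. */
static void mydrv_report_lost_irq(struct mydrv_dev *md)
{
	if (!time_after(jiffies, md->cmd_deadline))
		return;				/* not actually overdue yet */

	/*
	 * Report the lost interrupt in the standard way.  The helper also
	 * returns a reason code a driver may inspect (for example, MSI may
	 * be implicated); it is ignored here for brevity before the driver
	 * would fall back to polling.
	 */
	pci_lost_interrupt(md->pdev);
}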
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 5e63645a7abe..3fcd67a16677 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -784,7 +784,7 @@ error: * @nvec: how many MSIs have been requested ? * @type: are we checking for MSI or MSI-X ? * - * Look at global flags, the device itself, and its parent busses + * Look at global flags, the device itself, and its parent buses * to determine if MSI/-X are supported for the device. If MSI/-X is * supported return 0, else return an error code. **/ diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index f166126e28d1..577074efbe62 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -141,7 +141,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) * if (_PRW at S-state x) * choose from highest power _SxD to lowest power _SxW * else // no _PRW at S-state x - * choose highest power _SxD or any lower power + * choose highest power _SxD or any lower power */ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 454853507b7e..9042fdbd7244 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -312,7 +312,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device * @pci_dev: PCI device being probed - * + * * returns 0 on success, else error. * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. */ @@ -378,7 +378,7 @@ static int pci_device_remove(struct device * dev) * We would love to complain here if pci_dev->is_enabled is set, that * the driver should have called pci_disable_device(), but the * unfortunate fact is there are too many odd BIOS and bridge setups - * that don't like drivers doing that all of the time. + * that don't like drivers doing that all of the time. * Oh well, we can dream of sane hardware when we sleep, no matter how * horrible the crap we have to deal with is when we are awake... */ @@ -1156,10 +1156,10 @@ static const struct dev_pm_ops pci_dev_pm_ops = { * @drv: the driver structure to register * @owner: owner module of drv * @mod_name: module name string - * + * * Adds the driver structure to the list of registered drivers. - * Returns a negative value on error, otherwise 0. - * If no error occurred, the driver remains registered even if + * Returns a negative value on error, otherwise 0. + * If no error occurred, the driver remains registered even if * no device was claimed during registration. */ int __pci_register_driver(struct pci_driver *drv, struct module *owner, @@ -1181,7 +1181,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner, /** * pci_unregister_driver - unregister a pci driver * @drv: the driver structure to unregister - * + * * Deletes the driver structure from the list of registered PCI drivers, * gives it a chance to clean up by calling its remove() function for * each device it was responsible for, and marks those devices as @@ -1203,7 +1203,7 @@ static struct pci_driver pci_compat_driver = { * pci_dev_driver - get the pci_driver of a device * @dev: the device to query * - * Returns the appropriate pci_driver structure or %NULL if there is no + * Returns the appropriate pci_driver structure or %NULL if there is no * registered driver for the device. 
*/ struct pci_driver * @@ -1224,7 +1224,7 @@ pci_dev_driver(const struct pci_dev *dev) * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure * @dev: the PCI device structure to match against * @drv: the device driver to search for matching PCI device id structures - * + * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching * pci_device_id structure or %NULL if there is no match. diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 6e47c519c510..2ff77509d8e5 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -2,13 +2,13 @@ * * Copyright (C) 2008 Red Hat, Inc. * Author: - * Chris Wright + * Chris Wright * * This work is licensed under the terms of the GNU GPL, version 2. * * Usage is simple, allocate a new id to the stub driver and bind the * device to it. For example: - * + * * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 2aaa83c85a4e..c91e6c18debc 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -10,7 +10,7 @@ * * File attributes for PCI devices * - * Modeled after usb's driverfs.c + * Modeled after usb's driverfs.c * */ @@ -270,13 +270,17 @@ msi_bus_store(struct device *dev, struct device_attribute *attr, if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; - /* bad things may happen if the no_msi flag is changed - * while some drivers are loaded */ + /* + * Bad things may happen if the no_msi flag is changed + * while drivers are loaded. + */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; - /* Maybe pci devices without subordinate busses shouldn't even have this - * attribute in the first place? */ + /* + * Maybe devices without subordinate buses shouldn't have this + * attribute in the first place? 
+ */ if (!pdev->subordinate) return count; @@ -670,7 +674,7 @@ pci_write_config(struct file* filp, struct kobject *kobj, size = dev->cfg_size - off; count = size; } - + pci_config_pm_runtime_get(dev); if ((off & 1) && size) { @@ -678,7 +682,7 @@ pci_write_config(struct file* filp, struct kobject *kobj, off++; size--; } - + if ((off & 3) && size > 2) { u16 val = data[off - init_off]; val |= (u16) data[off - init_off + 1] << 8; @@ -696,7 +700,7 @@ pci_write_config(struct file* filp, struct kobject *kobj, off += 4; size -= 4; } - + if (size >= 2) { u16 val = data[off - init_off]; val |= (u16) data[off - init_off + 1] << 8; @@ -1229,21 +1233,21 @@ pci_read_rom(struct file *filp, struct kobject *kobj, if (!pdev->rom_attr_enabled) return -EINVAL; - + rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */ if (!rom || !size) return -EIO; - + if (off >= size) count = 0; else { if (off + count > size) count = size - off; - + memcpy_fromio(buf, rom + off, count); } pci_unmap_rom(pdev, rom); - + return count; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b127fbda6fc8..33120d156668 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -198,7 +198,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, } /** - * pci_find_capability - query for devices' capabilities + * pci_find_capability - query for devices' capabilities * @dev: PCI device to query * @cap: capability code * @@ -207,12 +207,12 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, * device's PCI configuration space or 0 in case the device does not * support it. Possible values for @cap: * - * %PCI_CAP_ID_PM Power Management - * %PCI_CAP_ID_AGP Accelerated Graphics Port - * %PCI_CAP_ID_VPD Vital Product Data - * %PCI_CAP_ID_SLOTID Slot Identification + * %PCI_CAP_ID_PM Power Management + * %PCI_CAP_ID_AGP Accelerated Graphics Port + * %PCI_CAP_ID_VPD Vital Product Data + * %PCI_CAP_ID_SLOTID Slot Identification * %PCI_CAP_ID_MSI Message Signalled Interrupts - * %PCI_CAP_ID_CHSWP CompactPCI HotSwap + * %PCI_CAP_ID_CHSWP CompactPCI HotSwap * %PCI_CAP_ID_PCIX PCI-X * %PCI_CAP_ID_EXP PCI Express */ @@ -228,13 +228,13 @@ int pci_find_capability(struct pci_dev *dev, int cap) } /** - * pci_bus_find_capability - query for devices' capabilities + * pci_bus_find_capability - query for devices' capabilities * @bus: the PCI bus to query * @devfn: PCI device to query * @cap: capability code * * Like pci_find_capability() but works for pci devices that do not have a - * pci_dev structure set up yet. + * pci_dev structure set up yet. * * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not @@ -515,7 +515,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) return -EINVAL; /* Validate current state: - * Can enter D0 from any state, but if we can only go deeper + * Can enter D0 from any state, but if we can only go deeper * to sleep if we're already in a low power state */ if (state != PCI_D0 && dev->current_state <= PCI_D3cold @@ -998,7 +998,7 @@ static void pci_restore_config_space(struct pci_dev *pdev) } } -/** +/** * pci_restore_state - Restore the saved state of a PCI device * @dev: - PCI device that we're dealing with */ @@ -1030,7 +1030,7 @@ struct pci_saved_state { * the device saved state. * @dev: PCI device that we're dealing with * - * Rerturn NULL if no state or error. + * Return NULL if no state or error. 
*/ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) { @@ -1880,7 +1880,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) * pci_dev_run_wake - Check if device can generate run-time wake-up events. * @dev: Device to check. * - * Return true if the device itself is cabable of generating wake-up events + * Return true if the device itself is capable of generating wake-up events * (through the platform or using the native PCIe PME) or if the device supports * PME and one of its upstream bridges can generate wake-up events. */ @@ -2447,7 +2447,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) switch (pci_pcie_type(pdev)) { /* * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, - * but since their primary inteface is PCI/X, we conservatively + * but since their primary interface is PCI/X, we conservatively * handle them as we would a non-PCIe device. */ case PCI_EXP_TYPE_PCIE_BRIDGE: @@ -2471,7 +2471,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) /* * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be * implemented by the remaining PCIe types to indicate peer-to-peer - * capabilities, but only when they are part of a multifunciton + * capabilities, but only when they are part of a multifunction * device. The footnote for section 6.12 indicates the specific * PCIe types included here. */ @@ -2486,7 +2486,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) } /* - * PCIe 3.0, 6.12.1.3 specifies no ACS capabilties are applicable + * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable * to single function devices with the exception of downstream ports. */ return true; @@ -2622,7 +2622,7 @@ void pci_release_region(struct pci_dev *pdev, int bar) * * If @exclusive is set, then the region is marked so that userspace * is explicitly not allowed to map the resource via /dev/mem or - * sysfs MMIO access. + * sysfs MMIO access. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. @@ -2634,7 +2634,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n if (pci_resource_len(pdev, bar) == 0) return 0; - + if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { if (!request_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), res_name)) @@ -2694,7 +2694,7 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) * * The key difference that _exclusive makes it that userspace is * explicitly not allowed to map the resource via /dev/mem or - * sysfs. + * sysfs. */ int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) { @@ -2799,7 +2799,7 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) * successfully. * * pci_request_regions_exclusive() will mark the region so that - * /dev/mem and the sysfs MMIO access will not be allowed. + * /dev/mem and the sysfs MMIO access will not be allowed. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. @@ -2967,7 +2967,7 @@ pci_set_mwi(struct pci_dev *dev) cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } - + return 0; } @@ -3292,7 +3292,7 @@ clear: * * NOTE: This causes the caller to sleep for twice the device power transition * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms - * by devault (i.e. unless the @dev's d3_delay field has a different value). + * by default (i.e. unless the @dev's d3_delay field has a different value). 
* Moreover, only devices in D0 can be reset by this function. */ static int pci_pm_reset(struct pci_dev *dev, int probe) @@ -3341,7 +3341,7 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev) pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); /* * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double - * this to 2ms to ensure that we meet the minium requirement. + * this to 2ms to ensure that we meet the minimum requirement. */ msleep(2); @@ -3998,7 +3998,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps) return -EINVAL; v = ffs(mps) - 8; - if (v > dev->pcie_mpss) + if (v > dev->pcie_mpss) return -EINVAL; v <<= 5; diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 6b3a958e1be6..b2c8881da764 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -525,7 +525,7 @@ static void handle_error_source(struct pcie_device *aerdev, if (info->severity == AER_CORRECTABLE) { /* - * Correctable error does not need software intevention. + * Correctable error does not need software intervention. * No need to go through error recovery process. */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 403a44374ed5..f1272dc54de1 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -548,7 +548,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) /* * pcie_aspm_init_link_state: Initiate PCI express link state. - * It is called after the pcie and its children devices are scaned. + * It is called after the pcie and its children devices are scanned. * @pdev: the root port or switch downstream port */ void pcie_aspm_init_link_state(struct pci_dev *pdev) diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index e56e594ce112..bbc3bdd2b189 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -419,8 +419,8 @@ static void pcie_pme_remove(struct pcie_device *srv) static struct pcie_port_service_driver pcie_pme_driver = { .name = "pcie_pme", - .port_type = PCI_EXP_TYPE_ROOT_PORT, - .service = PCIE_PORT_SERVICE_PME, + .port_type = PCI_EXP_TYPE_ROOT_PORT, + .service = PCIE_PORT_SERVICE_PME, .probe = pcie_pme_probe, .suspend = pcie_pme_suspend, diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index d2eb80aab569..d525548404d6 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -14,7 +14,7 @@ #define PCIE_PORT_DEVICE_MAXSERVICES 4 /* * According to the PCI Express Base Specification 2.0, the indices of - * the MSI-X table entires used by port services must not exceed 31 + * the MSI-X table entries used by port services must not exceed 31 */ #define PCIE_PORT_MAX_MSIX_ENTRIES 32 diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c index 67be55a7f260..87e79a6ffb5a 100644 --- a/drivers/pci/pcie/portdrv_bus.c +++ b/drivers/pci/pcie/portdrv_bus.c @@ -18,8 +18,8 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); struct bus_type pcie_port_bus_type = { - .name = "pci_express", - .match = pcie_port_bus_match, + .name = "pci_express", + .match = pcie_port_bus_match, }; EXPORT_SYMBOL_GPL(pcie_port_bus_type); diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 08d131f7815b..0b6e76604068 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -46,7 +46,7 @@ static void release_pcie_device(struct device *dev) * pcie_port_msix_add_entry - add entry to 
given array of MSI-X entries * @entries: Array of MSI-X entries * @new_entry: Index of the entry to add to the array - * @nr_entries: Number of entries aleady in the array + * @nr_entries: Number of entries already in the array * * Return value: Position of the added entry in the array */ diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 696caed5fdf5..0d8fdc48e642 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -223,7 +223,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev, static void pcie_portdrv_remove(struct pci_dev *dev) { pcie_port_device_remove(dev); - pci_disable_device(dev); } static int error_detected_iter(struct device *device, void *data) @@ -390,9 +389,9 @@ static struct pci_driver pcie_portdriver = { .probe = pcie_portdrv_probe, .remove = pcie_portdrv_remove, - .err_handler = &pcie_portdrv_err_handler, + .err_handler = &pcie_portdrv_err_handler, - .driver.pm = PCIE_PORTDRV_PM_OPS, + .driver.pm = PCIE_PORTDRV_PM_OPS, }; static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d) @@ -412,7 +411,7 @@ static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = { .ident = "MSI Wind U-100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, - "MICRO-STAR INTERNATIONAL CO., LTD"), + "MICRO-STAR INTERNATIONAL CO., LTD"), DMI_MATCH(DMI_PRODUCT_NAME, "U-100"), }, }, diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5e14f5a51357..38e403dddf6e 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -582,7 +582,7 @@ static enum pci_bus_speed agp_speed(int agp3, int agpstat) index = 1; else goto out; - + if (agp3) { index += 2; if (index == 5) @@ -789,7 +789,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) } /* Disable MasterAbortMode during probing to avoid reporting - of bus errors (in some architectures) */ + of bus errors (in some architectures) */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); @@ -1005,7 +1005,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill * - * Initialize the device structure with information about the device's + * Initialize the device structure with information about the device's * vendor,class,memory and IO-space addresses,IRQ lines etc. * Called at initialisation of the PCI subsystem and by CardBus services. * Returns 0 on success and negative if unknown type of device (not normal, @@ -1111,7 +1111,7 @@ int pci_setup_device(struct pci_dev *dev) goto bad; /* The PCI-to-PCI bridge spec requires that subtractive decoding (i.e. transparent) bridge must have programming - interface code of 0x01. */ + interface code of 0x01. */ pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); @@ -1570,7 +1570,7 @@ static void pcie_write_mrrs(struct pci_dev *dev) * subsequent read will verify if the value is acceptable or not. * If the MRRS value provided is not acceptable (e.g., too large), * shrink the value until it is acceptable to the HW. 
- */ + */ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { rc = pcie_set_readrq(dev, mrrs); if (!rc) diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index cdc7836d7e3d..46d1378f2e9e 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -222,7 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, default: ret = -EINVAL; break; - }; + } return ret; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 91490453c229..b3b1b9aa8863 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -55,7 +55,7 @@ static void quirk_mellanox_tavor(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); -/* Deal with broken BIOS'es that neglect to enable passive release, +/* Deal with broken BIOSes that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) { @@ -78,11 +78,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p /* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround but VIA don't answer queries. If you happen to have good contacts at VIA - ask them for me please -- Alan - - This appears to be BIOS not version dependent. So presumably there is a + ask them for me please -- Alan + + This appears to be BIOS not version dependent. So presumably there is a chipset level fix */ - + static void quirk_isa_dma_hangs(struct pci_dev *dev) { if (!isa_dma_bridge_buggy) { @@ -97,7 +97,7 @@ static void quirk_isa_dma_hangs(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); @@ -157,10 +157,10 @@ static void quirk_triton(struct pci_dev *dev) pci_pci_problems |= PCIPCI_TRITON; } } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); /* * VIA Apollo KT133 needs PCI latency patch @@ -171,7 +171,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quir * the info on which Mr Breese based his work. 
* * Updated based on further information from the site and also on - * information provided by VIA + * information provided by VIA */ static void quirk_vialatency(struct pci_dev *dev) { @@ -179,7 +179,7 @@ static void quirk_vialatency(struct pci_dev *dev) u8 busarb; /* Ok we have a potential problem chipset here. Now see if we have a buggy southbridge */ - + p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); if (p!=NULL) { /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ @@ -194,9 +194,9 @@ static void quirk_vialatency(struct pci_dev *dev) if (p->revision < 0x10 || p->revision > 0x12) goto exit; } - + /* - * Ok we have the problem. Now set the PCI master grant to + * Ok we have the problem. Now set the PCI master grant to * occur every master grant. The apparent bug is that under high * PCI load (quite common in Linux of course) you can get data * loss when the CPU is held off the bus for 3 bus master requests @@ -209,7 +209,7 @@ static void quirk_vialatency(struct pci_dev *dev) */ pci_read_config_byte(dev, 0x76, &busarb); - /* Set bit 4 and bi 5 of byte 76 to 0x01 + /* Set bit 4 and bi 5 of byte 76 to 0x01 "Master priority rotation on every PCI master grant */ busarb &= ~(1<<5); busarb |= (1<<4); @@ -252,7 +252,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx) * that DMA to AGP space. Latency must be set to 0xA and triton * workaround applied too * [Info kindly provided by ALi] - */ + */ static void quirk_alimagik(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) { @@ -260,8 +260,8 @@ static void quirk_alimagik(struct pci_dev *dev) pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; } } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); /* * Natoma has some interesting boundary conditions with Zoran stuff @@ -274,12 +274,12 @@ static void quirk_natoma(struct pci_dev *dev) pci_pci_problems |= PCIPCI_NATOMA; } } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); /* * This chip can cause PCI parity errors if config register 0xA0 is read @@ -400,7 +400,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p /* * For now we only print it out. 
Eventually we'll want to * reserve it (at least if it's in the 0x1000+ range), but - * let's get enough confirmation reports first. + * let's get enough confirmation reports first. */ base &= -size; dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1); @@ -425,7 +425,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int } /* * For now we only print it out. Eventually we'll want to - * reserve it, but let's get enough confirmation reports first. + * reserve it, but let's get enough confirmation reports first. */ base &= -size; dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1); @@ -682,7 +682,7 @@ static void quirk_xio2000a(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, quirk_xio2000a); -#ifdef CONFIG_X86_IO_APIC +#ifdef CONFIG_X86_IO_APIC #include <asm/io_apic.h> @@ -696,12 +696,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, static void quirk_via_ioapic(struct pci_dev *dev) { u8 tmp; - + if (nr_ioapics < 1) tmp = 0; /* nothing routed to external APIC */ else tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ - + dev_info(&dev->dev, "%sbling VIA external APIC routing\n", tmp == 0 ? "Disa" : "Ena"); @@ -712,7 +712,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_i DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* - * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. + * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit. * This leads to doubled level interrupt rates. * Set this bit to get rid of cycle wastage. * Otherwise uncritical. @@ -986,7 +986,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, qu static void quirk_disable_pxb(struct pci_dev *pdev) { u16 config; - + if (pdev->revision != 0x04) /* Only C0 requires this */ return; pci_read_config_word(pdev, 0x40, &config); @@ -1094,11 +1094,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge * is not activated. The myth is that Asus said that they do not want the * users to be irritated by just another PCI Device in the Win98 device - * manager. (see the file prog/hotplug/README.p4b in the lm_sensors + * manager. (see the file prog/hotplug/README.p4b in the lm_sensors * package 2.7.0 for details) * - * The SMBus PCI Device can be activated by setting a bit in the ICH LPC - * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it + * The SMBus PCI Device can be activated by setting a bit in the ICH LPC + * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it * becomes necessary to do this tweak in two steps -- the chosen trigger * is either the Host bridge (preferred) or on-board VGA controller. 
* @@ -1253,7 +1253,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asu static void asus_hides_smbus_lpc(struct pci_dev *dev) { u16 val; - + if (likely(!asus_hides_smbus)) return; @@ -1640,8 +1640,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); /* * disable boot interrupts on HT-1000 @@ -1673,8 +1673,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); /* * disable boot interrupts on AMD and ATI chipsets @@ -1730,8 +1730,8 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); -DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); +DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); #endif /* CONFIG_X86_IO_APIC */ /* @@ -2127,8 +2127,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); #ifdef CONFIG_PCI_MSI /* Some chipsets do not support MSI. We cannot easily rely on setting * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually - * some other busses controlled by the chipset even if Linux is not - * aware of it. Instead of setting the flag on all busses in the + * some other buses controlled by the chipset even if Linux is not + * aware of it. Instead of setting the flag on all buses in the * machine, simply disable MSI globally. */ static void quirk_disable_all_msi(struct pci_dev *dev) @@ -2288,14 +2288,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, nvenet_msi_disable); /* - * Some versions of the MCP55 bridge from nvidia have a legacy irq routing - * config register. This register controls the routing of legacy interrupts - * from devices that route through the MCP55. If this register is misprogramed - * interrupts are only sent to the bsp, unlike conventional systems where the - * irq is broadxast to all online cpus. Not having this register set - * properly prevents kdump from booting up properly, so lets make sure that - * we have it set correctly. 
- * Note this is an undocumented register. + * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing + * config register. This register controls the routing of legacy + * interrupts from devices that route through the MCP55. If this register + * is misprogrammed, interrupts are only sent to the BSP, unlike + * conventional systems where the IRQ is broadcast to all online CPUs. Not + * having this register set properly prevents kdump from booting up + * properly, so let's make sure that we have it set correctly. + * Note that this is an undocumented register. */ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) { @@ -2626,7 +2626,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, /* Allow manual resource allocation for PCI hotplug bridges * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), - * kernel fails to allocate resources when hotplug device is + * kernel fails to allocate resources when hotplug device is * inserted and PCI bus is rescanned. */ static void quirk_hotplug_bridge(struct pci_dev *dev) diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 8fc54b7327bc..1576851028db 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -7,7 +7,7 @@ static void pci_free_resources(struct pci_dev *dev) { int i; - msi_remove_pci_irq_vectors(dev); + msi_remove_pci_irq_vectors(dev); pci_cleanup_rom(dev); for (i = 0; i < PCI_NUM_RESOURCES; i++) { diff --git a/drivers/pci/search.c b/drivers/pci/search.c index d0627fa9f368..3ff2ac7c14e2 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -1,5 +1,5 @@ /* - * PCI searching functions. + * PCI searching functions. * * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter, * David Mosberger-Tang @@ -96,12 +96,12 @@ struct pci_bus * pci_find_bus(int domain, int busnr) * pci_find_next_bus - begin or continue searching for a PCI bus * @from: Previous PCI bus found, or %NULL for new search. * - * Iterates through the list of known PCI busses. A new search is + * Iterates through the list of known PCI buses. A new search is * initiated by passing %NULL as the @from argument. Otherwise if * @from is not %NULL, searches continue from next device on the * global list. */ -struct pci_bus * +struct pci_bus * pci_find_next_bus(const struct pci_bus *from) { struct list_head *n; @@ -119,11 +119,11 @@ pci_find_next_bus(const struct pci_bus *from) /** * pci_get_slot - locate PCI device for a given PCI slot * @bus: PCI bus on which desired PCI device resides - * @devfn: encodes number of PCI slot in which the desired PCI - * device resides and the logical device number within that slot + * @devfn: encodes number of PCI slot in which the desired PCI + * device resides and the logical device number within that slot * in case of multi-function devices. * - * Given a PCI bus and slot/function number, the desired PCI device + * Given a PCI bus and slot/function number, the desired PCI device * is located in the list of PCI devices. * If the device is found, its reference count is increased and this * function returns a pointer to its data structure. 
The caller must diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4ce83b26ae9e..219a4106480a 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -292,8 +292,8 @@ static void assign_requested_resources_sorted(struct list_head *head, (!(res->flags & IORESOURCE_ROM_ENABLE)))) add_to_list(fail_head, dev_res->dev, res, - 0 /* dont care */, - 0 /* dont care */); + 0 /* don't care */, + 0 /* don't care */); } reset_resource(res); } @@ -667,9 +667,9 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) if (!io) { pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) b_res[0].flags |= IORESOURCE_IO; /* DECchip 21050 pass 2 errata: the bridge may miss an address disconnect boundary by one PCI data phase. @@ -819,7 +819,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, resource_size_t min_align, align; if (!b_res) - return; + return; min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { @@ -950,7 +950,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (realloc_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { r->end = r->start - 1; - add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */); + add_to_list(realloc_head, dev, r, r_size, 0/* don't care */); children_add_size += r_size; continue; } @@ -1456,8 +1456,8 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus, /* * first try will not touch pci bridge res - * second and later try will clear small leaf bridge res - * will stop till to the max deepth if can not find good one + * second and later try will clear small leaf bridge res + * will stop till to the max depth if can not find good one */ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) { diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 07f2eddc09ce..83c4d3bc47ab 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -159,7 +159,7 @@ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx) return 0; } -static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, +static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, int resno, resource_size_t size) { struct resource *root, *conflict; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index c1e9284a677b..448ca562d1f8 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -53,7 +53,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) static const char *pci_bus_speed_strings[] = { "33 MHz PCI", /* 0x00 */ "66 MHz PCI", /* 0x01 */ - "66 MHz PCI-X", /* 0x02 */ + "66 MHz PCI-X", /* 0x02 */ "100 MHz PCI-X", /* 0x03 */ "133 MHz PCI-X", /* 0x04 */ NULL, /* 0x05 */ diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index e1c1ec540893..24750a1b39b6 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -44,7 +44,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, default: err = -EINVAL; goto error; - }; + } err = -EIO; if (cfg_ret != PCIBIOS_SUCCESSFUL) diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 15f166a470a7..007730222116 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -626,7 +626,7 @@ comment "Platform RTC drivers" config RTC_DRV_CMOS tristate "PC-style 'CMOS'" - depends on X86 || ALPHA || ARM || M32R 
|| ATARI || PPC || MIPS || SPARC64 + depends on X86 || ARM || M32R || ATARI || PPC || MIPS || SPARC64 default y if X86 help Say "yes" here to get direct support for the real time clock @@ -643,6 +643,14 @@ config RTC_DRV_CMOS This driver can also be built as a module. If so, the module will be called rtc-cmos. +config RTC_DRV_ALPHA + bool "Alpha PC-style CMOS" + depends on ALPHA + default y + help + Direct support for the real-time clock found on every Alpha + system, specifically MC146818 compatibles. If in doubt, say Y. + config RTC_DRV_VRTC tristate "Virtual RTC for Intel MID platforms" depends on X86_INTEL_MID diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 8b2cd8a5a2ff..c0da95e95702 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -428,6 +428,14 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) return 0; } +static void at91_rtc_shutdown(struct platform_device *pdev) +{ + /* Disable all interrupts */ + at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | + AT91_RTC_SECEV | AT91_RTC_TIMEV | + AT91_RTC_CALEV); +} + #ifdef CONFIG_PM_SLEEP /* AT91RM9200 RTC Power management control */ @@ -466,6 +474,7 @@ static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume); static struct platform_driver at91_rtc_driver = { .remove = __exit_p(at91_rtc_remove), + .shutdown = at91_rtc_shutdown, .driver = { .name = "at91_rtc", .owner = THIS_MODULE, diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index f85b9e5c1f05..7eb19be35d46 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) @@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) @@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) @@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; } static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) @@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only; + return tpg->tpg_attrib.demo_mode_login_only; } static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( @@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ struct 
tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ \ - return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ + return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ } \ \ static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ @@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic * NodeACLs */ - QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; - QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; - QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; - QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1; + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = tcm_qla2xxx_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the fabric for use within TCM */ @@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl */ - TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + 
npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the npiv_fabric for use within TCM */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 329327528a55..771f7b816443 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg { struct se_portal_group se_tpg; }; -#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib) - struct tcm_qla2xxx_fc_loopid { struct se_node_acl *se_nacl; }; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 38e44b9abf0f..d70e9119e906 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -805,14 +805,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int iscsi_task_attr; int sam_task_attr; - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->cmd_pdus++; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->num_cmds++; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->cmd_pdus); hdr = (struct iscsi_scsi_req *) buf; payload_length = ntoh24(hdr->dlength); @@ -1254,20 +1247,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, int rc; if (!payload_length) { - pr_err("DataOUT payload is ZERO, protocol error.\n"); - return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, - buf); + pr_warn("DataOUT payload is ZERO, ignoring.\n"); + return 0; } /* iSCSI write */ - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->rx_data_octets += payload_length; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->write_bytes += payload_length; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_add(payload_length, &conn->sess->rx_data_octets); if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) { pr_err("DataSegmentLength: %u is greater than" @@ -1486,7 +1471,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload); static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf) { - struct iscsi_cmd *cmd; + struct iscsi_cmd *cmd = NULL; struct iscsi_data *hdr = (struct iscsi_data *)buf; int rc; bool data_crc_failed = false; @@ -1954,6 +1939,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, (unsigned char *)hdr); } + if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) || + (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) { + pr_err("Multi sequence text commands currently not supported\n"); + return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED, + (unsigned char *)hdr); + } + pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x," " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn, hdr->exp_statsn, payload_length); @@ -2630,14 +2622,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) return -1; } - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->tx_data_octets += datain.length; - if (conn->sess->se_sess->se_node_acl) { - spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock); - conn->sess->se_sess->se_node_acl->read_bytes += datain.length; - spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock); - } - spin_unlock_bh(&conn->sess->session_stats_lock); + 
atomic_long_add(datain.length, &conn->sess->tx_data_octets); /* * Special case for successfully execution w/ both DATAIN * and Sense Data. @@ -3162,9 +3147,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn, if (inc_stat_sn) cmd->stat_sn = conn->stat_sn++; - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->rsp_pdus++; - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->rsp_pdus); memset(hdr, 0, ISCSI_HDR_LEN); hdr->opcode = ISCSI_OP_SCSI_CMD_RSP; @@ -3374,6 +3357,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_tiqn *tiqn; struct iscsi_tpg_np *tpg_np; int buffer_len, end_of_buf = 0, len = 0, payload_len = 0; + int target_name_printed; unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; @@ -3411,19 +3395,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) continue; } - len = sprintf(buf, "TargetName=%s", tiqn->tiqn); - len += 1; - - if ((len + payload_len) > buffer_len) { - end_of_buf = 1; - goto eob; - } - memcpy(payload + payload_len, buf, len); - payload_len += len; + target_name_printed = 0; spin_lock(&tiqn->tiqn_tpg_lock); list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { + /* If demo_mode_discovery=0 and generate_node_acls=0 + * (demo mode dislabed) do not return + * TargetName+TargetAddress unless a NodeACL exists. + */ + + if ((tpg->tpg_attrib.generate_node_acls == 0) && + (tpg->tpg_attrib.demo_mode_discovery == 0) && + (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg, + cmd->conn->sess->sess_ops->InitiatorName))) { + continue; + } + spin_lock(&tpg->tpg_state_lock); if ((tpg->tpg_state == TPG_STATE_FREE) || (tpg->tpg_state == TPG_STATE_INACTIVE)) { @@ -3438,6 +3426,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) struct iscsi_np *np = tpg_np->tpg_np; bool inaddr_any = iscsit_check_inaddr_any(np); + if (!target_name_printed) { + len = sprintf(buf, "TargetName=%s", + tiqn->tiqn); + len += 1; + + if ((len + payload_len) > buffer_len) { + spin_unlock(&tpg->tpg_np_lock); + spin_unlock(&tiqn->tiqn_tpg_lock); + end_of_buf = 1; + goto eob; + } + memcpy(payload + payload_len, buf, len); + payload_len += len; + target_name_printed = 1; + } + len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", (inaddr_any == false) ? @@ -4092,9 +4096,7 @@ restart: * hit default in the switch below. */ memset(buffer, 0xff, ISCSI_HDR_LEN); - spin_lock_bh(&conn->sess->session_stats_lock); - conn->sess->conn_digest_errors++; - spin_unlock_bh(&conn->sess->session_stats_lock); + atomic_long_inc(&conn->sess->conn_digest_errors); } else { pr_debug("Got HeaderDigest CRC32C" " 0x%08x\n", checksum); @@ -4381,7 +4383,7 @@ int iscsit_close_connection( int iscsit_close_session(struct iscsi_session *sess) { - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (atomic_read(&sess->nconn)) { diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 7505fddca15f..de77d9aa22c6 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open( /* * Set Identifier. 
*/ - chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++; + chap->id = conn->tpg->tpg_chap_id++; *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id); *aic_len += 1; pr_debug("[server] Sending CHAP_I=%d\n", chap->id); @@ -146,6 +146,7 @@ static int chap_server_compute_md5( unsigned char client_digest[MD5_SIGNATURE_SIZE]; unsigned char server_digest[MD5_SIGNATURE_SIZE]; unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH]; + size_t compare_len; struct iscsi_chap *chap = conn->auth_protocol; struct crypto_hash *tfm; struct hash_desc desc; @@ -184,7 +185,9 @@ static int chap_server_compute_md5( goto out; } - if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) { + /* Include the terminating NULL in the compare */ + compare_len = strlen(auth->userid) + 1; + if (strncmp(chap_n, auth->userid, compare_len) != 0) { pr_err("CHAP_N values do not match!\n"); goto out; } diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index fd145259361d..e3318edb233d 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \ se_node_acl); \ \ - return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \ + return sprintf(page, "%u\n", nacl->node_attrib.name); \ } \ \ static ssize_t iscsi_nacl_attrib_store_##name( \ @@ -897,7 +897,7 @@ static struct se_node_acl *lio_target_make_nodeacl( if (!se_nacl_new) return ERR_PTR(-ENOMEM); - cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; + cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth; /* * se_nacl_new may be released by core_tpg_add_initiator_node_acl() * when converting a NdoeACL from demo mode -> explict @@ -920,9 +920,9 @@ static struct se_node_acl *lio_target_make_nodeacl( return ERR_PTR(-ENOMEM); } - stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group; + stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group; stats_cg->default_groups[1] = NULL; - config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group, + config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group, "iscsi_sess_stats", &iscsi_stat_sess_cit); return se_nacl; @@ -967,7 +967,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \ if (iscsit_get_tpg(tpg) < 0) \ return -EINVAL; \ \ - rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \ + rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \ iscsit_put_tpg(tpg); \ return rb; \ } \ @@ -1041,6 +1041,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR); */ DEF_TPG_ATTRIB(prod_mode_write_protect); TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_demo_mode_discovery, + */ +DEF_TPG_ATTRIB(demo_mode_discovery); +TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR); +/* + * Define iscsi_tpg_attrib_s_default_erl + */ +DEF_TPG_ATTRIB(default_erl); +TPG_ATTR(default_erl, S_IRUGO | S_IWUSR); static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_authentication.attr, @@ -1051,6 +1061,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_cache_dynamic_acls.attr, &iscsi_tpg_attrib_demo_mode_write_protect.attr, &iscsi_tpg_attrib_prod_mode_write_protect.attr, + &iscsi_tpg_attrib_demo_mode_discovery.attr, + &iscsi_tpg_attrib_default_erl.attr, NULL, }; @@ -1514,21 +1526,21 @@ static struct se_wwn 
*lio_target_call_coreaddtiqn( return ERR_PTR(-ENOMEM); } - stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group; - stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group; - stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group; - stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group; - stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group; + stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group; + stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group; + stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group; + stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group; + stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group; stats_cg->default_groups[5] = NULL; - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group, "iscsi_instance", &iscsi_stat_instance_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group, "iscsi_sess_err", &iscsi_stat_sess_err_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group, "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group, "iscsi_login_stats", &iscsi_stat_login_cit); - config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group, + config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group, "iscsi_logout_stats", &iscsi_stat_logout_cit); pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn); @@ -1784,6 +1796,11 @@ static int lio_queue_status(struct se_cmd *se_cmd) struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); cmd->i_state = ISTATE_SEND_STATUS; + + if (cmd->se_cmd.scsi_status || cmd->sense_reason) { + iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); + return 0; + } cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); return 0; @@ -1815,21 +1832,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; + return tpg->tpg_attrib.default_cmdsn_depth; } static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg) { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int lio_tpg_check_demo_mode_write_protect( @@ -1837,7 +1854,7 @@ static int lio_tpg_check_demo_mode_write_protect( { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int lio_tpg_check_prod_mode_write_protect( @@ -1845,7 +1862,7 @@ static int lio_tpg_check_prod_mode_write_protect( { struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr; - return 
ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; } static void lio_tpg_release_fabric_acl( @@ -1908,9 +1925,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl) { struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl, se_node_acl); + struct se_portal_group *se_tpg = se_acl->se_tpg; + struct iscsi_portal_group *tpg = container_of(se_tpg, + struct iscsi_portal_group, tpg_se_tpg); - ISCSI_NODE_ATTRIB(acl)->nacl = acl; - iscsit_set_default_node_attribues(acl); + acl->node_attrib.nacl = acl; + iscsit_set_default_node_attribues(acl, tpg); } static int lio_check_stop_free(struct se_cmd *se_cmd) @@ -1995,17 +2015,17 @@ int iscsi_target_register_configfs(void) * Setup default attribute lists for various fabric->tf_cit_tmpl * sturct config_item_type's */ - TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; + fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs; ret = target_fabric_configfs_register(fabric); if (ret < 0) { diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 9a5721b8ff96..48f7b3bf4e8c 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -37,9 +37,6 @@ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0 #define NA_RANDOM_DATAIN_SEQ_OFFSETS 0 #define NA_RANDOM_R2T_OFFSETS 0 -#define NA_DEFAULT_ERL 0 -#define NA_DEFAULT_ERL_MAX 2 -#define NA_DEFAULT_ERL_MIN 0 /* struct iscsi_tpg_attrib sanity values */ #define TA_AUTHENTICATION 1 @@ -58,6 +55,8 @@ #define TA_DEMO_MODE_WRITE_PROTECT 1 /* Disabled by default in production mode w/ explict ACLs */ #define TA_PROD_MODE_WRITE_PROTECT 0 +#define TA_DEMO_MODE_DISCOVERY 1 +#define TA_DEFAULT_ERL 0 #define TA_CACHE_CORE_NPS 0 @@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table { CMDSN_NORMAL_OPERATION = 0, CMDSN_LOWER_THAN_EXP = 1, 
CMDSN_HIGHER_THAN_EXP = 2, + CMDSN_MAXCMDSN_OVERRUN = 3, }; /* Used for iscsi_handle_immediate_data() return values */ @@ -650,14 +650,13 @@ struct iscsi_session { /* Used for session reference counting */ int session_usage_count; int session_waiting_on_uc; - u32 cmd_pdus; - u32 rsp_pdus; - u64 tx_data_octets; - u64 rx_data_octets; - u32 conn_digest_errors; - u32 conn_timeout_errors; + atomic_long_t cmd_pdus; + atomic_long_t rsp_pdus; + atomic_long_t tx_data_octets; + atomic_long_t rx_data_octets; + atomic_long_t conn_digest_errors; + atomic_long_t conn_timeout_errors; u64 creation_time; - spinlock_t session_stats_lock; /* Number of active connections */ atomic_t nconn; atomic_t session_continuation; @@ -755,11 +754,6 @@ struct iscsi_node_acl { struct se_node_acl se_node_acl; }; -#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps) - -#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib) -#define ISCSI_NODE_AUTH(t) (&(t)->node_auth) - struct iscsi_tpg_attrib { u32 authentication; u32 login_timeout; @@ -769,6 +763,8 @@ struct iscsi_tpg_attrib { u32 default_cmdsn_depth; u32 demo_mode_write_protect; u32 prod_mode_write_protect; + u32 demo_mode_discovery; + u32 default_erl; struct iscsi_portal_group *tpg; }; @@ -835,12 +831,6 @@ struct iscsi_portal_group { struct list_head tpg_list; } ____cacheline_aligned; -#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg) -#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l]) -#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg) -#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib) -#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg) - struct iscsi_wwn_stat_grps { struct config_group iscsi_stat_group; struct config_group iscsi_instance_group; @@ -871,8 +861,6 @@ struct iscsi_tiqn { struct iscsi_logout_stats logout_stats; } ____cacheline_aligned; -#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps) - struct iscsit_global { /* In core shutdown */ u32 in_shutdown; diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c index 6c7a5104a4cd..7087c736daa5 100644 --- a/drivers/target/iscsi/iscsi_target_device.c +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess cmd->maxcmdsn_inc = 1; - if (!mutex_trylock(&sess->cmdsn_mutex)) { - sess->max_cmd_sn += 1; - pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); - return; - } + mutex_lock(&sess->cmdsn_mutex); sess->max_cmd_sn += 1; pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn); mutex_unlock(&sess->cmdsn_mutex); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 41052e512d92..0d1e6ee3e992 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -757,7 +757,7 @@ int iscsit_check_post_dataout( static void iscsit_handle_time2retain_timeout(unsigned long data) { struct iscsi_session *sess = (struct iscsi_session *) data; - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; spin_lock_bh(&se_tpg->session_lock); @@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data) tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; - sess->conn_timeout_errors++; + atomic_long_inc(&sess->conn_timeout_errors); spin_unlock(&tiqn->sess_err_stats.lock); } } @@ -801,9 +801,9 
@@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) * Only start Time2Retain timer when the associated TPG is still in * an ACTIVE (eg: not disabled or shutdown) state. */ - spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock); - tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE); - spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock); + spin_lock(&sess->tpg->tpg_state_lock); + tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE); + spin_unlock(&sess->tpg->tpg_state_lock); if (!tpg_active) return; @@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) */ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) { - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 1794c753954a..4eb93b2b6473 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1( } sess->creation_time = get_jiffies_64(); - spin_lock_init(&sess->session_stats_lock); /* * The FFP CmdSN window values will be allocated from the TPG's * Initiator Node's ACL once the login has been successfully completed. @@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2( * Assign a new TPG Session Handle. Note this is protected with * struct iscsi_portal_group->np_login_sem from iscsit_access_np(). */ - sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; + sess->tsih = ++sess->tpg->ntsih; if (!sess->tsih) - sess->tsih = ++ISCSI_TPG_S(sess)->ntsih; + sess->tsih = ++sess->tpg->ntsih; /* * Create the default params from user defined values.. */ if (iscsi_copy_param_list(&conn->param_list, - ISCSI_TPG_C(conn)->param_list, 1) < 0) { + conn->tpg->param_list, 1) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; @@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2( * In our case, we have already located the struct iscsi_tiqn at this point. */ memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); + sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2( iscsi_login_set_conn_values(sess, conn, pdu->cid); if (iscsi_copy_param_list(&conn->param_list, - ISCSI_TPG_C(conn)->param_list, 0) < 0) { + conn->tpg->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; @@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2( * In our case, we have already located the struct iscsi_tiqn at this point. 
*/ memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt); + sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); @@ -691,7 +690,7 @@ int iscsi_post_login_handler( int stop_timer = 0; struct iscsi_session *sess = conn->sess; struct se_session *se_sess = sess->se_sess; - struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess); + struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; struct iscsi_thread_set *ts; @@ -1154,7 +1153,7 @@ old_sess_out: spin_lock_bh(&conn->sess->conn_lock); if (conn->sess->session_state == TARG_SESS_STATE_FAILED) { struct se_portal_group *se_tpg = - &ISCSI_TPG_C(conn)->tpg_se_tpg; + &conn->tpg->tpg_se_tpg; atomic_set(&conn->sess->session_continuation, 0); spin_unlock_bh(&conn->sess->conn_lock); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index ef6d836a4d09..83c965c65386 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -88,7 +88,7 @@ int extract_param( if (len < 0) return -1; - if (len > max_length) { + if (len >= max_length) { pr_err("Length of input: %d exceeds max_length:" " %d\n", len, max_length); return -1; @@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication( iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl); - auth = ISCSI_NODE_AUTH(iscsi_nacl); + auth = &iscsi_nacl->node_auth; } } else { /* @@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero( return -1; if (!iscsi_check_negotiated_keys(conn->param_list)) { - if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && + if (conn->tpg->tpg_attrib.authentication && !strncmp(param->value, NONE, 4)) { pr_err("Initiator sent AuthMethod=None but" " Target is enforcing iSCSI Authentication," @@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero( return -1; } - if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication && + if (conn->tpg->tpg_attrib.authentication && !login->auth_complete) return 0; @@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log } if (!login->auth_complete && - ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) { + conn->tpg->tpg_attrib.authentication) { pr_err("Initiator is requesting CSG: 1, has not been" " successfully authenticated, and the Target is" " enforcing iSCSI Authentication, login failed.\n"); diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c index 93bdc475eb00..16454a922e2b 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.c +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c @@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname( } void iscsit_set_default_node_attribues( - struct iscsi_node_acl *acl) + struct iscsi_node_acl *acl, + struct iscsi_portal_group *tpg) { struct iscsi_node_attrib *a = &acl->node_attrib; @@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues( a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS; a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS; a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS; - a->default_erl = NA_DEFAULT_ERL; + a->default_erl = tpg->tpg_attrib.default_erl; } int iscsit_na_dataout_timeout( diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h index 
c970b326ef23..0c69a46a62ec 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.h +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h @@ -1,7 +1,8 @@ #ifndef ISCSI_TARGET_NODEATTRIB_H #define ISCSI_TARGET_NODEATTRIB_H -extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *); +extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *, + struct iscsi_portal_group *); extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32); extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32); extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32); diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index f788e8b5e855..103395510307 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->cmd_pdus)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rsp_pdus)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)sess->tx_data_octets); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->tx_data_octets)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)sess->rx_data_octets); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->rx_data_octets)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", - sess->conn_digest_errors); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_digest_errors)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); @@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors( if (se_sess) { sess = se_sess->fabric_sess_ptr; if (sess) - ret = snprintf(page, PAGE_SIZE, "%u\n", - sess->conn_timeout_errors); + ret = snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&sess->conn_timeout_errors)); } spin_unlock_bh(&se_nacl->nacl_sess_lock); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 4faeb47fa5e1..39761837608d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS; a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT; a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT; + a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY; + a->default_erl = TA_DEFAULT_ERL; } int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -237,7 +239,7 @@ int 
iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro if (iscsi_create_default_params(&tpg->param_list) < 0) goto err_out; - ISCSI_TPG_ATTRIB(tpg)->tpg = tpg; + tpg->tpg_attrib.tpg = tpg; spin_lock(&tpg->tpg_state_lock); tpg->tpg_state = TPG_STATE_INACTIVE; @@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) return -EINVAL; } - if (ISCSI_TPG_ATTRIB(tpg)->authentication) { + if (tpg->tpg_attrib.authentication) { if (!strcmp(param->value, NONE)) { ret = iscsi_update_param_value(param, CHAP); if (ret) @@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect( return 0; } + +int iscsit_ta_demo_mode_discovery( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->demo_mode_discovery = flag; + pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:" + " %s\n", tpg->tpgt, (a->demo_mode_discovery) ? + "ON" : "OFF"); + + return 0; +} + +int iscsit_ta_default_erl( + struct iscsi_portal_group *tpg, + u32 default_erl) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) { + pr_err("Illegal value for default_erl: %u\n", default_erl); + return -EINVAL; + } + + a->default_erl = default_erl; + pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index b77693e2c209..213c0fc7fdc9 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32); extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32); extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32); extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32); +extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32); +extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index b0cac0c342e1..0819e688a398 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm */ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) { pr_err("Received CmdSN: 0x%08x is greater than" - " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn, + " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, sess->max_cmd_sn); - ret = CMDSN_ERROR_CANNOT_RECOVER; + ret = CMDSN_MAXCMDSN_OVERRUN; } else if (cmdsn == sess->exp_cmd_sn) { sess->exp_cmd_sn++; @@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, ret = CMDSN_HIGHER_THAN_EXP; break; case CMDSN_LOWER_THAN_EXP: + case CMDSN_MAXCMDSN_OVERRUN: + default: cmd->i_state = ISTATE_REMOVE; iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state); - ret = cmdsn_ret; - break; - default: - reason = ISCSI_REASON_PROTOCOL_ERROR; - reject = true; - ret = cmdsn_ret; + /* + * Existing callers for iscsit_sequence_cmd() will silently + * ignore commands with CMDSN_LOWER_THAN_EXP, so force this + * return for CMDSN_MAXCMDSN_OVERRUN as well.. 
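The MaxCmdSN overrun check above compares CmdSN against MaxCmdSN with iscsi_sna_gt() rather than a plain '>' so that 32-bit CmdSN wrap-around is handled. A standalone C sketch of a serial-number comparison of that kind, assuming the usual RFC 1982-style half-window rule (this is an analogue, not the driver's helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Serial-number "greater than": true when a is ahead of b on the
 * wrapping 32-bit number line (half-window rule). */
static bool sna_gt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	/* 0x00000002 is ahead of 0xfffffffe across the wrap point. */
	printf("%d\n", sna_gt(0x00000002u, 0xfffffffeu));	/* 1 */
	printf("%d\n", sna_gt(0xfffffffeu, 0x00000002u));	/* 0 */
	printf("%d\n", sna_gt(5u, 5u));				/* 0 */
	return 0;
}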
+ */ + ret = CMDSN_LOWER_THAN_EXP; break; } mutex_unlock(&conn->sess->cmdsn_mutex); @@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data) tiqn->sess_err_stats.last_sess_failure_type = ISCSI_SESS_ERR_CXN_TIMEOUT; tiqn->sess_err_stats.cxn_timeout_errors++; - conn->sess->conn_timeout_errors++; + atomic_long_inc(&conn->sess->conn_timeout_errors); spin_unlock_bh(&tiqn->sess_err_stats.lock); } } diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 0f6d69dabca1..1b41e6776152 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth( return sdev->queue_depth; } +static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag); + + if (tag) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag = 0; + + return tag; +} + /* * Locate the SAM Task Attr from struct scsi_cmnd * */ @@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work) set_host_byte(sc, DID_NO_CONNECT); goto out_done; } - + if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) { + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + goto out_done; + } tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) { scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" @@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) } tl_cmd->sc = sc; + tl_cmd->sc_cmd_tag = sc->tag; INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); queue_work(tcm_loop_workqueue, &tl_cmd->work); return 0; @@ -242,41 +261,21 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) * Called from SCSI EH process context to issue a LUN_RESET TMR * to struct scsi_device */ -static int tcm_loop_device_reset(struct scsi_cmnd *sc) +static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, + struct tcm_loop_nexus *tl_nexus, + int lun, int task, enum tcm_tmreq_table tmr) { struct se_cmd *se_cmd = NULL; - struct se_portal_group *se_tpg; struct se_session *se_sess; + struct se_portal_group *se_tpg; struct tcm_loop_cmd *tl_cmd = NULL; - struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tmr *tl_tmr = NULL; - struct tcm_loop_tpg *tl_tpg; - int ret = FAILED, rc; - /* - * Locate the tcm_loop_hba_t pointer - */ - tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); - return FAILED; - } - se_sess = tl_nexus->se_sess; - /* - * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id - */ - tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - se_tpg = &tl_tpg->tl_se_tpg; + int ret = TMR_FUNCTION_FAILED, rc; tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { pr_err("Unable to allocate memory for tl_cmd\n"); - return FAILED; + return ret; } tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); @@ -287,6 +286,8 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) init_waitqueue_head(&tl_tmr->tl_tmr_wait); se_cmd = &tl_cmd->tl_se_cmd; + se_tpg = &tl_tpg->tl_se_tpg; + se_sess = tl_nexus->se_sess; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ @@ -294,17 +295,23 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) DMA_NONE, 
MSG_SIMPLE_TAG, &tl_cmd->tl_sense_buf[0]); - rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL); + rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); if (rc < 0) goto release; + + if (tmr == TMR_ABORT_TASK) + se_cmd->se_tmr_req->ref_task_tag = task; + /* - * Locate the underlying TCM struct se_lun from sc->device->lun + * Locate the underlying TCM struct se_lun */ - if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) + if (transport_lookup_tmr_lun(se_cmd, lun) < 0) { + ret = TMR_LUN_DOES_NOT_EXIST; goto release; + } /* - * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() - * to wake us up. + * Queue the TMR to TCM Core and sleep waiting for + * tcm_loop_queue_tm_rsp() to wake us up. */ transport_generic_handle_tmr(se_cmd); wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); @@ -312,8 +319,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) * The TMR LUN_RESET has completed, check the response status and * then release allocations. */ - ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? - SUCCESS : FAILED; + ret = se_cmd->se_tmr_req->response; release: if (se_cmd) transport_generic_free_cmd(se_cmd, 1); @@ -323,6 +329,94 @@ release: return ret; } +static int tcm_loop_abort_task(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_hba->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + sc->tag, TMR_ABORT_TASK); + return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; +} + +/* + * Called from SCSI EH process context to issue a LUN_RESET TMR + * to struct scsi_device + */ +static int tcm_loop_device_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; + struct tcm_loop_tpg *tl_tpg; + int ret = FAILED; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_hba->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + 0, TMR_LUN_RESET); + return (ret == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; +} + +static int tcm_loop_target_reset(struct scsi_cmnd *sc) +{ + struct tcm_loop_hba *tl_hba; + struct tcm_loop_tpg *tl_tpg; + + /* + * Locate the tcm_loop_hba_t pointer + */ + tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); + if (!tl_hba) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return FAILED; + } + /* + * Locate the tl_tpg pointer from TargetID in sc->device->id + */ + tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + if (tl_tpg) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return SUCCESS; + } + return FAILED; +} + static int tcm_loop_slave_alloc(struct scsi_device *sd) { set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); @@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd) static int tcm_loop_slave_configure(struct scsi_device *sd) { + if (sd->tagged_supported) { + scsi_activate_tcq(sd, sd->queue_depth); + scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, + sd->host->cmd_per_lun); + } else { + scsi_adjust_queue_depth(sd, 0, + sd->host->cmd_per_lun); + } + return 0; } @@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = { .name = "TCM_Loopback", .queuecommand = tcm_loop_queuecommand, .change_queue_depth = tcm_loop_change_queue_depth, + .change_queue_type = tcm_loop_change_queue_type, + .eh_abort_handler = tcm_loop_abort_task, .eh_device_reset_handler = tcm_loop_device_reset, + .eh_target_reset_handler = tcm_loop_target_reset, .can_queue = 1024, .this_id = -1, .sg_tablesize = 256, @@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl) static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd) { - return 1; + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + + return tl_cmd->sc_cmd_tag; } static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd) @@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus( struct tcm_loop_nexus *tl_nexus; struct tcm_loop_hba *tl_hba = tpg->tl_hba; - tl_nexus = tpg->tl_hba->tl_nexus; + if (!tl_hba) + return -ENODEV; + + tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) return -ENODEV; @@ -1061,8 +1173,56 @@ check_newline: TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR); +static ssize_t tcm_loop_tpg_show_transport_status( + struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + const char *status = NULL; + ssize_t ret = -EINVAL; + + switch (tl_tpg->tl_transport_status) { + case TCM_TRANSPORT_ONLINE: + status = "online"; + break; + case TCM_TRANSPORT_OFFLINE: + status = "offline"; + break; + default: + break; + } + + if (status) + ret = snprintf(page, PAGE_SIZE, "%s\n", status); + + return ret; +} + +static ssize_t tcm_loop_tpg_store_transport_status( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, + struct tcm_loop_tpg, tl_se_tpg); + + if (!strncmp(page, "online", 6)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE; + return count; + } + if (!strncmp(page, "offline", 7)) { + tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE; + return count; + } + return -EINVAL; +} + +TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR); + static struct configfs_attribute *tcm_loop_tpg_attrs[] = { &tcm_loop_tpg_nexus.attr, + &tcm_loop_tpg_transport_status.attr, NULL, }; @@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void) /* * Setup default attribute lists for various 
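The new transport_status attribute above accepts the strings "online" and "offline" and maps them onto the TCM_TRANSPORT_* state that tcm_loop_submission_work() checks before queueing a command. A small userspace C sketch of that parse-and-set pattern (the function below is a simplified stand-in, not the configfs handler itself):

#include <stdio.h>
#include <string.h>

#define TCM_TRANSPORT_ONLINE  0
#define TCM_TRANSPORT_OFFLINE 1

/* Map a user-supplied string onto a transport state.
 * Returns 0 on success, -1 on an unrecognized value. */
static int parse_transport_status(const char *buf, int *status)
{
	if (!strncmp(buf, "online", 6)) {
		*status = TCM_TRANSPORT_ONLINE;
		return 0;
	}
	if (!strncmp(buf, "offline", 7)) {
		*status = TCM_TRANSPORT_OFFLINE;
		return 0;
	}
	return -1;
}

int main(void)
{
	int status = TCM_TRANSPORT_ONLINE;

	if (!parse_transport_status("offline\n", &status))
		printf("status=%d\n", status);	/* 1 */
	return 0;
}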
fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; /* * Once fabric->tf_ops has been setup, now register the fabric for * use within TCM diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index dd7a84ee78e1..54c59d0b6608 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -10,6 +10,8 @@ struct tcm_loop_cmd { /* State of Linux/SCSI CDB+Data descriptor */ u32 sc_cmd_state; + /* Tagged command queueing */ + u32 sc_cmd_tag; /* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */ struct scsi_cmnd *sc; /* The TCM I/O descriptor that is accessed via container_of() */ @@ -40,8 +42,12 @@ struct tcm_loop_nacl { struct se_node_acl se_node_acl; }; +#define TCM_TRANSPORT_ONLINE 0 +#define TCM_TRANSPORT_OFFLINE 1 + struct tcm_loop_tpg { unsigned short tl_tpgt; + unsigned short tl_transport_status; atomic_t tl_tpg_port_count; struct se_portal_group tl_se_tpg; struct tcm_loop_hba *tl_hba; diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index e51b09a04d52..24884cac19ce 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; ret = target_fabric_configfs_register(fabric); if (ret < 0) { diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 47244102281e..fdcee326bfbc 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -44,7 +44,7 @@ static sense_reason_t core_alua_check_transition(int state, int *primary); static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, - struct se_port *port, int explict, int offline); + struct 
se_port *port, int explicit, int offline); static u16 alua_lu_gps_counter; static u32 alua_lu_gps_count; @@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set supported ASYMMETRIC ACCESS State bits */ - buf[off] = 0x80; /* T_SUP */ - buf[off] |= 0x40; /* O_SUP */ - buf[off] |= 0x8; /* U_SUP */ - buf[off] |= 0x4; /* S_SUP */ - buf[off] |= 0x2; /* AN_SUP */ - buf[off++] |= 0x1; /* AO_SUP */ + buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states; /* * TARGET PORT GROUP */ @@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) if (ext_hdr != 0) { buf[4] = 0x10; /* - * Set the implict transition time (in seconds) for the application + * Set the implicit transition time (in seconds) for the application * client to use as a base for it's transition timeout value. * * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN @@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; if (tg_pt_gp) - buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs; + buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); } } @@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) } /* - * SET_TARGET_PORT_GROUPS for explict ALUA operation. + * SET_TARGET_PORT_GROUPS for explicit ALUA operation. * * See spc4r17 section 6.35 */ @@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; /* - * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed + * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed * for the local tg_pt_gp. */ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; @@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) } spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); - if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) { + if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { pr_debug("Unable to process SET_TARGET_PORT_GROUPS" - " while TPGS_EXPLICT_ALUA is disabled\n"); + " while TPGS_EXPLICIT_ALUA is disabled\n"); rc = TCM_UNSUPPORTED_SCSI_OPCODE; goto out; } @@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) spin_unlock(&dev->t10_alua.tg_pt_gps_lock); } else { /* - * Extact the RELATIVE TARGET PORT IDENTIFIER to identify + * Extract the RELATIVE TARGET PORT IDENTIFIER to identify * the Target Port in question for the the incoming * SET_TARGET_PORT_GROUPS op. 
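The REPORT TARGET PORT GROUPS hunk above stops hard-coding the T_SUP/O_SUP/U_SUP/S_SUP/AN_SUP/AO_SUP bits and reports the group's tg_pt_gp_alua_supported_states mask instead. A standalone C sketch of packing and testing that byte, using the ALUA_*_SUP values this patch adds to target_core_alua.h further down:

#include <stdint.h>
#include <stdio.h>

/* Supported ALUA access state bits (SPC-4 REPORT TARGET PORT GROUPS). */
#define ALUA_T_SUP   0x80	/* transitioning */
#define ALUA_O_SUP   0x40	/* offline */
#define ALUA_LBD_SUP 0x10	/* LBA dependent */
#define ALUA_U_SUP   0x08	/* unavailable */
#define ALUA_S_SUP   0x04	/* standby */
#define ALUA_AN_SUP  0x02	/* active/non-optimized */
#define ALUA_AO_SUP  0x01	/* active/optimized */

int main(void)
{
	/* Default mask mirroring the patch: every state except LBA dependent. */
	uint8_t supported = ALUA_T_SUP | ALUA_O_SUP | ALUA_U_SUP |
			    ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	printf("supported-states byte: 0x%02x\n", (unsigned int)supported);	/* 0xcf */
	printf("standby supported: %d\n", !!(supported & ALUA_S_SUP));
	printf("lba-dependent supported: %d\n", !!(supported & ALUA_LBD_SUP));
	return 0;
}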
*/ @@ -487,7 +482,7 @@ static inline int core_alua_state_transition( u8 *alua_ascq) { /* - * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by + * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by * spc4r17 section 5.9.2.5 */ switch (cdb[0]) { @@ -515,9 +510,9 @@ static inline int core_alua_state_transition( } /* - * return 1: Is used to signal LUN not accecsable, and check condition/not ready + * return 1: Is used to signal LUN not accessible, and check condition/not ready * return 0: Used to signal success - * reutrn -1: Used to signal failure, and invalid cdb field + * return -1: Used to signal failure, and invalid cdb field */ sense_reason_t target_alua_state_check(struct se_cmd *cmd) @@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd) nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); /* - * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional + * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional * statement so the compiler knows explicitly to check this case first. * For the Optimized ALUA access state case, we want to process the * incoming fabric cmd ASAP.. */ - if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED) + if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED) return 0; switch (out_alua_state) { @@ -620,13 +615,13 @@ out: } /* - * Check implict and explict ALUA state change request. + * Check implicit and explicit ALUA state change request. */ static sense_reason_t core_alua_check_transition(int state, int *primary) { switch (state) { - case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: case ALUA_ACCESS_STATE_STANDBY: case ALUA_ACCESS_STATE_UNAVAILABLE: @@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary) static char *core_alua_dump_state(int state) { switch (state) { - case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED: + case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED: return "Active/Optimized"; case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED: return "Active/NonOptimized"; @@ -676,10 +671,10 @@ char *core_alua_dump_status(int status) switch (status) { case ALUA_STATUS_NONE: return "None"; - case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG: - return "Altered by Explict STPG"; - case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA: - return "Altered by Implict ALUA"; + case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG: + return "Altered by Explicit STPG"; + case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA: + return "Altered by Implicit ALUA"; default: return "Unknown"; } @@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt( struct se_node_acl *nacl, unsigned char *md_buf, int new_state, - int explict) + int explicit) { struct se_dev_entry *se_deve; struct se_lun_acl *lacl; @@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt( old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, ALUA_ACCESS_STATE_TRANSITION); - tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ? - ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? 
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; /* * Check for the optional ALUA primary state transition delay */ @@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt( * change, a device server shall establish a unit attention * condition for the initiator port associated with every I_T * nexus with the additional sense code set to ASYMMETRIC - * ACCESS STATE CHAGED. + * ACCESS STATE CHANGED. * * After an explicit target port asymmetric access state * change, a device server shall establish a unit attention @@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt( lacl = se_deve->se_lun_acl; /* * se_deve->se_lun_acl pointer may be NULL for a - * entry created without explict Node+MappedLUN ACLs + * entry created without explicit Node+MappedLUN ACLs */ if (!lacl) continue; - if (explict && + if (explicit && (nacl != NULL) && (nacl == lacl->se_lun_nacl) && (l_port != NULL) && (l_port == port)) continue; @@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt( atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " from primary access state %s to %s\n", (explict) ? "explict" : - "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + " from primary access state %s to %s\n", (explicit) ? "explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), core_alua_dump_state(new_state)); @@ -880,7 +875,7 @@ int core_alua_do_port_transition( struct se_port *l_port, struct se_node_acl *l_nacl, int new_state, - int explict) + int explicit) { struct se_device *dev; struct se_port *port; @@ -917,7 +912,7 @@ int core_alua_do_port_transition( * success. */ core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl, - md_buf, new_state, explict); + md_buf, new_state, explicit); atomic_dec(&lu_gp->lu_gp_ref_cnt); smp_mb__after_atomic_dec(); kfree(md_buf); @@ -946,7 +941,7 @@ int core_alua_do_port_transition( continue; /* * If the target behavior port asymmetric access state - * is changed for any target port group accessiable via + * is changed for any target port group accessible via * a logical unit within a LU group, the target port * behavior group asymmetric access states for the same * target port group accessible via other logical units @@ -970,7 +965,7 @@ int core_alua_do_port_transition( * success. */ core_alua_do_transition_tg_pt(tg_pt_gp, port, - nacl, md_buf, new_state, explict); + nacl, md_buf, new_state, explicit); spin_lock(&dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); @@ -987,7 +982,7 @@ int core_alua_do_port_transition( pr_debug("Successfully processed LU Group: %s all ALUA TG PT" " Group IDs: %hu %s transition to primary state: %s\n", config_item_name(&lu_gp->lu_gp_group.cg_item), - l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict", + l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit", core_alua_dump_state(new_state)); atomic_dec(&lu_gp->lu_gp_ref_cnt); @@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata( static int core_alua_set_tg_pt_secondary_state( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, struct se_port *port, - int explict, + int explicit, int offline) { struct t10_alua_tg_pt_gp *tg_pt_gp; @@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state( atomic_set(&port->sep_tg_pt_secondary_offline, 0); md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len; - port->sep_tg_pt_secondary_stat = (explict) ? 
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : - ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; + port->sep_tg_pt_secondary_stat = (explicit) ? + ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : + ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " to secondary access state: %s\n", (explict) ? "explict" : - "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + " to secondary access state: %s\n", (explicit) ? "explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * struct se_device is released via core_alua_free_lu_gp_mem(). * * If the passed lu_gp does NOT match the default_lu_gp, assume - * we want to re-assocate a given lu_gp_mem with default_lu_gp. + * we want to re-associate a given lu_gp_mem with default_lu_gp. */ spin_lock(&lu_gp_mem->lu_gp_mem_lock); if (lu_gp != default_lu_gp) @@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, tg_pt_gp->tg_pt_gp_dev = dev; tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN; atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_ACTIVE_OPTMIZED); + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); /* - * Enable both explict and implict ALUA support by default + * Enable both explicit and implicit ALUA support by default */ tg_pt_gp->tg_pt_gp_alua_access_type = - TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA; + TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA; /* * Set the default Active/NonOptimized Delay in milliseconds */ tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS; tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; - tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS; + tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS; + + /* + * Enable all supported states + */ + tg_pt_gp->tg_pt_gp_alua_supported_states = + ALUA_T_SUP | ALUA_O_SUP | + ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP; if (def_group) { spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp( * been called from target_core_alua_drop_tg_pt_gp(). * * Here we remove *tg_pt_gp from the global list so that - * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS + * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS * can be made while we are releasing struct t10_alua_tg_pt_gp. */ spin_lock(&dev->t10_alua.tg_pt_gps_lock); @@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp( * core_alua_free_tg_pt_gp_mem(). * * If the passed tg_pt_gp does NOT match the default_tg_pt_gp, - * assume we want to re-assocate a given tg_pt_gp_mem with + * assume we want to re-associate a given tg_pt_gp_mem with * default_tg_pt_gp. 
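Both the primary and secondary transition paths above record how the access state was changed: an explicit SET TARGET PORT GROUPS records one status code, an implicit (target-internal) transition records the other. A tiny standalone C sketch of that status selection and its textual dump, mirroring core_alua_dump_status() with the ALUA_STATUS_* codes from the header below:

#include <stdio.h>

#define ALUA_STATUS_NONE                     0x00
#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02

/* Pick the status code that records who changed the access state. */
static int alua_status(int explicit)
{
	return explicit ? ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
			  ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
}

static const char *alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}
}

int main(void)
{
	printf("%s\n", alua_dump_status(alua_status(1)));
	printf("%s\n", alua_dump_status(alua_status(0)));
	return 0;
}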
*/ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) && - (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) - return sprintf(page, "Implict and Explict\n"); - else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA) - return sprintf(page, "Implict\n"); - else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) - return sprintf(page, "Explict\n"); + if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) && + (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) + return sprintf(page, "Implicit and Explicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA) + return sprintf(page, "Implicit\n"); + else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) + return sprintf(page, "Explicit\n"); else return sprintf(page, "None\n"); } @@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type( } if (tmp == 3) tg_pt_gp->tg_pt_gp_alua_access_type = - TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA; + TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA; else if (tmp == 2) - tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA; else if (tmp == 1) - tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA; + tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA; else tg_pt_gp->tg_pt_gp_alua_access_type = 0; @@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs( return count; } -ssize_t core_alua_show_implict_trans_secs( +ssize_t core_alua_show_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs); + return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); } -ssize_t core_alua_store_implict_trans_secs( +ssize_t core_alua_store_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) @@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs( ret = kstrtoul(page, 0, &tmp); if (ret < 0) { - pr_err("Unable to extract implict_trans_secs\n"); + pr_err("Unable to extract implicit_trans_secs\n"); return ret; } - if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) { - pr_err("Passed implict_trans_secs: %lu, exceeds" - " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp, - ALUA_MAX_IMPLICT_TRANS_SECS); + if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) { + pr_err("Passed implicit_trans_secs: %lu, exceeds" + " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, + ALUA_MAX_IMPLICIT_TRANS_SECS); return -EINVAL; } - tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp; + tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp; return count; } @@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status( return ret; } if ((tmp != ALUA_STATUS_NONE) && - (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && - (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { pr_err("Illegal value for alua_tg_pt_status: %lu\n", tmp); return -EINVAL; diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h index e539c3e7f4ad..88e2e835f14a 100644 --- a/drivers/target/target_core_alua.h +++ b/drivers/target/target_core_alua.h @@ -7,15 +7,15 @@ * from spc4r17 section 6.4.2 Table 135 */ #define TPGS_NO_ALUA 0x00 -#define TPGS_IMPLICT_ALUA 0x10 -#define TPGS_EXPLICT_ALUA 0x20 +#define TPGS_IMPLICIT_ALUA 0x10 +#define TPGS_EXPLICIT_ALUA 0x20 /* 
* ASYMMETRIC ACCESS STATE field * * from spc4r17 section 6.27 Table 245 */ -#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0 +#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0 #define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1 #define ALUA_ACCESS_STATE_STANDBY 0x2 #define ALUA_ACCESS_STATE_UNAVAILABLE 0x3 @@ -23,13 +23,24 @@ #define ALUA_ACCESS_STATE_TRANSITION 0xf /* + * from spc4r36j section 6.37 Table 306 + */ +#define ALUA_T_SUP 0x80 +#define ALUA_O_SUP 0x40 +#define ALUA_LBD_SUP 0x10 +#define ALUA_U_SUP 0x08 +#define ALUA_S_SUP 0x04 +#define ALUA_AN_SUP 0x02 +#define ALUA_AO_SUP 0x01 + +/* * REPORT_TARGET_PORT_GROUP STATUS CODE * * from spc4r17 section 6.27 Table 246 */ #define ALUA_STATUS_NONE 0x00 -#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01 -#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02 +#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01 +#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02 /* * From spc4r17, Table D.1: ASC and ASCQ Assignement @@ -46,17 +57,17 @@ #define ALUA_DEFAULT_NONOP_DELAY_MSECS 100 #define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */ /* - * Used for implict and explict ALUA transitional delay, that is disabled + * Used for implicit and explicit ALUA transitional delay, that is disabled * by default, and is intended to be used for debugging client side ALUA code. */ #define ALUA_DEFAULT_TRANS_DELAY_MSECS 0 #define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */ /* - * Used for the recommended application client implict transition timeout + * Used for the recommended application client implicit transition timeout * in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header. */ -#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0 -#define ALUA_MAX_IMPLICT_TRANS_SECS 255 +#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0 +#define ALUA_MAX_IMPLICIT_TRANS_SECS 255 /* * Used by core_alua_update_tpg_primary_metadata() and * core_alua_update_tpg_secondary_metadata() @@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *, char *); extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *, const char *, size_t); -extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *, char *); -extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *, +extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *, const char *, size_t); extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *, char *); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 82e81c542e43..272755d03e5a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric( * struct target_fabric_configfs *tf will contain a usage reference. 
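The renamed TPGS_IMPLICIT_ALUA/TPGS_EXPLICIT_ALUA bits above come from the spc4r17 TPGS field and are what core_alua_show_access_type() decodes into its four strings. A compact standalone C sketch of that decode, using the same bit values:

#include <stdio.h>

#define TPGS_NO_ALUA       0x00
#define TPGS_IMPLICIT_ALUA 0x10
#define TPGS_EXPLICIT_ALUA 0x20

/* Decode the TPGS access-type bits the way the configfs show routine does. */
static const char *alua_access_type(int type)
{
	if ((type & TPGS_EXPLICIT_ALUA) && (type & TPGS_IMPLICIT_ALUA))
		return "Implicit and Explicit";
	else if (type & TPGS_IMPLICIT_ALUA)
		return "Implicit";
	else if (type & TPGS_EXPLICIT_ALUA)
		return "Explicit";
	return "None";
}

int main(void)
{
	printf("%s\n", alua_access_type(TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA));
	printf("%s\n", alua_access_type(TPGS_NO_ALUA));
	return 0;
}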
*/ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_cit_tmpl.tfc_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; tf->tf_group.default_groups[0] = &tf->tf_disc_group; tf->tf_group.default_groups[1] = NULL; config_group_init_type_name(&tf->tf_group, name, - &TF_CIT_TMPL(tf)->tfc_wwn_cit); + &tf->tf_cit_tmpl.tfc_wwn_cit); config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", - &TF_CIT_TMPL(tf)->tfc_discovery_cit); + &tf->tf_cit_tmpl.tfc_discovery_cit); pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); @@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( int new_state, ret; if (!tg_pt_gp->tg_pt_gp_valid_id) { - pr_err("Unable to do implict ALUA on non valid" + pr_err("Unable to do implicit ALUA on non valid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } @@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( } new_state = (int)tmp; - if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { - pr_err("Unable to process implict configfs ALUA" - " transition while TPGS_IMPLICT_ALUA is disabled\n"); + if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) { + pr_err("Unable to process implicit configfs ALUA" + " transition while TPGS_IMPLICIT_ALUA is disabled\n"); return -EINVAL; } @@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( new_status = (int)tmp; if ((new_status != ALUA_STATUS_NONE) && - (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && - (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { + (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) && + (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) { pr_err("Illegal ALUA access status: 0x%02x\n", new_status); return -EINVAL; @@ -2131,6 +2131,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type( SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR); /* + * alua_supported_states + */ + +#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \ +static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \ + struct t10_alua_tg_pt_gp *t, char *p) \ +{ \ + return sprintf(p, "%d\n", !!(t->_var & _bit)); \ +} + +#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \ +static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\ + struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \ +{ \ + unsigned long tmp; \ + int ret; \ + \ + if (!t->tg_pt_gp_valid_id) { \ + pr_err("Unable to do set ##_name ALUA state on non" \ + " valid tg_pt_gp ID: %hu\n", \ + t->tg_pt_gp_valid_id); \ + return -EINVAL; \ + } \ + \ + ret = kstrtoul(p, 0, &tmp); \ + if (ret < 0) { \ + pr_err("Invalid value '%s', must be '0' or '1'\n", p); \ + return -EINVAL; \ + } \ + if (tmp > 1) { \ + pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \ + return -EINVAL; \ + } \ + if (!tmp) \ + t->_var |= _bit; \ + else \ + t->_var &= ~_bit; \ + \ + return c; \ +} + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning, + tg_pt_gp_alua_supported_states, ALUA_T_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning, + tg_pt_gp_alua_supported_states, ALUA_T_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline, + tg_pt_gp_alua_supported_states, ALUA_O_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(offline, + tg_pt_gp_alua_supported_states, ALUA_O_SUP); 
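The alua_support_* attributes above are generated by a show/store macro pair: show prints !!(mask & bit), store parses a 0/1 value with kstrtoul() and flips the corresponding bit. A generic userspace C sketch of that attribute pattern, using strtoul() in place of kstrtoul() and treating 1 as "set the bit"; this illustrates the idea rather than transcribing the macro:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ALUA_S_SUP 0x04		/* standby supported */

/* show: report whether the bit is currently set, as 0 or 1. */
static int show_bit(unsigned int mask, unsigned int bit)
{
	return !!(mask & bit);
}

/* store: accept "0" or "1" and clear or set the bit accordingly. */
static int store_bit(unsigned int *mask, unsigned int bit, const char *buf)
{
	char *end;
	unsigned long tmp;

	errno = 0;
	tmp = strtoul(buf, &end, 0);
	if (errno || end == buf || tmp > 1)
		return -EINVAL;

	if (tmp)
		*mask |= bit;
	else
		*mask &= ~bit;
	return 0;
}

int main(void)
{
	unsigned int mask = 0;

	store_bit(&mask, ALUA_S_SUP, "1");
	printf("standby: %d\n", show_bit(mask, ALUA_S_SUP));	/* 1 */
	store_bit(&mask, ALUA_S_SUP, "0");
	printf("standby: %d\n", show_bit(mask, ALUA_S_SUP));	/* 0 */
	return 0;
}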
+SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent, + tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent, + tg_pt_gp_alua_supported_states, ALUA_LBD_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable, + tg_pt_gp_alua_supported_states, ALUA_U_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable, + tg_pt_gp_alua_supported_states, ALUA_U_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby, + tg_pt_gp_alua_supported_states, ALUA_S_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(standby, + tg_pt_gp_alua_supported_states, ALUA_S_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized, + tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized, + tg_pt_gp_alua_supported_states, ALUA_AO_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR); + +SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized, + tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized, + tg_pt_gp_alua_supported_states, ALUA_AN_SUP); +SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR); + +/* * alua_write_metadata */ static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata( @@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs( SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR); /* - * implict_trans_secs + * implicit_trans_secs */ -static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - return core_alua_show_implict_trans_secs(tg_pt_gp, page); + return core_alua_show_implicit_trans_secs(tg_pt_gp, page); } -static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs( +static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs( struct t10_alua_tg_pt_gp *tg_pt_gp, const char *page, size_t count) { - return core_alua_store_implict_trans_secs(tg_pt_gp, page, count); + return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count); } -SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR); +SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR); /* * preferred @@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { &target_core_alua_tg_pt_gp_alua_access_state.attr, &target_core_alua_tg_pt_gp_alua_access_status.attr, &target_core_alua_tg_pt_gp_alua_access_type.attr, + &target_core_alua_tg_pt_gp_alua_support_transitioning.attr, + &target_core_alua_tg_pt_gp_alua_support_offline.attr, + &target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr, + &target_core_alua_tg_pt_gp_alua_support_unavailable.attr, + &target_core_alua_tg_pt_gp_alua_support_standby.attr, + &target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr, + &target_core_alua_tg_pt_gp_alua_support_active_optimized.attr, &target_core_alua_tg_pt_gp_alua_write_metadata.attr, &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, &target_core_alua_tg_pt_gp_trans_delay_msecs.attr, - &target_core_alua_tg_pt_gp_implict_trans_secs.attr, + &target_core_alua_tg_pt_gp_implicit_trans_secs.attr, &target_core_alua_tg_pt_gp_preferred.attr, &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, 
&target_core_alua_tg_pt_gp_members.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index d90dbb0f1a69..207b340498a3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; } spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); @@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + + percpu_ref_get(&se_lun->lun_ref); + se_cmd->lun_ref_active = true; } /* Directly associate cmd with se_dev */ se_cmd->se_dev = se_lun->lun_se_dev; - /* TODO: get rid of this and use atomics for stats */ dev = se_lun->lun_se_dev; - spin_lock_irqsave(&dev->stats_lock, flags); - dev->num_cmds++; + atomic_long_inc(&dev->num_cmds); if (se_cmd->data_direction == DMA_TO_DEVICE) - dev->write_bytes += se_cmd->data_length; + atomic_long_add(se_cmd->data_length, &dev->write_bytes); else if (se_cmd->data_direction == DMA_FROM_DEVICE) - dev->read_bytes += se_cmd->data_length; - spin_unlock_irqrestore(&dev->stats_lock, flags); - - spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); - list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); - spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); + atomic_long_add(se_cmd->data_length, &dev->read_bytes); return 0; } @@ -314,14 +313,14 @@ int core_enable_device_list_for_node( deve = nacl->device_list[mapped_lun]; /* - * Check if the call is handling demo mode -> explict LUN ACL + * Check if the call is handling demo mode -> explicit LUN ACL * transition. This transition must be for the same struct se_lun * + mapped_lun that was setup in demo mode.. 
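The device-stats hunk above, like the iscsi session counters earlier in this series, drops the stats spinlock and counts commands and bytes with atomic_long_inc()/atomic_long_add(). A userspace C11 analogue of that lock-free counter pattern, using <stdatomic.h> in place of the kernel atomic_long_t API:

#include <stdatomic.h>
#include <stdio.h>

/* Per-device I/O statistics kept as lock-free counters. */
struct dev_stats {
	atomic_long num_cmds;
	atomic_long read_bytes;
	atomic_long write_bytes;
};

static void account_cmd(struct dev_stats *s, int is_write, long len)
{
	atomic_fetch_add(&s->num_cmds, 1);
	if (is_write)
		atomic_fetch_add(&s->write_bytes, len);
	else
		atomic_fetch_add(&s->read_bytes, len);
}

int main(void)
{
	struct dev_stats stats = {
		ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(0), ATOMIC_VAR_INIT(0)
	};

	account_cmd(&stats, 1, 4096);
	account_cmd(&stats, 0, 512);
	printf("cmds=%ld write=%ld read=%ld\n",
	       atomic_load(&stats.num_cmds),
	       atomic_load(&stats.write_bytes),
	       atomic_load(&stats.read_bytes));
	return 0;
}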
*/ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (deve->se_lun_acl != NULL) { pr_err("struct se_dev_entry->se_lun_acl" - " already set for demo mode -> explict" + " already set for demo mode -> explicit" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; @@ -329,7 +328,7 @@ int core_enable_device_list_for_node( if (deve->se_lun != lun) { pr_err("struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" - " -> explict LUN ACL transition\n"); + " -> explicit LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; } @@ -1407,6 +1406,7 @@ static void scsi_dump_inquiry(struct se_device *dev) struct se_device *target_alloc_device(struct se_hba *hba, const char *name) { struct se_device *dev; + struct se_lun *xcopy_lun; dev = hba->transport->alloc_device(hba, name); if (!dev) @@ -1423,7 +1423,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); INIT_LIST_HEAD(&dev->g_dev_node); - spin_lock_init(&dev->stats_lock); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->dev_reservation_lock); @@ -1469,6 +1468,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; + xcopy_lun = &dev->xcopy_lun; + xcopy_lun->lun_se_dev = dev; + init_completion(&xcopy_lun->lun_shutdown_comp); + INIT_LIST_HEAD(&xcopy_lun->lun_acl_list); + spin_lock_init(&xcopy_lun->lun_acl_lock); + spin_lock_init(&xcopy_lun->lun_sep_lock); + init_completion(&xcopy_lun->lun_ref_comp); + return dev; } diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 3503996d7d10..dae2ad6a669e 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun( } config_group_init_type_name(&lacl->se_lun_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit); + &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit); config_group_init_type_name(&lacl->ml_stat_grps.stat_group, - "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit); + "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit); lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; lacl_cg->default_groups[1] = NULL; @@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl( nacl_cg->default_groups[4] = NULL; config_group_init_type_name(&se_nacl->acl_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit); config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit); config_group_init_type_name(&se_nacl->acl_auth_group, "auth", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit); config_group_init_type_name(&se_nacl->acl_param_group, "param", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit); config_group_init_type_name(&se_nacl->acl_fabric_stat_group, "fabric_statistics", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit); return &se_nacl->acl_group; } @@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np( se_tpg_np->tpg_np_parent = se_tpg; 
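The fabric configfs hunks above replace the TF_CIT_TMPL() accessor macro with a direct reference to the embedded tf_cit_tmpl member. A trivial C illustration of why the two spellings name the same object (the struct and field layout below is a simplified stand-in, not the real target_fabric_configfs definition):

#include <stdio.h>

struct cit_template {
	int wwn_cit;
};

struct fabric_configfs {
	struct cit_template tf_cit_tmpl;	/* embedded, not a pointer */
};

/* Old-style accessor: hand back the address of the embedded template. */
#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)

int main(void)
{
	struct fabric_configfs tf = { .tf_cit_tmpl = { .wwn_cit = 42 } };

	/* Both expressions reach the same member. */
	printf("%d %d\n", TF_CIT_TMPL(&tf)->wwn_cit, tf.tf_cit_tmpl.wwn_cit);
	printf("same object: %d\n", TF_CIT_TMPL(&tf) == &tf.tf_cit_tmpl);
	return 0;
}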
config_group_init_type_name(&se_tpg_np->tpg_np_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_np_base_cit); return &se_tpg_np->tpg_np_group; } @@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun( } config_group_init_type_name(&lun->lun_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_port_cit); + &tf->tf_cit_tmpl.tfc_tpg_port_cit); config_group_init_type_name(&lun->port_stat_grps.stat_group, - "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit); + "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit); lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; lun_cg->default_groups[1] = NULL; @@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg( se_tpg->tpg_group.default_groups[6] = NULL; config_group_init_type_name(&se_tpg->tpg_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_base_cit); + &tf->tf_cit_tmpl.tfc_tpg_base_cit); config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", - &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit); + &tf->tf_cit_tmpl.tfc_tpg_lun_cit); config_group_init_type_name(&se_tpg->tpg_np_group, "np", - &TF_CIT_TMPL(tf)->tfc_tpg_np_cit); + &tf->tf_cit_tmpl.tfc_tpg_np_cit); config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", - &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit); + &tf->tf_cit_tmpl.tfc_tpg_nacl_cit); config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", - &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit); + &tf->tf_cit_tmpl.tfc_tpg_attrib_cit); config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", - &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit); + &tf->tf_cit_tmpl.tfc_tpg_auth_cit); config_group_init_type_name(&se_tpg->tpg_param_group, "param", - &TF_CIT_TMPL(tf)->tfc_tpg_param_cit); + &tf->tf_cit_tmpl.tfc_tpg_param_cit); return &se_tpg->tpg_group; } @@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn( wwn->wwn_group.default_groups[1] = NULL; config_group_init_type_name(&wwn->wwn_group, name, - &TF_CIT_TMPL(tf)->tfc_tpg_cit); + &tf->tf_cit_tmpl.tfc_tpg_cit); config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", - &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit); + &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit); return &wwn->wwn_group; } diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index b662f89dedac..0e34cda3271e 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } else { ret = fd_do_rw(cmd, sgl, sgl_nents, 1); /* - * Perform implict vfs_fsync_range() for fd_do_writev() ops + * Perform implicit vfs_fsync_range() for fd_do_writev() ops * for SCSI WRITEs with Forced Unit Access (FUA) set. * Allow this to happen independent of WCE=0 setting. 
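The target_core_file comment above describes syncing the written range after a SCSI WRITE that carries Forced Unit Access, independent of the write-cache setting. A userspace C analogue of that write-then-flush pattern, using pwrite() and fdatasync() in place of the in-kernel vfs_fsync_range() call (the file name below is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a buffer at the given offset and, if FUA is requested,
 * force it to stable storage before reporting completion. */
static int write_block(int fd, const void *buf, size_t len, off_t off, int fua)
{
	ssize_t ret = pwrite(fd, buf, len, off);

	if (ret != (ssize_t)len)
		return -1;
	if (fua && fdatasync(fd) < 0)
		return -1;
	return 0;
}

int main(void)
{
	char data[512];
	int fd = open("fua_demo.img", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	memset(data, 0xab, sizeof(data));
	if (write_block(fd, data, sizeof(data), 0, 1))
		perror("write_block");
	close(fd);
	return 0;
}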
*/ diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index b9a3394fe479..c87959f12760 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev) return iblock_emulate_read_cap_with_block_size(dev, bd, q); } +static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int ret; + + ret = bdev_alignment_offset(bd); + if (ret == -1) + return 0; + + /* convert offset-bytes to offset-lbas */ + return ret / bdev_logical_block_size(bd); +} + +static unsigned int iblock_get_lbppbe(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); + + return ilog2(logs_per_phys); +} + +static unsigned int iblock_get_io_min(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_min(bd); +} + +static unsigned int iblock_get_io_opt(struct se_device *dev) +{ + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bd = ib_dev->ibd_bd; + + return bdev_io_opt(bd); +} + static struct sbc_ops iblock_sbc_ops = { .execute_rw = iblock_execute_rw, .execute_sync_cache = iblock_execute_sync_cache, @@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = { .show_configfs_dev_params = iblock_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = iblock_get_blocks, + .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas, + .get_lbppbe = iblock_get_lbppbe, + .get_io_min = iblock_get_io_min, + .get_io_opt = iblock_get_io_opt, .get_write_cache = iblock_get_write_cache, }; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 579128abe3f5..47b63b094cdc 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev; struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, const char *); -struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, - unsigned char *); void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *); void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *); struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32); @@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); -int transport_clear_lun_from_sessions(struct se_lun *); +int transport_clear_lun_ref(struct se_lun *); void transport_send_task_abort(struct se_cmd *); sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); void target_qf_do_work(struct work_struct *work); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d1ae4c5c3ffd..2f5d77932c80 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder( * statement. 
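The iblock helpers added above derive two geometry values from block-layer queue limits: a logical-blocks-per-physical-block exponent (ilog2 of the physical/logical block size ratio) and an alignment offset converted from bytes to LBAs. A minimal userspace sketch of that arithmetic follows; the constants are hypothetical stand-ins for what the kernel reads via bdev_logical_block_size(), bdev_physical_block_size() and bdev_alignment_offset().

/* Hypothetical stand-alone sketch of the lbppbe/alignment math above. */
#include <stdint.h>
#include <stdio.h>

/* ilog2() of a power-of-two ratio, as used for the LBPPBE exponent */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t logical = 512;     /* bdev_logical_block_size() */
	uint32_t physical = 4096;   /* bdev_physical_block_size() */
	int align_bytes = 3584;     /* bdev_alignment_offset(), -1 if unknown */

	unsigned int lbppbe = ilog2_u32(physical / logical);              /* = 3 */
	uint32_t lalba = (align_bytes == -1) ? 0 : align_bytes / logical; /* = 7 */

	printf("lbppbe=%u lowest_aligned_lba=%u\n", lbppbe, lalba);
	return 0;
}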
*/ if (!ret && !other_cdb) { - pr_debug("Allowing explict CDB: 0x%02x for %s" + pr_debug("Allowing explicit CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder( */ if (!registered_nexus) { - pr_debug("Allowing implict CDB: 0x%02x" + pr_debug("Allowing implicit CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder( * allow commands from registered nexuses. */ - pr_debug("Allowing implict CDB: 0x%02x for %s" + pr_debug("Allowing implicit CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( alua_port_list) { /* * This pointer will be NULL for demo mode MappedLUNs - * that have not been make explict via a ConfigFS + * that have not been make explicit via a ConfigFS * MappedLUN group for the SCSI Initiator Node ACL. */ if (!deve_tmp->se_lun_acl) @@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) smp_mb__after_atomic_dec(); } -static int core_scsi3_check_implict_release( +static int core_scsi3_check_implicit_release( struct se_device *dev, struct t10_pr_registration *pr_reg) { @@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release( } if (pr_res_holder == pr_reg) { /* - * Perform an implict RELEASE if the registration that + * Perform an implicit RELEASE if the registration that * is being released is holding the reservation. * * From spc4r17, section 5.7.11.1: @@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release( * For 'All Registrants' reservation types, all existing * registrations are still processed as reservation holders * in core_scsi3_pr_seq_non_holder() after the initial - * reservation holder is implictly released here. + * reservation holder is implicitly released here. */ } else if (pr_reg->pr_reg_all_tg_pt && (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, @@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key, /* * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. */ - pr_holder = core_scsi3_check_implict_release( + pr_holder = core_scsi3_check_implicit_release( cmd->se_dev, pr_reg); if (pr_holder < 0) { ret = TCM_RESERVATION_CONFLICT; @@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release( struct se_device *dev, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, - int explict) + int explicit) { struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; char i_buf[PR_REG_ISID_ID_LEN]; @@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release( pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" " reservation holder TYPE: %s ALL_TG_PT: %d\n", - tfo->get_fabric_name(), (explict) ? "explict" : "implict", + tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", @@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt( memset(i_buf, 0, PR_REG_ISID_ID_LEN); core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); /* - * Do an implict RELEASE of the existing reservation. + * Do an implicit RELEASE of the existing reservation. 
*/ if (dev->dev_pr_res_holder) __core_scsi3_complete_pro_release(dev, nacl, @@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * 5.7.11.4 Preempting, Table 52 and Figure 7. * * For a ZERO SA Reservation key, release - * all other registrations and do an implict + * all other registrations and do an implicit * release of active persistent reservation. * * For a non-ZERO SA Reservation key, only diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 131327ac7f5b..4ffe5f2ec0e9 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@ #include <linux/string.h> #include <linux/parser.h> #include <linux/timer.h> -#include <linux/blkdev.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <scsi/scsi.h> diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index d9b92b2c524d..52ae54e60105 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; buf[11] = dev->dev_attrib.block_size & 0xff; + + if (dev->transport->get_lbppbe) + buf[13] = dev->transport->get_lbppbe(dev) & 0x0f; + + if (dev->transport->get_alignment_offset_lbas) { + u16 lalba = dev->transport->get_alignment_offset_lbas(dev); + buf[14] = (lalba >> 8) & 0x3f; + buf[15] = lalba & 0xff; + } + /* * Set Thin Provisioning Enable bit following sbc3r22 in section * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. */ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) - buf[14] = 0x80; + buf[14] |= 0x80; rbuf = transport_kmap_data_sg(cmd); if (rbuf) { diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 074539558a54..021c3f4a4f00 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf) buf[5] = 0x80; /* - * Set TPGS field for explict and/or implict ALUA access type + * Set TPGS field for explicit and/or implicit ALUA access type * and opteration. * * See spc4r17 section 6.4.2 Table 135 @@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) struct se_device *dev = cmd->se_dev; u32 max_sectors; int have_tp = 0; + int opt, min; /* * Following spc3r22 section 6.5.3 Block Limits VPD page, when @@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set OPTIMAL TRANSFER LENGTH GRANULARITY */ - put_unaligned_be16(1, &buf[6]); + if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev))) + put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]); + else + put_unaligned_be16(1, &buf[6]); /* * Set MAXIMUM TRANSFER LENGTH @@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set OPTIMAL TRANSFER LENGTH */ - put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); + if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev))) + put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]); + else + put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]); /* * Exit now if we don't support TP. @@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) *size = (cdb[3] << 8) + cdb[4]; /* - * Do implict HEAD_OF_QUEUE processing for INQUIRY. 
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY. * See spc4r17 section 5.3 */ cmd->sam_task_attr = MSG_HEAD_TAG; @@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = spc_emulate_report_luns; *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; /* - * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS + * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ cmd->sam_task_attr = MSG_HEAD_TAG; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 9c642e02cba1..03538994d2f7 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@ #include <linux/utsname.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/blkdev.h> #include <linux/configfs.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets( struct se_device *dev = container_of(sgrps, struct se_device, dev_stat_grps); - return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_resets)); } DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets); @@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuNumCommands */ - return snprintf(page, PAGE_SIZE, "%llu\n", - (unsigned long long)dev->num_cmds); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->num_cmds)); } DEV_STAT_SCSI_LU_ATTR_RO(num_cmds); @@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuReadMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20)); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->read_bytes) >> 20); } DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes); @@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuWrittenMegaBytes */ - return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20)); + return snprintf(page, PAGE_SIZE, "%lu\n", + atomic_long_read(&dev->write_bytes) >> 20); } DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes); @@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets( container_of(sgrps, struct se_device, dev_stat_grps); /* scsiLuInResets */ - return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets); + return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets)); } DEV_STAT_SCSI_LU_ATTR_RO(resets); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 250009909d49..70c638f730af 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -386,9 +386,7 @@ int core_tmr_lun_reset( pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); } - spin_lock_irq(&dev->stats_lock); - dev->num_resets++; - spin_unlock_irq(&dev->stats_lock); + atomic_long_inc(&dev->num_resets); pr_debug("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? 
"Preempt" : "TMR", diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index b9a6ec0aa5fe..f697f8baec54 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( return acl; } +EXPORT_SYMBOL(core_tpg_get_initiator_node_acl); /* core_tpg_add_node_to_devs(): * @@ -633,6 +634,13 @@ int core_tpg_set_initiator_node_tag( } EXPORT_SYMBOL(core_tpg_set_initiator_node_tag); +static void core_tpg_lun_ref_release(struct percpu_ref *ref) +{ + struct se_lun *lun = container_of(ref, struct se_lun, lun_ref); + + complete(&lun->lun_ref_comp); +} + static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) { /* Set in core_dev_setup_virtual_lun0() */ @@ -646,15 +654,20 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_acl_list); - INIT_LIST_HEAD(&lun->lun_cmd_list); spin_lock_init(&lun->lun_acl_lock); - spin_lock_init(&lun->lun_cmd_lock); spin_lock_init(&lun->lun_sep_lock); + init_completion(&lun->lun_ref_comp); - ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); if (ret < 0) return ret; + ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); + if (ret < 0) { + percpu_ref_cancel_init(&lun->lun_ref); + return ret; + } + return 0; } @@ -691,10 +704,9 @@ int core_tpg_register( atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_shutdown_comp); INIT_LIST_HEAD(&lun->lun_acl_list); - INIT_LIST_HEAD(&lun->lun_cmd_list); spin_lock_init(&lun->lun_acl_lock); - spin_lock_init(&lun->lun_cmd_lock); spin_lock_init(&lun->lun_sep_lock); + init_completion(&lun->lun_ref_comp); } se_tpg->se_tpg_type = se_tpg_type; @@ -815,10 +827,16 @@ int core_tpg_post_addlun( { int ret; - ret = core_dev_export(lun_ptr, tpg, lun); + ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release); if (ret < 0) return ret; + ret = core_dev_export(lun_ptr, tpg, lun); + if (ret < 0) { + percpu_ref_cancel_init(&lun->lun_ref); + return ret; + } + spin_lock(&tpg->tpg_lun_lock); lun->lun_access = lun_access; lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE; @@ -827,14 +845,6 @@ int core_tpg_post_addlun( return 0; } -static void core_tpg_shutdown_lun( - struct se_portal_group *tpg, - struct se_lun *lun) -{ - core_clear_lun_from_tpg(lun, tpg); - transport_clear_lun_from_sessions(lun); -} - struct se_lun *core_tpg_pre_dellun( struct se_portal_group *tpg, u32 unpacked_lun) @@ -869,7 +879,8 @@ int core_tpg_post_dellun( struct se_portal_group *tpg, struct se_lun *lun) { - core_tpg_shutdown_lun(tpg, lun); + core_clear_lun_from_tpg(lun, tpg); + transport_clear_lun_ref(lun); core_dev_unexport(lun->lun_se_dev, tpg, lun); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 81e945eefbbd..91953da0f623 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -28,7 +28,6 @@ #include <linux/string.h> #include <linux/timer.h> #include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/in.h> @@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess) pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", se_tpg->se_tpg_tfo->get_fabric_name()); /* - * If last kref is dropping now for an explict NodeACL, awake sleeping + * If last kref is dropping now 
for an explicit NodeACL, awake sleeping * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context. */ @@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, if (write_pending) cmd->t_state = TRANSPORT_WRITE_PENDING; - /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - complete(&cmd->transport_lun_stop_comp); - return 1; - } - if (remove_from_lists) { target_remove_from_state_list(cmd); @@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - unsigned long flags; - if (!lun) + if (!lun || !cmd->lun_ref_active) return; - spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (!list_empty(&cmd->se_lun_node)) - list_del_init(&cmd->se_lun_node); - spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); + percpu_ref_put(&lun->lun_ref); } void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) @@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) cmd->transport_state |= CMD_T_FAILED; /* - * Check for case where an explict ABORT_TASK has been received + * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. */ if (cmd->transport_state & CMD_T_ABORTED && @@ -1092,13 +1070,10 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->state_list); - init_completion(&cmd->transport_lun_fe_stop_comp); - init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); @@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd) /* * If the received CDB has aleady been aborted stop processing it here. */ - if (transport_check_aborted_status(cmd, 1)) { - complete(&cmd->transport_lun_stop_comp); + if (transport_check_aborted_status(cmd, 1)) return; - } /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - spin_lock_irq(&cmd->t_state_lock); - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->transport_lun_stop_comp); - return; - } - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. */ + spin_lock_irq(&cmd->t_state_lock); if (cmd->transport_state & CMD_T_STOP) { pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", __func__, __LINE__, @@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) } EXPORT_SYMBOL(target_wait_for_sess_cmds); -/* transport_lun_wait_for_tasks(): - * - * Called from ConfigFS context to stop the passed struct se_cmd to allow - * an struct se_lun to be successfully shutdown. 
- */ -static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) -{ - unsigned long flags; - int ret = 0; - - /* - * If the frontend has already requested this struct se_cmd to - * be stopped, we can safely ignore this struct se_cmd. - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->transport_state & CMD_T_STOP) { - cmd->transport_state &= ~CMD_T_LUN_STOP; - - pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", - cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_cmd_check_stop(cmd, false, false); - return -EPERM; - } - cmd->transport_state |= CMD_T_LUN_FE_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - // XXX: audit task_flags checks. - spin_lock_irqsave(&cmd->t_state_lock, flags); - if ((cmd->transport_state & CMD_T_BUSY) && - (cmd->transport_state & CMD_T_SENT)) { - if (!target_stop_cmd(cmd, &flags)) - ret++; - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - pr_debug("ConfigFS: cmd: %p stop tasks ret:" - " %d\n", cmd, ret); - if (!ret) { - pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - wait_for_completion(&cmd->transport_lun_stop_comp); - pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - } - - return 0; -} - -static void __transport_clear_lun_from_sessions(struct se_lun *lun) -{ - struct se_cmd *cmd = NULL; - unsigned long lun_flags, cmd_flags; - /* - * Do exception processing and return CHECK_CONDITION status to the - * Initiator Port. - */ - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty(&lun->lun_cmd_list)) { - cmd = list_first_entry(&lun->lun_cmd_list, - struct se_cmd, se_lun_node); - list_del_init(&cmd->se_lun_node); - - spin_lock(&cmd->t_state_lock); - pr_debug("SE_LUN[%d] - Setting cmd->transport" - "_lun_stop for ITT: 0x%08x\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - cmd->transport_state |= CMD_T_LUN_STOP; - spin_unlock(&cmd->t_state_lock); - - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - - if (!cmd->se_lun) { - pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - /* - * If the Storage engine still owns the iscsi_cmd_t, determine - * and/or stop its context. - */ - pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - - if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - - pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" - "_wait_for_tasks(): SUCCESS\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - goto check_cond; - } - cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - - /* - * The Storage engine stopped this struct se_cmd before it was - * send to the fabric frontend for delivery back to the - * Initiator Node. Return this SCSI CDB back with an - * CHECK_CONDITION status. 
- */ -check_cond: - transport_send_check_condition_and_sense(cmd, - TCM_NON_EXISTENT_LUN, 0); - /* - * If the fabric frontend is waiting for this iscsi_cmd_t to - * be released, notify the waiting thread now that LU has - * finished accessing it. - */ - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (cmd->transport_state & CMD_T_LUN_FE_STOP) { - pr_debug("SE_LUN[%d] - Detected FE stop for" - " struct se_cmd: %p ITT: 0x%08x\n", - lun->unpacked_lun, - cmd, cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, - cmd_flags); - transport_cmd_check_stop(cmd, false, false); - complete(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - } - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); -} - -static int transport_clear_lun_thread(void *p) +static int transport_clear_lun_ref_thread(void *p) { struct se_lun *lun = p; - __transport_clear_lun_from_sessions(lun); + percpu_ref_kill(&lun->lun_ref); + + wait_for_completion(&lun->lun_ref_comp); complete(&lun->lun_shutdown_comp); return 0; } -int transport_clear_lun_from_sessions(struct se_lun *lun) +int transport_clear_lun_ref(struct se_lun *lun) { struct task_struct *kt; - kt = kthread_run(transport_clear_lun_thread, lun, + kt = kthread_run(transport_clear_lun_ref_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { pr_err("Unable to start clear_lun thread\n"); @@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); return false; } - /* - * If we are already stopped due to an external event (ie: LUN shutdown) - * sleep until the connection can have the passed struct se_cmd back. - * The cmd->transport_lun_stopped_sem will be upped by - * transport_clear_lun_from_sessions() once the ConfigFS context caller - * has completed its operation on the struct se_cmd. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("wait_for_tasks: Stopping" - " wait_for_completion(&cmd->t_tasktransport_lun_fe" - "_stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - /* - * There is a special case for WRITES where a FE exception + - * LUN shutdown means ConfigFS context is still sleeping on - * transport_lun_stop_comp in transport_lun_wait_for_tasks(). - * We go ahead and up transport_lun_stop_comp just to be sure - * here. - */ - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->transport_lun_stop_comp); - wait_for_completion(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&cmd->t_state_lock, flags); - - target_remove_from_state_list(cmd); - /* - * At this point, the frontend who was the originator of this - * struct se_cmd, now owns the structure and can be released through - * normal means below. 
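The conversion above replaces the old per-command LUN shutdown lists with a percpu_ref: in-flight commands hold a reference (dropped via percpu_ref_put() in transport_lun_remove_cmd()), and transport_clear_lun_ref() kills the ref and waits for the release callback to complete lun_ref_comp. A minimal sketch of that lifecycle, written as a hypothetical module and assuming the two-argument percpu_ref_init() used by this series:

/* Hypothetical module illustrating the percpu_ref shutdown pattern above. */
#include <linux/module.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct demo_lun {
	struct percpu_ref ref;          /* analogous to se_lun->lun_ref */
	struct completion ref_comp;     /* analogous to lun_ref_comp */
};

static struct demo_lun demo;

/* Runs once the last reference is dropped after percpu_ref_kill() */
static void demo_ref_release(struct percpu_ref *ref)
{
	struct demo_lun *lun = container_of(ref, struct demo_lun, ref);

	complete(&lun->ref_comp);
}

static int __init demo_init(void)
{
	int ret;

	init_completion(&demo.ref_comp);
	ret = percpu_ref_init(&demo.ref, demo_ref_release);
	if (ret)
		return ret;

	percpu_ref_get(&demo.ref);      /* submission path takes a ref */
	percpu_ref_put(&demo.ref);      /* completion path drops it */

	percpu_ref_kill(&demo.ref);     /* shutdown: no new references */
	wait_for_completion(&demo.ref_comp);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");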
- */ - pr_debug("wait_for_tasks: Stopped" - " wait_for_completion(&cmd->t_tasktransport_lun_fe_" - "stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_LUN_STOP; - } if (!(cmd->transport_state & CMD_T_ACTIVE)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; + cmd->scsi_status = SAM_STAT_TASK_ABORTED; trace_target_cmd_complete(cmd); cmd->se_tfo->queue_status(cmd); @@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd) if (cmd->se_tfo->write_pending_status(cmd) != 0) { cmd->transport_state |= CMD_T_ABORTED; smp_mb__after_atomic_inc(); + return; } } cmd->scsi_status = SAM_STAT_TASK_ABORTED; diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index 0204952fe4d3..be912b36daae 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h @@ -19,7 +19,7 @@ #define ASCQ_2AH_RESERVATIONS_RELEASED 0x04 #define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05 #define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06 -#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 +#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07 #define ASCQ_2AH_PRIORITY_CHANGED 0x08 #define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 474cd44fac14..6b88a9958f61 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd, struct xcopy_pt_cmd, se_cmd); - if (xpt_cmd->remote_port) - kfree(se_cmd->se_lun); - kfree(xpt_cmd); } @@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun( return 0; } - pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL); - if (!pt_cmd->se_lun) { - pr_err("Unable to allocate pt_cmd->se_lun\n"); - return -ENOMEM; - } - init_completion(&pt_cmd->se_lun->lun_shutdown_comp); - INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list); - INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list); - spin_lock_init(&pt_cmd->se_lun->lun_acl_lock); - spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock); - spin_lock_init(&pt_cmd->se_lun->lun_sep_lock); - + pt_cmd->se_lun = &se_dev->xcopy_lun; pt_cmd->se_dev = se_dev; pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); - pt_cmd->se_lun->lun_se_dev = se_dev; pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH; pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n", @@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd( return 0; out: - if (remote_port == true) - kfree(cmd->se_lun); return ret; } diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 0dd54a44abcf..752863acecb8 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -22,6 +22,7 @@ #define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */ #define FT_TPG_NAMELEN 32 /* max length of TPG name */ #define FT_LUN_NAMELEN 32 /* max length of LUN name */ +#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */ struct ft_transport_id { __u8 format; diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 0e5a1caed176..479ec5621a4e 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ 
b/drivers/target/tcm_fc/tfc_cmd.c @@ -28,6 +28,7 @@ #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/hash.h> +#include <linux/percpu_ida.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; + struct se_session *se_sess; if (!cmd) return; + se_sess = cmd->sess->se_sess; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); + percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); ft_sess_put(cmd->sess); /* undo get from lookup at recv */ - kfree(cmd); } void ft_release_cmd(struct se_cmd *se_cmd) @@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) { struct ft_cmd *cmd; struct fc_lport *lport = sess->tport->lport; + struct se_session *se_sess = sess->se_sess; + int tag; - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); - if (!cmd) + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + if (tag < 0) goto busy; + + cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct ft_cmd)); + + cmd->se_cmd.map_tag = tag; cmd->sess = sess; cmd->seq = lport->tt.seq_assign(lport, fp); if (!cmd->seq) { - kfree(cmd); + percpu_ida_free(&se_sess->sess_tag_pool, tag); goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 4e0050840a72..c6932fb53a8d 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -571,16 +571,16 @@ int ft_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = ft_nacl_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * register the fabric for use within TCM */ diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 4859505ae2ed..ae52c08dad09 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, if (!sess) return NULL; - sess->se_sess = transport_init_session(); + sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS, + sizeof(struct ft_cmd)); if (IS_ERR(sess->se_sess)) { kfree(sess); return NULL; diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c index eccea1df702d..6c3d7950d2a9 100644 --- a/drivers/usb/gadget/tcm_usb_gadget.c +++ 
b/drivers/usb/gadget/tcm_usb_gadget.c @@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void) } fabric->tf_ops = usbg_ops; - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; ret = target_fabric_configfs_register(fabric); if (ret < 0) { printk(KERN_ERR "target_fabric_configfs_register() failed" diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e663921eebb6..f175629513ed 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -2168,15 +2168,15 @@ static int tcm_vhost_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the fabric for use within TCM */ |
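The tcm_fc changes above swap per-request kzalloc(GFP_ATOMIC) for a per-session tag pool: transport_init_session_tags() preallocates TCM_FC_DEFAULT_TAGS command slots, and the receive path indexes sess_cmd_map by a percpu_ida tag. A minimal sketch of the same pattern with hypothetical fabric names, using only the target-core calls that appear in the patch:

/* Hypothetical fabric-side sketch of the per-session tag pool above. */
#include <linux/percpu_ida.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#define DEMO_DEFAULT_TAGS	512	/* per-session preallocation, as in tcm_fc */

struct demo_cmd {
	struct se_cmd se_cmd;
	/* fabric-private per-command state would follow */
};

/* Session setup: preallocate DEMO_DEFAULT_TAGS commands up front */
static struct se_session *demo_make_session(void)
{
	return transport_init_session_tags(DEMO_DEFAULT_TAGS,
					   sizeof(struct demo_cmd));
}

/* Fast path: take a preallocated slot instead of kzalloc(GFP_ATOMIC) */
static struct demo_cmd *demo_get_cmd(struct se_session *se_sess)
{
	struct demo_cmd *cmd;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
	if (tag < 0)
		return NULL;

	cmd = &((struct demo_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;
	return cmd;
}

/* Completion path: return the slot to the pool */
static void demo_put_cmd(struct se_session *se_sess, struct demo_cmd *cmd)
{
	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
}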