Diffstat (limited to 'drivers')
682 files changed, 40260 insertions, 13581 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ab2cbb51c6aa..35da507411a0 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -54,6 +54,9 @@ config ACPI_GENERIC_GSI config ACPI_SYSTEM_POWER_STATES_SUPPORT bool +config ACPI_CCA_REQUIRED + bool + config ACPI_SLEEP bool depends on SUSPEND || HIBERNATION @@ -62,7 +65,7 @@ config ACPI_SLEEP config ACPI_PROCFS_POWER bool "Deprecated power /proc/acpi directories" - depends on PROC_FS + depends on X86 && PROC_FS help For backwards compatibility, this option allows deprecated power /proc/acpi/ directories to exist, even when diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 8a063e276530..73d840bef455 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -52,9 +52,6 @@ acpi-$(CONFIG_X86) += acpi_cmos_rtc.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o -ifdef CONFIG_ACPI_VIDEO -acpi-y += video_detect.o -endif acpi-y += acpi_lpat.o acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o @@ -95,3 +92,5 @@ obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o + +video-objs += acpi_video.o video_detect.o diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index bbcc2b5a70d4..9b5354a2cd08 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -308,7 +308,7 @@ static int thinkpad_e530_quirk(const struct dmi_system_id *d) return 0; } -static struct dmi_system_id ac_dmi_table[] = { +static const struct dmi_system_id ac_dmi_table[] = { { .callback = thinkpad_e530_quirk, .ident = "thinkpad e530", diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 37fb19047603..569ee090343f 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -129,50 +129,50 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } -static struct lpss_device_desc lpt_dev_desc = { +static const struct lpss_device_desc lpt_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .prv_offset = 0x800, }; -static struct lpss_device_desc lpt_i2c_dev_desc = { +static const struct lpss_device_desc lpt_i2c_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, .prv_offset = 0x800, }; -static struct lpss_device_desc lpt_uart_dev_desc = { +static const struct lpss_device_desc lpt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; -static struct lpss_device_desc lpt_sdio_dev_desc = { +static const struct lpss_device_desc lpt_sdio_dev_desc = { .flags = LPSS_LTR, .prv_offset = 0x1000, .prv_size_override = 0x1018, }; -static struct lpss_device_desc byt_pwm_dev_desc = { +static const struct lpss_device_desc byt_pwm_dev_desc = { .flags = LPSS_SAVE_CTX, }; -static struct lpss_device_desc byt_uart_dev_desc = { +static const struct lpss_device_desc byt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; -static struct lpss_device_desc byt_spi_dev_desc = { +static const struct lpss_device_desc byt_spi_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, .prv_offset = 0x400, }; -static struct lpss_device_desc byt_sdio_dev_desc = { +static const struct lpss_device_desc byt_sdio_dev_desc = { 
.flags = LPSS_CLK, }; -static struct lpss_device_desc byt_i2c_dev_desc = { +static const struct lpss_device_desc byt_i2c_dev_desc = { .flags = LPSS_CLK | LPSS_SAVE_CTX, .prv_offset = 0x800, .setup = byt_i2c_setup, @@ -323,14 +323,14 @@ out: static int acpi_lpss_create_device(struct acpi_device *adev, const struct acpi_device_id *id) { - struct lpss_device_desc *dev_desc; + const struct lpss_device_desc *dev_desc; struct lpss_private_data *pdata; struct resource_entry *rentry; struct list_head resource_list; struct platform_device *pdev; int ret; - dev_desc = (struct lpss_device_desc *)id->driver_data; + dev_desc = (const struct lpss_device_desc *)id->driver_data; if (!dev_desc) { pdev = acpi_create_platform_device(adev); return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1; diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 6bc9cbc01ad6..00b39802d7ec 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -105,7 +105,7 @@ static void round_robin_cpu(unsigned int tsk_index) mutex_lock(&round_robin_lock); cpumask_clear(tmp); for_each_cpu(cpu, pad_busy_cpus) - cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); + cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu)); cpumask_andnot(tmp, cpu_online_mask, tmp); /* avoid HT sibilings if possible */ if (cpumask_empty(tmp)) diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index 4bf75597f732..06a67d5f2846 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -103,7 +103,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) pdevinfo.res = resources; pdevinfo.num_res = count; pdevinfo.fwnode = acpi_fwnode_handle(adev); - pdevinfo.dma_mask = DMA_BIT_MASK(32); + pdevinfo.dma_mask = acpi_check_dma(adev, NULL) ? 
DMA_BIT_MASK(32) : 0; pdev = platform_device_register_full(&pdevinfo); if (IS_ERR(pdev)) dev_err(&adev->dev, "platform device creation failed: %ld\n", diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 58f335ca2e75..92a5f738e370 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) acpi_status status; int ret; - if (pr->phys_id == PHYS_CPUID_INVALID) + if (invalid_phys_cpuid(pr->phys_id)) return -ENODEV; status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); @@ -215,8 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device) union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); - phys_cpuid_t phys_id; - int cpu_index, device_declaration = 0; + int device_declaration = 0; acpi_status status = AE_OK; static int cpu0_initialized; unsigned long long value; @@ -263,29 +262,28 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->acpi_id = value; } - phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); - if (phys_id == PHYS_CPUID_INVALID) + pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, + pr->acpi_id); + if (invalid_phys_cpuid(pr->phys_id)) acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); - pr->phys_id = phys_id; - cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); + pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id); if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { cpu0_initialized = 1; /* * Handle UP system running SMP kernel, with no CPU * entry in MADT */ - if ((cpu_index == -1) && (num_online_cpus() == 1)) - cpu_index = 0; + if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1)) + pr->id = 0; } - pr->id = cpu_index; /* * Extra Processor objects may be enumerated on MP systems with * less than the max # of CPUs. They should be ignored _iff * they are physically not present. */ - if (pr->id == -1) { + if (invalid_logical_cpuid(pr->id)) { int ret = acpi_processor_hotadd_init(pr); if (ret) return ret; diff --git a/drivers/acpi/video.c b/drivers/acpi/acpi_video.c index cc79d3fedfb2..8c2fe2f2f9fd 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/acpi_video.c @@ -43,7 +43,7 @@ #include <acpi/video.h> #include <asm/uaccess.h> -#include "internal.h" +#define PREFIX "ACPI: " #define ACPI_VIDEO_BUS_NAME "Video Bus" #define ACPI_VIDEO_DEVICE_NAME "Video Device" @@ -78,26 +78,17 @@ module_param(brightness_switch_enabled, bool, 0644); static bool allow_duplicates; module_param(allow_duplicates, bool, 0644); -/* - * For Windows 8 systems: used to decide if video module - * should skip registering backlight interface of its own. 
- */ -enum { - NATIVE_BACKLIGHT_NOT_SET = -1, - NATIVE_BACKLIGHT_OFF, - NATIVE_BACKLIGHT_ON, -}; - -static int use_native_backlight_param = NATIVE_BACKLIGHT_NOT_SET; -module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); -static int use_native_backlight_dmi = NATIVE_BACKLIGHT_NOT_SET; +static int disable_backlight_sysfs_if = -1; +module_param(disable_backlight_sysfs_if, int, 0444); static int register_count; +static DEFINE_MUTEX(register_count_mutex); static struct mutex video_list_lock; static struct list_head video_bus_head; static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device); static void acpi_video_bus_notify(struct acpi_device *device, u32 event); +void acpi_video_detect_exit(void); static const struct acpi_device_id video_device_ids[] = { {ACPI_VIDEO_HID, 0}, @@ -157,7 +148,6 @@ struct acpi_video_enumerated_device { struct acpi_video_bus { struct acpi_device *device; bool backlight_registered; - bool backlight_notifier_registered; u8 dos_setting; struct acpi_video_enumerated_device *attached_array; u8 attached_count; @@ -170,7 +160,6 @@ struct acpi_video_bus { struct input_dev *input; char phys[32]; /* for input device */ struct notifier_block pm_nb; - struct notifier_block backlight_nb; }; struct acpi_video_device_flags { @@ -241,24 +230,6 @@ static int acpi_video_get_next_level(struct acpi_video_device *device, u32 level_current, u32 event); static void acpi_video_switch_brightness(struct work_struct *work); -static bool acpi_video_use_native_backlight(void) -{ - if (use_native_backlight_param != NATIVE_BACKLIGHT_NOT_SET) - return use_native_backlight_param; - else if (use_native_backlight_dmi != NATIVE_BACKLIGHT_NOT_SET) - return use_native_backlight_dmi; - return acpi_osi_is_win8(); -} - -bool acpi_video_verify_backlight_support(void) -{ - if (acpi_video_use_native_backlight() && - backlight_device_registered(BACKLIGHT_RAW)) - return false; - return acpi_video_backlight_support(); -} -EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support); - /* backlight device sysfs support */ static int acpi_video_get_brightness(struct backlight_device *bd) { @@ -413,25 +384,21 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level) */ static int bqc_offset_aml_bug_workaround; -static int __init video_set_bqc_offset(const struct dmi_system_id *d) +static int video_set_bqc_offset(const struct dmi_system_id *d) { bqc_offset_aml_bug_workaround = 9; return 0; } -static int __init video_disable_native_backlight(const struct dmi_system_id *d) -{ - use_native_backlight_dmi = NATIVE_BACKLIGHT_OFF; - return 0; -} - -static int __init video_enable_native_backlight(const struct dmi_system_id *d) +static int video_disable_backlight_sysfs_if( + const struct dmi_system_id *d) { - use_native_backlight_dmi = NATIVE_BACKLIGHT_ON; + if (disable_backlight_sysfs_if == -1) + disable_backlight_sysfs_if = 1; return 0; } -static struct dmi_system_id video_dmi_table[] __initdata = { +static struct dmi_system_id video_dmi_table[] = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 */ @@ -477,110 +444,19 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, /* - * These models have a working acpi_video backlight control, and using - * native backlight causes a regression where backlight does not work - * when userspace is not handling brightness key events. 
Disable - * native_backlight on these to fix this: - * https://bugzilla.kernel.org/show_bug.cgi?id=81691 + * Some machines have a broken acpi-video interface for brightness + * control, but still need an acpi_video_device_lcd_set_level() call + * on resume to turn the backlight power on. We Enable backlight + * control on these systems, but do not register a backlight sysfs + * as brightness control does not work. */ { - .callback = video_disable_native_backlight, - .ident = "ThinkPad T420", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), - }, - }, - { - .callback = video_disable_native_backlight, - .ident = "ThinkPad T520", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), - }, - }, - { - .callback = video_disable_native_backlight, - .ident = "ThinkPad X201s", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), - }, - }, - - /* The native backlight controls do not work on some older machines */ - { - /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ - .callback = video_disable_native_backlight, - .ident = "HP ENVY 15 Notebook", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), - }, - }, - - { - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), - }, - }, - { - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), - }, - }, - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */ - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"), - }, - }, - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 730U3E/740U3E", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), - }, - }, - { - /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */ - .callback = video_disable_native_backlight, - .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"), - }, - }, - - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ - .callback = video_disable_native_backlight, - .ident = "Dell XPS15 L521X", + /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */ + .callback = video_disable_backlight_sysfs_if, + .ident = "Toshiba Portege R830", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), - }, - }, - - /* Non win8 machines which need native backlight nevertheless */ - { - /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ - .callback = video_enable_native_backlight, - .ident = "Lenovo Ideapad Z570", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE 
R830"), }, }, {} @@ -1382,7 +1258,7 @@ acpi_video_switch_brightness(struct work_struct *work) int result = -EINVAL; /* no warning message if acpi_backlight=vendor or a quirk is used */ - if (!acpi_video_verify_backlight_support()) + if (!device->backlight) return; if (!device->brightness) @@ -1657,8 +1533,9 @@ static int acpi_video_resume(struct notifier_block *nb, for (i = 0; i < video->attached_count; i++) { video_device = video->attached_array[i].bind_info; - if (video_device && video_device->backlight) - acpi_video_set_brightness(video_device->backlight); + if (video_device && video_device->brightness) + acpi_video_device_lcd_set_level(video_device, + video_device->brightness->curr); } return NOTIFY_OK; @@ -1707,6 +1584,10 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) result = acpi_video_init_brightness(device); if (result) return; + + if (disable_backlight_sysfs_if > 0) + return; + name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; @@ -1729,8 +1610,10 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) &acpi_backlight_ops, &props); kfree(name); - if (IS_ERR(device->backlight)) + if (IS_ERR(device->backlight)) { + device->backlight = NULL; return; + } /* * Save current brightness level in case we have to restore it @@ -1787,7 +1670,7 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) acpi_video_run_bcl_for_osi(video); - if (!acpi_video_verify_backlight_support()) + if (acpi_video_get_backlight_type() != acpi_backlight_video) return 0; mutex_lock(&video->device_list_lock); @@ -1931,56 +1814,6 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video) video->input = NULL; } -static int acpi_video_backlight_notify(struct notifier_block *nb, - unsigned long val, void *bd) -{ - struct backlight_device *backlight = bd; - struct acpi_video_bus *video; - - /* acpi_video_verify_backlight_support only cares about raw devices */ - if (backlight->props.type != BACKLIGHT_RAW) - return NOTIFY_DONE; - - video = container_of(nb, struct acpi_video_bus, backlight_nb); - - switch (val) { - case BACKLIGHT_REGISTERED: - if (!acpi_video_verify_backlight_support()) - acpi_video_bus_unregister_backlight(video); - break; - case BACKLIGHT_UNREGISTERED: - acpi_video_bus_register_backlight(video); - break; - } - - return NOTIFY_OK; -} - -static int acpi_video_bus_add_backlight_notify_handler( - struct acpi_video_bus *video) -{ - int error; - - video->backlight_nb.notifier_call = acpi_video_backlight_notify; - video->backlight_nb.priority = 0; - error = backlight_register_notifier(&video->backlight_nb); - if (error == 0) - video->backlight_notifier_registered = true; - - return error; -} - -static int acpi_video_bus_remove_backlight_notify_handler( - struct acpi_video_bus *video) -{ - if (!video->backlight_notifier_registered) - return 0; - - video->backlight_notifier_registered = false; - - return backlight_unregister_notifier(&video->backlight_nb); -} - static int acpi_video_bus_put_devices(struct acpi_video_bus *video) { struct acpi_video_device *dev, *next; @@ -2062,7 +1895,6 @@ static int acpi_video_bus_add(struct acpi_device *device) acpi_video_bus_register_backlight(video); acpi_video_bus_add_notify_handler(video); - acpi_video_bus_add_backlight_notify_handler(video); return 0; @@ -2086,7 +1918,6 @@ static int acpi_video_bus_remove(struct acpi_device *device) video = acpi_driver_data(device); - acpi_video_bus_remove_backlight_notify_handler(video); 
acpi_video_bus_remove_notify_handler(video); acpi_video_bus_unregister_backlight(video); acpi_video_bus_put_devices(video); @@ -2134,22 +1965,25 @@ static int __init intel_opregion_present(void) int acpi_video_register(void) { - int ret; + int ret = 0; + mutex_lock(®ister_count_mutex); if (register_count) { /* * if the function of acpi_video_register is already called, * don't register the acpi_vide_bus again and return no error. */ - return 0; + goto leave; } mutex_init(&video_list_lock); INIT_LIST_HEAD(&video_bus_head); + dmi_check_system(video_dmi_table); + ret = acpi_bus_register_driver(&acpi_video_bus); if (ret) - return ret; + goto leave; /* * When the acpi_video_bus is loaded successfully, increase @@ -2157,24 +1991,20 @@ int acpi_video_register(void) */ register_count = 1; - return 0; +leave: + mutex_unlock(®ister_count_mutex); + return ret; } EXPORT_SYMBOL(acpi_video_register); void acpi_video_unregister(void) { - if (!register_count) { - /* - * If the acpi video bus is already unloaded, don't - * unload it again and return directly. - */ - return; + mutex_lock(®ister_count_mutex); + if (register_count) { + acpi_bus_unregister_driver(&acpi_video_bus); + register_count = 0; } - acpi_bus_unregister_driver(&acpi_video_bus); - - register_count = 0; - - return; + mutex_unlock(®ister_count_mutex); } EXPORT_SYMBOL(acpi_video_unregister); @@ -2182,15 +2012,15 @@ void acpi_video_unregister_backlight(void) { struct acpi_video_bus *video; - if (!register_count) - return; - - mutex_lock(&video_list_lock); - list_for_each_entry(video, &video_bus_head, entry) - acpi_video_bus_unregister_backlight(video); - mutex_unlock(&video_list_lock); + mutex_lock(®ister_count_mutex); + if (register_count) { + mutex_lock(&video_list_lock); + list_for_each_entry(video, &video_bus_head, entry) + acpi_video_bus_unregister_backlight(video); + mutex_unlock(&video_list_lock); + } + mutex_unlock(®ister_count_mutex); } -EXPORT_SYMBOL(acpi_video_unregister_backlight); /* * This is kind of nasty. 
Hardware using Intel chipsets may require @@ -2212,8 +2042,6 @@ static int __init acpi_video_init(void) if (acpi_disabled) return 0; - dmi_check_system(video_dmi_table); - if (intel_opregion_present()) return 0; @@ -2222,6 +2050,7 @@ static int __init acpi_video_init(void) static void __exit acpi_video_exit(void) { + acpi_video_detect_exit(); acpi_video_unregister(); return; diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 4169bb87a996..43685dd36c77 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -231,7 +231,9 @@ void acpi_db_open_debug_file(char *name); acpi_status acpi_db_load_acpi_table(char *filename); acpi_status -acpi_db_get_table_from_file(char *filename, struct acpi_table_header **table); +acpi_db_get_table_from_file(char *filename, + struct acpi_table_header **table, + u8 must_be_aml_table); /* * dbhistry - debugger HISTORY command diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 87b27521fcac..ffdb956391f6 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -352,11 +352,21 @@ struct acpi_package_info3 { u16 reserved; }; +struct acpi_package_info4 { + u8 type; + u8 object_type1; + u8 count1; + u8 sub_object_types; + u8 pkg_count; + u16 reserved; +}; + union acpi_predefined_info { struct acpi_name_info info; struct acpi_package_info ret_info; struct acpi_package_info2 ret_info2; struct acpi_package_info3 ret_info3; + struct acpi_package_info4 ret_info4; }; /* Reset to default packing */ @@ -1165,4 +1175,9 @@ struct ah_uuid { char *string; }; +struct ah_table { + char *signature; + char *description; +}; + #endif /* __ACLOCAL_H__ */ diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index 74a390c6db16..0cdd2fce493a 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -70,6 +70,9 @@ * *****************************************************************************/ +extern const u8 acpi_gbl_short_op_index[]; +extern const u8 acpi_gbl_long_op_index[]; + /* * psxface - Parser external interfaces */ diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index a972d11c97c9..b9474b529fcb 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -105,6 +105,11 @@ * count = 0 (optional) * (Used for _DLM) * + * ACPI_PTYPE2_VAR_VAR: Variable number of subpackages, each of either a + * constant or variable length. The subpackages are preceded by a + * constant number of objects. + * (Used for _LPI, _RDI) + * * ACPI_PTYPE2_UUID_PAIR: Each subpackage is preceded by a UUID Buffer. The UUID * defines the format of the package. Zero-length parent package is * allowed. @@ -123,7 +128,8 @@ enum acpi_return_package_types { ACPI_PTYPE2_MIN = 8, ACPI_PTYPE2_REV_FIXED = 9, ACPI_PTYPE2_FIX_VAR = 10, - ACPI_PTYPE2_UUID_PAIR = 11 + ACPI_PTYPE2_VAR_VAR = 11, + ACPI_PTYPE2_UUID_PAIR = 12 }; /* Support macros for users of the predefined info table */ @@ -172,7 +178,7 @@ enum acpi_return_package_types { * These are the names that can actually be evaluated via acpi_evaluate_object. 
* Not present in this table are the following: * - * 1) Predefined/Reserved names that are never evaluated via + * 1) Predefined/Reserved names that are not usually evaluated via * acpi_evaluate_object: * _Lxx and _Exx GPE methods * _Qxx EC methods @@ -361,6 +367,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */ PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0), + {{"_BTH", METHOD_1ARGS(ACPI_TYPE_INTEGER), /* ACPI 6.0 */ + METHOD_NO_RETURN_VALUE}}, + {{"_BTM", METHOD_1ARGS(ACPI_TYPE_INTEGER), METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, @@ -390,6 +399,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0, 0, 0), + {{"_CR3", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_CRS", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, @@ -445,7 +457,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER), METHOD_NO_RETURN_VALUE}}, - {{"_DSD", METHOD_0ARGS, + {{"_DSD", METHOD_0ARGS, /* ACPI 6.0 */ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Buf, 1 Pkg */ PACKAGE_INFO(ACPI_PTYPE2_UUID_PAIR, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_PACKAGE, 1, 0), @@ -604,6 +616,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int(rev), n Pkg (2 Int) */ PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0), + {{"_LPI", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (3 Int, n Pkg (10 Int/Buf) */ + PACKAGE_INFO(ACPI_PTYPE2_VAR_VAR, ACPI_RTYPE_INTEGER, 3, + ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER | ACPI_RTYPE_STRING, + 10, 0), + {{"_MAT", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, @@ -624,6 +642,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { ACPI_TYPE_INTEGER), METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_MTL", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_NTT", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, @@ -716,6 +737,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */ PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0), + {{"_PRR", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (1 Ref) */ + PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_REFERENCE, 1, 0, 0, 0), + {{"_PRS", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, @@ -796,6 +821,11 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_PXM", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_RDI", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (1 Int, n Pkg (m Ref)) */ + PACKAGE_INFO(ACPI_PTYPE2_VAR_VAR, ACPI_RTYPE_INTEGER, 1, + ACPI_RTYPE_REFERENCE, 0, 0), + {{"_REG", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER), METHOD_NO_RETURN_VALUE}}, @@ -808,6 +838,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_ROM", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER), METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + {{"_RST", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_NO_RETURN_VALUE}}, + {{"_RTV", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, @@ -935,6 +968,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_TDL", METHOD_0ARGS, 
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_TFP", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_TIP", METHOD_1ARGS(ACPI_TYPE_INTEGER), METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, @@ -959,6 +995,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each 5 Int with count */ PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 5, 0, 0, 0), + {{"_TSN", METHOD_0ARGS, /* ACPI 6.0 */ + METHOD_RETURNS(ACPI_RTYPE_REFERENCE)}}, + {{"_TSP", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 2b3c5bd222f1..d49f5c7a20d9 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -251,7 +251,7 @@ extern const u8 _acpi_ctype[]; #define _ACPI_DI 0x04 /* '0'-'9' */ #define _ACPI_LO 0x02 /* 'a'-'z' */ #define _ACPI_PU 0x10 /* punctuation */ -#define _ACPI_SP 0x08 /* space */ +#define _ACPI_SP 0x08 /* space, tab, CR, LF, VT, FF */ #define _ACPI_UP 0x01 /* 'A'-'Z' */ #define _ACPI_XD 0x80 /* '0'-'9', 'A'-'F', 'a'-'f' */ diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index d72565a3c646..85bb951430d9 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -116,6 +116,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, walk_state = acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); if (!walk_state) { + acpi_ps_free_op(op); return_ACPI_STATUS(AE_NO_MEMORY); } @@ -125,6 +126,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, obj_desc->method.aml_length, NULL, 0); if (ACPI_FAILURE(status)) { acpi_ds_delete_walk_state(walk_state); + acpi_ps_free_op(op); return_ACPI_STATUS(status); } @@ -133,9 +135,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, /* Parse the method, scan for creation of named objects */ status = acpi_ps_parse_aml(walk_state); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } acpi_ps_delete_parse_tree(op); return_ACPI_STATUS(status); diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index c5214dec4988..f785ea788356 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c @@ -123,7 +123,7 @@ acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id, acpi_handle root_pci_device, acpi_handle pci_region) { acpi_status status; - struct acpi_pci_device *list_head = NULL; + struct acpi_pci_device *list_head; ACPI_FUNCTION_TRACE(hw_derive_pci_id); @@ -177,13 +177,13 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device, acpi_handle parent_device; acpi_status status; struct acpi_pci_device *list_element; - struct acpi_pci_device *list_head = NULL; /* * Ascend namespace branch until the root_pci_device is reached, building * a list of device nodes. Loop will exit when either the PCI device is * found, or the root of the namespace is reached. 
*/ + *return_list_head = NULL; current_device = pci_region; while (1) { status = acpi_get_parent(current_device, &parent_device); @@ -198,7 +198,6 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device, /* Finished when we reach the PCI root device (PNP0A03 or PNP0A08) */ if (parent_device == root_pci_device) { - *return_list_head = list_head; return (AE_OK); } @@ -213,9 +212,9 @@ acpi_hw_build_pci_list(acpi_handle root_pci_device, /* Put new element at the head of the list */ - list_element->next = list_head; + list_element->next = *return_list_head; list_element->device = parent_device; - list_head = list_element; + *return_list_head = list_element; current_device = parent_device; } diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 8b79958b7aca..9bb251932b45 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -316,6 +316,13 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, acpi_ns_check_package_list(info, package, elements, count); break; + case ACPI_PTYPE2_VAR_VAR: + /* + * Returns a variable list of packages, each with a variable list + * of objects. + */ + break; + case ACPI_PTYPE2_UUID_PAIR: /* The package must contain pairs of (UUID + type) */ @@ -487,6 +494,12 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info, } break; + case ACPI_PTYPE2_VAR_VAR: + /* + * Each subpackage has a fixed or variable number of elements + */ + break; + case ACPI_PTYPE2_FIXED: /* Each subpackage has a fixed length */ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 151fcd95ba84..77d8103d0094 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -497,10 +497,10 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info, case ACPI_PTYPE2_MIN: case ACPI_PTYPE2_REV_FIXED: case ACPI_PTYPE2_FIX_VAR: - break; default: + case ACPI_PTYPE2_VAR_VAR: case ACPI_PTYPE1_FIXED: case ACPI_PTYPE1_OPTION: return; diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index 20e1a35169fc..58310907fa7b 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -50,9 +50,6 @@ #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psopinfo") -extern const u8 acpi_gbl_short_op_index[]; -extern const u8 acpi_gbl_long_op_index[]; - static const u8 acpi_gbl_argument_count[] = { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 }; diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c index 7e1168be39fa..857af824337b 100644 --- a/drivers/acpi/acpica/utfileio.c +++ b/drivers/acpi/acpica/utfileio.c @@ -198,11 +198,8 @@ acpi_ut_read_table(FILE * fp, table_header.length, file_size); #ifdef ACPI_ASL_COMPILER - status = fl_check_for_ascii(fp, NULL, FALSE); - if (ACPI_SUCCESS(status)) { - acpi_os_printf - ("File appears to be ASCII only, must be binary\n"); - } + acpi_os_printf("File is corrupt or is ASCII text -- " + "it must be a binary file\n"); #endif return (AE_BAD_HEADER); } @@ -315,7 +312,7 @@ acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table) /* Get the entire file */ fprintf(stderr, - "Loading Acpi table from file %10s - Length %.8u (%06X)\n", + "Reading ACPI table from file %10s - Length %.8u (0x%06X)\n", filename, file_size, file_size); status = acpi_ut_read_table(file, table, &table_length); diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index aa448278ba28..fda8b3def81c 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -75,9 +75,9 @@ char 
acpi_ut_hex_to_ascii_char(u64 integer, u32 position) /******************************************************************************* * - * FUNCTION: acpi_ut_hex_char_to_value + * FUNCTION: acpi_ut_ascii_char_to_hex * - * PARAMETERS: ascii_char - Hex character in Ascii + * PARAMETERS: hex_char - Hex character in Ascii * * RETURN: The binary value of the ascii/hex character * diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 306e785f9418..98d578753101 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -107,9 +107,16 @@ acpi_exception(const char *module_name, va_list arg_list; ACPI_MSG_REDIRECT_BEGIN; - acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ", - acpi_format_exception(status)); + /* For AE_OK, just print the message */ + + if (ACPI_SUCCESS(status)) { + acpi_os_printf(ACPI_MSG_EXCEPTION); + + } else { + acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ", + acpi_format_exception(status)); + } va_start(arg_list, format); acpi_os_vprintf(format, arg_list); ACPI_MSG_SUFFIX; diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index ed65e9c4b5b0..3670bbab57a3 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -35,6 +35,7 @@ #include <linux/nmi.h> #include <linux/hardirq.h> #include <linux/pstore.h> +#include <linux/vmalloc.h> #include <acpi/apei.h> #include "apei-internal.h" diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index e82d0976a5d0..2bfd53cbfe80 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -729,10 +729,10 @@ static struct llist_head ghes_estatus_llist; static struct irq_work ghes_proc_irq_work; /* - * NMI may be triggered on any CPU, so ghes_nmi_lock is used for - * mutual exclusion. + * NMI may be triggered on any CPU, so ghes_in_nmi is used for + * having only one concurrent reader. */ -static DEFINE_RAW_SPINLOCK(ghes_nmi_lock); +static atomic_t ghes_in_nmi = ATOMIC_INIT(0); static LIST_HEAD(ghes_nmi); @@ -797,73 +797,75 @@ static void ghes_print_queued_estatus(void) } } +/* Save estatus for further processing in IRQ context */ +static void __process_error(struct ghes *ghes) +{ +#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + u32 len, node_len; + struct ghes_estatus_node *estatus_node; + struct acpi_hest_generic_status *estatus; + + if (ghes_estatus_cached(ghes->estatus)) + return; + + len = cper_estatus_len(ghes->estatus); + node_len = GHES_ESTATUS_NODE_LEN(len); + + estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len); + if (!estatus_node) + return; + + estatus_node->ghes = ghes; + estatus_node->generic = ghes->generic; + estatus = GHES_ESTATUS_FROM_NODE(estatus_node); + memcpy(estatus, ghes->estatus, len); + llist_add(&estatus_node->llnode, &ghes_estatus_llist); +#endif +} + +static void __ghes_panic(struct ghes *ghes) +{ + oops_begin(); + ghes_print_queued_estatus(); + __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus); + + /* reboot to log the error! 
*/ + if (panic_timeout == 0) + panic_timeout = ghes_panic_timeout; + panic("Fatal hardware error!"); +} + static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) { - struct ghes *ghes, *ghes_global = NULL; - int sev, sev_global = -1; - int ret = NMI_DONE; + struct ghes *ghes; + int sev, ret = NMI_DONE; + + if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) + return ret; - raw_spin_lock(&ghes_nmi_lock); list_for_each_entry_rcu(ghes, &ghes_nmi, list) { if (ghes_read_estatus(ghes, 1)) { ghes_clear_estatus(ghes); continue; } - sev = ghes_severity(ghes->estatus->error_severity); - if (sev > sev_global) { - sev_global = sev; - ghes_global = ghes; - } - ret = NMI_HANDLED; - } - - if (ret == NMI_DONE) - goto out; - if (sev_global >= GHES_SEV_PANIC) { - oops_begin(); - ghes_print_queued_estatus(); - __ghes_print_estatus(KERN_EMERG, ghes_global->generic, - ghes_global->estatus); - /* reboot to log the error! */ - if (panic_timeout == 0) - panic_timeout = ghes_panic_timeout; - panic("Fatal hardware error!"); - } + sev = ghes_severity(ghes->estatus->error_severity); + if (sev >= GHES_SEV_PANIC) + __ghes_panic(ghes); - list_for_each_entry_rcu(ghes, &ghes_nmi, list) { -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - u32 len, node_len; - struct ghes_estatus_node *estatus_node; - struct acpi_hest_generic_status *estatus; -#endif if (!(ghes->flags & GHES_TO_CLEAR)) continue; -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - if (ghes_estatus_cached(ghes->estatus)) - goto next; - /* Save estatus for further processing in IRQ context */ - len = cper_estatus_len(ghes->estatus); - node_len = GHES_ESTATUS_NODE_LEN(len); - estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, - node_len); - if (estatus_node) { - estatus_node->ghes = ghes; - estatus_node->generic = ghes->generic; - estatus = GHES_ESTATUS_FROM_NODE(estatus_node); - memcpy(estatus, ghes->estatus, len); - llist_add(&estatus_node->llnode, &ghes_estatus_llist); - } -next: -#endif + + __process_error(ghes); ghes_clear_estatus(ghes); + + ret = NMI_HANDLED; } + #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG irq_work_queue(&ghes_proc_irq_work); #endif - -out: - raw_spin_unlock(&ghes_nmi_lock); + atomic_dec(&ghes_in_nmi); return ret; } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 63d43677f644..b3628cc01a53 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -70,6 +70,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>"); MODULE_DESCRIPTION("ACPI Battery Driver"); MODULE_LICENSE("GPL"); +static async_cookie_t async_cookie; static int battery_bix_broken_package; static int battery_notification_delay_ms; static unsigned int cache_time = 1000; @@ -338,14 +339,6 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; -#ifdef CONFIG_ACPI_PROCFS_POWER -inline char *acpi_battery_units(struct acpi_battery *battery) -{ - return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? - "mA" : "mW"; -} -#endif - /* -------------------------------------------------------------------------- Battery Management -------------------------------------------------------------------------- */ @@ -354,14 +347,14 @@ struct acpi_offsets { u8 mode; /* int or string? 
*/ }; -static struct acpi_offsets state_offsets[] = { +static const struct acpi_offsets state_offsets[] = { {offsetof(struct acpi_battery, state), 0}, {offsetof(struct acpi_battery, rate_now), 0}, {offsetof(struct acpi_battery, capacity_now), 0}, {offsetof(struct acpi_battery, voltage_now), 0}, }; -static struct acpi_offsets info_offsets[] = { +static const struct acpi_offsets info_offsets[] = { {offsetof(struct acpi_battery, power_unit), 0}, {offsetof(struct acpi_battery, design_capacity), 0}, {offsetof(struct acpi_battery, full_charge_capacity), 0}, @@ -377,7 +370,7 @@ static struct acpi_offsets info_offsets[] = { {offsetof(struct acpi_battery, oem_info), 1}, }; -static struct acpi_offsets extended_info_offsets[] = { +static const struct acpi_offsets extended_info_offsets[] = { {offsetof(struct acpi_battery, revision), 0}, {offsetof(struct acpi_battery, power_unit), 0}, {offsetof(struct acpi_battery, design_capacity), 0}, @@ -402,7 +395,7 @@ static struct acpi_offsets extended_info_offsets[] = { static int extract_package(struct acpi_battery *battery, union acpi_object *package, - struct acpi_offsets *offsets, int num) + const struct acpi_offsets *offsets, int num) { int i; union acpi_object *element; @@ -792,6 +785,12 @@ static void acpi_battery_refresh(struct acpi_battery *battery) #ifdef CONFIG_ACPI_PROCFS_POWER static struct proc_dir_entry *acpi_battery_dir; +static const char *acpi_battery_units(const struct acpi_battery *battery) +{ + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? + "mA" : "mW"; +} + static int acpi_battery_print_info(struct seq_file *seq, int result) { struct acpi_battery *battery = seq->private; @@ -1125,19 +1124,21 @@ static int battery_notify(struct notifier_block *nb, return 0; } -static int battery_bix_broken_package_quirk(const struct dmi_system_id *d) +static int __init +battery_bix_broken_package_quirk(const struct dmi_system_id *d) { battery_bix_broken_package = 1; return 0; } -static int battery_notification_delay_quirk(const struct dmi_system_id *d) +static int __init +battery_notification_delay_quirk(const struct dmi_system_id *d) { battery_notification_delay_ms = 1000; return 0; } -static struct dmi_system_id bat_dmi_table[] = { +static const struct dmi_system_id bat_dmi_table[] __initconst = { { .callback = battery_bix_broken_package_quirk, .ident = "NEC LZ750/LS", @@ -1292,33 +1293,34 @@ static struct acpi_driver acpi_battery_driver = { static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) { - if (acpi_disabled) - return; + int result; dmi_check_system(bat_dmi_table); - + #ifdef CONFIG_ACPI_PROCFS_POWER acpi_battery_dir = acpi_lock_battery_dir(); if (!acpi_battery_dir) return; #endif - if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { + result = acpi_bus_register_driver(&acpi_battery_driver); #ifdef CONFIG_ACPI_PROCFS_POWER + if (result < 0) acpi_unlock_battery_dir(acpi_battery_dir); #endif - return; - } - return; } static int __init acpi_battery_init(void) { - async_schedule(acpi_battery_init_async, NULL); + if (acpi_disabled) + return -ENODEV; + + async_cookie = async_schedule(acpi_battery_init_async, NULL); return 0; } static void __exit acpi_battery_exit(void) { + async_synchronize_cookie(async_cookie); acpi_bus_unregister_driver(&acpi_battery_driver); #ifdef CONFIG_ACPI_PROCFS_POWER acpi_unlock_battery_dir(acpi_battery_dir); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c412fdb28d34..513e7230e3d0 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -470,6 +470,16 @@ static int 
__init acpi_bus_init_irq(void) return 0; } +/** + * acpi_early_init - Initialize ACPICA and populate the ACPI namespace. + * + * The ACPI tables are accessible after this, but the handling of events has not + * been initialized and the global lock is not available yet, so AML should not + * be executed at this point. + * + * Doing this before switching the EFI runtime services to virtual mode allows + * the EfiBootServices memory to be freed slightly earlier on boot. + */ void __init acpi_early_init(void) { acpi_status status; @@ -533,26 +543,42 @@ void __init acpi_early_init(void) acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi; } #endif + return; + + error0: + disable_acpi(); +} + +/** + * acpi_subsystem_init - Finalize the early initialization of ACPI. + * + * Switch over the platform to the ACPI mode (if possible), initialize the + * handling of ACPI events, install the interrupt and global lock handlers. + * + * Doing this too early is generally unsafe, but at the same time it needs to be + * done before all things that really depend on ACPI. The right spot appears to + * be before finalizing the EFI initialization. + */ +void __init acpi_subsystem_init(void) +{ + acpi_status status; + + if (acpi_disabled) + return; status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); - goto error0; + disable_acpi(); + } else { + /* + * If the system is using ACPI then we can be reasonably + * confident that any regulators are managed by the firmware + * so tell the regulator core it has everything it needs to + * know. + */ + regulator_has_full_constraints(); } - - /* - * If the system is using ACPI then we can be reasonably - * confident that any regulators are managed by the firmware - * so tell the regulator core it has everything it needs to - * know. - */ - regulator_has_full_constraints(); - - return; - - error0: - disable_acpi(); - return; } static int __init acpi_bus_init(void) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 735db11a9b00..717afcdb5f4a 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -98,17 +98,16 @@ int acpi_device_get_power(struct acpi_device *device, int *state) /* * The power resources settings may indicate a power state - * shallower than the actual power state of the device. + * shallower than the actual power state of the device, because + * the same power resources may be referenced by other devices. * - * Moreover, on systems predating ACPI 4.0, if the device - * doesn't depend on any power resources and _PSC returns 3, - * that means "power off". We need to maintain compatibility - * with those systems. + * For systems predating ACPI 4.0 we assume that D3hot is the + * deepest state that can be supported. */ if (psc > result && psc < ACPI_STATE_D3_COLD) result = psc; else if (result == ACPI_STATE_UNKNOWN) - result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc; + result = psc > ACPI_STATE_D2 ? 
ACPI_STATE_D3_HOT : psc; } /* @@ -153,8 +152,8 @@ static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state) */ int acpi_device_set_power(struct acpi_device *device, int state) { + int target_state = state; int result = 0; - bool cut_power = false; if (!device || !device->flags.power_manageable || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD)) @@ -169,11 +168,21 @@ int acpi_device_set_power(struct acpi_device *device, int state) return 0; } - if (!device->power.states[state].flags.valid) { + if (state == ACPI_STATE_D3_COLD) { + /* + * For transitions to D3cold we need to execute _PS3 and then + * possibly drop references to the power resources in use. + */ + state = ACPI_STATE_D3_HOT; + /* If _PR3 is not available, use D3hot as the target state. */ + if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) + target_state = state; + } else if (!device->power.states[state].flags.valid) { dev_warn(&device->dev, "Power state %s not supported\n", acpi_power_state_string(state)); return -ENODEV; } + if (!device->power.flags.ignore_parent && device->parent && (state < device->parent->power.state)) { dev_warn(&device->dev, @@ -183,39 +192,38 @@ int acpi_device_set_power(struct acpi_device *device, int state) return -ENODEV; } - /* For D3cold we should first transition into D3hot. */ - if (state == ACPI_STATE_D3_COLD - && device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible) { - state = ACPI_STATE_D3_HOT; - cut_power = true; - } - - if (state < device->power.state && state != ACPI_STATE_D0 - && device->power.state >= ACPI_STATE_D3_HOT) { - dev_warn(&device->dev, - "Cannot transition to non-D0 state from D3\n"); - return -ENODEV; - } - /* * Transition Power * ---------------- - * In accordance with the ACPI specification first apply power (via - * power resources) and then evaluate _PSx. + * In accordance with ACPI 6, _PSx is executed before manipulating power + * resources, unless the target state is D0, in which case _PS0 is + * supposed to be executed after turning the power resources on. */ - if (device->power.flags.power_resources) { - result = acpi_power_transition(device, state); + if (state > ACPI_STATE_D0) { + /* + * According to ACPI 6, devices cannot go from lower-power + * (deeper) states to higher-power (shallower) states. + */ + if (state < device->power.state) { + dev_warn(&device->dev, "Cannot transition from %s to %s\n", + acpi_power_state_string(device->power.state), + acpi_power_state_string(state)); + return -ENODEV; + } + + result = acpi_dev_pm_explicit_set(device, state); if (result) goto end; - } - result = acpi_dev_pm_explicit_set(device, state); - if (result) - goto end; - if (cut_power) { - device->power.state = state; - state = ACPI_STATE_D3_COLD; - result = acpi_power_transition(device, state); + if (device->power.flags.power_resources) + result = acpi_power_transition(device, target_state); + } else { + if (device->power.flags.power_resources) { + result = acpi_power_transition(device, ACPI_STATE_D0); + if (result) + goto end; + } + result = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); } end: @@ -264,13 +272,24 @@ int acpi_bus_init_power(struct acpi_device *device) return result; if (state < ACPI_STATE_D3_COLD && device->power.flags.power_resources) { + /* Reference count the power resources. 
*/ result = acpi_power_on_resources(device, state); if (result) return result; - result = acpi_dev_pm_explicit_set(device, state); - if (result) - return result; + if (state == ACPI_STATE_D0) { + /* + * If _PSC is not present and the state inferred from + * power resources appears to be D0, it still may be + * necessary to execute _PS0 at this point, because + * another device using the same power resources may + * have been put into D0 previously and that's why we + * see D0 here. + */ + result = acpi_dev_pm_explicit_set(device, state); + if (result) + return result; + } } else if (state == ACPI_STATE_UNKNOWN) { /* * No power resources and missing _PSC? Cross fingers and make @@ -603,12 +622,12 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3_COLD) return -EINVAL; - if (d_max_in > ACPI_STATE_D3_HOT) { + if (d_max_in > ACPI_STATE_D2) { enum pm_qos_flags_status stat; stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); if (stat == PM_QOS_FLAGS_ALL) - d_max_in = ACPI_STATE_D3_HOT; + d_max_in = ACPI_STATE_D2; } adev = ACPI_COMPANION(dev); @@ -953,6 +972,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare); */ void acpi_subsys_complete(struct device *dev) { + pm_generic_complete(dev); /* * If the device had been runtime-suspended before the system went into * the sleep state it is going out of and it has never been resumed till diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 5e8fed448850..9d4761d2f6b7 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -59,6 +59,38 @@ #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */ #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ +/* + * The SCI_EVT clearing timing is not defined by the ACPI specification. + * This leads to lots of practical timing issues for the host EC driver. + * The following variations are defined (from the target EC firmware's + * perspective): + * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the + * target can clear SCI_EVT at any time so long as the host can see + * the indication by reading the status register (EC_SC). So the + * host should re-check SCI_EVT after the first time the SCI_EVT + * indication is seen, which is the same time the query request + * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set + * at any later time could indicate another event. Normally such + * kind of EC firmware has implemented an event queue and will + * return 0x00 to indicate "no outstanding event". + * QUERY: After seeing the query request (QR_EC) written to the command + * register (EC_CMD) by the host and having prepared the responding + * event value in the data register (EC_DATA), the target can safely + * clear SCI_EVT because the target can confirm that the current + * event is being handled by the host. The host then should check + * SCI_EVT right after reading the event response from the data + * register (EC_DATA). + * EVENT: After seeing the event response read from the data register + * (EC_DATA) by the host, the target can clear SCI_EVT. As the + * target requires time to notice the change in the data register + * (EC_DATA), the host may be required to wait additional guarding + * time before checking the SCI_EVT again. Such guarding may not be + * necessary if the host is notified via another IRQ. 
+ */ +#define ACPI_EC_EVT_TIMING_STATUS 0x00 +#define ACPI_EC_EVT_TIMING_QUERY 0x01 +#define ACPI_EC_EVT_TIMING_EVENT 0x02 + /* EC commands */ enum ec_command { ACPI_EC_COMMAND_READ = 0x80, @@ -70,13 +102,13 @@ enum ec_command { #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ -#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ -#define ACPI_EC_UDELAY_POLL 1000 /* Wait 1ms for EC transaction polling */ +#define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */ #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query * when trying to clear the EC */ enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ + EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */ EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and * OpReg are installed */ EC_FLAGS_STARTED, /* Driver is started */ @@ -93,6 +125,16 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); +static bool ec_busy_polling __read_mostly; +module_param(ec_busy_polling, bool, 0644); +MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction"); + +static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL; +module_param(ec_polling_guard, uint, 0644); +MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes"); + +static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY; + /* * If the number of false interrupts per one transaction exceeds * this threshold, will think there is a GPE storm happened and @@ -121,7 +163,6 @@ struct transaction { u8 wlen; u8 rlen; u8 flags; - unsigned long timestamp; }; static int acpi_ec_query(struct acpi_ec *ec, u8 *data); @@ -130,7 +171,6 @@ static void advance_transaction(struct acpi_ec *ec); struct acpi_ec *boot_ec, *first_ec; EXPORT_SYMBOL(first_ec); -static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -218,7 +258,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec) { u8 x = inb(ec->data_addr); - ec->curr->timestamp = jiffies; + ec->timestamp = jiffies; ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x); return x; } @@ -227,14 +267,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) { ec_dbg_raw("EC_SC(W) = 0x%2.2x", command); outb(command, ec->command_addr); - ec->curr->timestamp = jiffies; + ec->timestamp = jiffies; } static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) { ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data); outb(data, ec->data_addr); - ec->curr->timestamp = jiffies; + ec->timestamp = jiffies; } #ifdef DEBUG @@ -267,7 +307,7 @@ static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec) acpi_event_status gpe_status = 0; (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); - return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false; + return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? 
true : false; } static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open) @@ -379,19 +419,49 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) static void acpi_ec_submit_query(struct acpi_ec *ec) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { - ec_dbg_req("Event started"); + ec_dbg_evt("Command(%s) submitted/blocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + ec->nr_pending_queries++; schedule_work(&ec->work); } } static void acpi_ec_complete_query(struct acpi_ec *ec) { - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { + if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - ec_dbg_req("Event stopped"); + ec_dbg_evt("Command(%s) unblocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); } } +static bool acpi_ec_guard_event(struct acpi_ec *ec) +{ + if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || + ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY || + !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) || + (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY)) + return false; + + /* + * Postpone the query submission to allow the firmware to proceed, + * we shouldn't check SCI_EVT before the firmware reflagging it. + */ + return true; +} + +static int ec_transaction_polled(struct acpi_ec *ec) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ec->lock, flags); + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) + ret = 1; + spin_unlock_irqrestore(&ec->lock, flags); + return ret; +} + static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; @@ -404,6 +474,22 @@ static int ec_transaction_completed(struct acpi_ec *ec) return ret; } +static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag) +{ + ec->curr->flags |= flag; + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { + if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS && + flag == ACPI_EC_COMMAND_POLL) + acpi_ec_complete_query(ec); + if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY && + flag == ACPI_EC_COMMAND_COMPLETE) + acpi_ec_complete_query(ec); + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && + flag == ACPI_EC_COMMAND_COMPLETE) + set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); + } +} + static void advance_transaction(struct acpi_ec *ec) { struct transaction *t; @@ -420,6 +506,18 @@ static void advance_transaction(struct acpi_ec *ec) acpi_ec_clear_gpe(ec); status = acpi_ec_read_status(ec); t = ec->curr; + /* + * Another IRQ or a guarded polling mode advancement is detected, + * the next QR_EC submission is then allowed. 
+ */ + if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) { + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && + (!ec->nr_pending_queries || + test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) { + clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); + acpi_ec_complete_query(ec); + } + } if (!t) goto err; if (t->flags & ACPI_EC_COMMAND_POLL) { @@ -432,17 +530,17 @@ if ((status & ACPI_EC_FLAG_OBF) == 1) { t->rdata[t->ri++] = acpi_ec_read_data(ec); if (t->rlen == t->ri) { - t->flags |= ACPI_EC_COMMAND_COMPLETE; + ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); if (t->command == ACPI_EC_COMMAND_QUERY) - ec_dbg_req("Command(%s) hardware completion", - acpi_ec_cmd_string(t->command)); + ec_dbg_evt("Command(%s) completed by hardware", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); wakeup = true; } } else goto err; } else if (t->wlen == t->wi && (status & ACPI_EC_FLAG_IBF) == 0) { - t->flags |= ACPI_EC_COMMAND_COMPLETE; + ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); wakeup = true; } goto out; @@ -450,17 +548,15 @@ if (EC_FLAGS_QUERY_HANDSHAKE && !(status & ACPI_EC_FLAG_SCI) && (t->command == ACPI_EC_COMMAND_QUERY)) { - t->flags |= ACPI_EC_COMMAND_POLL; - acpi_ec_complete_query(ec); + ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); t->rdata[t->ri++] = 0x00; - t->flags |= ACPI_EC_COMMAND_COMPLETE; - ec_dbg_req("Command(%s) software completion", - acpi_ec_cmd_string(t->command)); + ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE); + ec_dbg_evt("Command(%s) completed by software", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); wakeup = true; } else if ((status & ACPI_EC_FLAG_IBF) == 0) { acpi_ec_write_cmd(ec, t->command); - t->flags |= ACPI_EC_COMMAND_POLL; - acpi_ec_complete_query(ec); + ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL); } else goto err; goto out; @@ -490,8 +586,39 @@ static void start_transaction(struct acpi_ec *ec) { ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; ec->curr->flags = 0; - ec->curr->timestamp = jiffies; - advance_transaction(ec); +} + +static int ec_guard(struct acpi_ec *ec) +{ + unsigned long guard = usecs_to_jiffies(ec_polling_guard); + unsigned long timeout = ec->timestamp + guard; + + do { + if (ec_busy_polling) { + /* Perform busy polling */ + if (ec_transaction_completed(ec)) + return 0; + udelay(jiffies_to_usecs(guard)); + } else { + /* + * Perform wait polling + * + * For the "event" SCI_EVT clearing timing, + * perform guarding before re-checking + * SCI_EVT. Otherwise, such guarding is not needed + * due to the old practices.
+ */ + if (!ec_transaction_polled(ec) && + !acpi_ec_guard_event(ec)) + break; + if (wait_event_timeout(ec->wait, + ec_transaction_completed(ec), + guard)) + return 0; + } + /* Guard the register accesses for the polling modes */ + } while (time_before(jiffies, timeout)); + return -ETIME; } static int ec_poll(struct acpi_ec *ec) @@ -502,25 +629,11 @@ while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); - unsigned long usecs = ACPI_EC_UDELAY_POLL; do { - /* don't sleep with disabled interrupts */ - if (EC_FLAGS_MSI || irqs_disabled()) { - usecs = ACPI_EC_MSI_UDELAY; - udelay(usecs); - if (ec_transaction_completed(ec)) - return 0; - } else { - if (wait_event_timeout(ec->wait, - ec_transaction_completed(ec), - usecs_to_jiffies(usecs))) - return 0; - } + if (!ec_guard(ec)) + return 0; spin_lock_irqsave(&ec->lock, flags); - if (time_after(jiffies, - ec->curr->timestamp + - usecs_to_jiffies(usecs))) - advance_transaction(ec); + advance_transaction(ec); spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); @@ -537,8 +650,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, unsigned long tmp; int ret = 0; - if (EC_FLAGS_MSI) - udelay(ACPI_EC_MSI_UDELAY); /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ @@ -552,7 +663,9 @@ ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command)); start_transaction(ec); spin_unlock_irqrestore(&ec->lock, tmp); + ret = ec_poll(ec); + spin_lock_irqsave(&ec->lock, tmp); if (t->irq_count == ec_storm_threshold) acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM); @@ -575,6 +688,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) return -EINVAL; if (t->rdata) memset(t->rdata, 0, t->rlen); + mutex_lock(&ec->mutex); if (ec->global_lock) { status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); @@ -586,8 +700,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) status = acpi_ec_transaction_unlocked(ec, t); - if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags)) - msleep(1); if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -923,11 +1035,54 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) return result; } -static void acpi_ec_gpe_poller(struct work_struct *work) +static void acpi_ec_check_event(struct acpi_ec *ec) { + unsigned long flags; + + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { + if (ec_guard(ec)) { + spin_lock_irqsave(&ec->lock, flags); + /* + * Take care of the SCI_EVT if no one else is + * taking care of it. + */ + if (!ec->curr) + advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); + } + } +} + +static void acpi_ec_event_handler(struct work_struct *work) +{ + unsigned long flags; struct acpi_ec *ec = container_of(work, struct acpi_ec, work); - acpi_ec_query(ec, NULL); + ec_dbg_evt("Event started"); + + spin_lock_irqsave(&ec->lock, flags); + while (ec->nr_pending_queries) { + spin_unlock_irqrestore(&ec->lock, flags); + (void)acpi_ec_query(ec, NULL); + spin_lock_irqsave(&ec->lock, flags); + ec->nr_pending_queries--; + /* + * Before exit, make sure that this work item can be + * scheduled again. There might be QR_EC failures, leaving + * EC_FLAGS_QUERY_PENDING uncleared and preventing this work + * item from being scheduled again.
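+ * (This re-clear applies to the "status" and "query" clearing timings; + * for "event" the flag is instead cleared in advance_transaction(), + * typically via acpi_ec_check_event()'s guarded re-check.)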
+ */ + if (!ec->nr_pending_queries) { + if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || + ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY) + acpi_ec_complete_query(ec); + } + } + spin_unlock_irqrestore(&ec->lock, flags); + + ec_dbg_evt("Event stopped"); + + acpi_ec_check_event(ec); } static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, @@ -961,7 +1116,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; - if (EC_FLAGS_MSI || bits > 8) + if (ec_busy_polling || bits > 8) acpi_ec_burst_enable(ec); for (i = 0; i < bytes; ++i, ++address, ++value) @@ -969,7 +1124,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, acpi_ec_read(ec, address, value) : acpi_ec_write(ec, address, *value); - if (EC_FLAGS_MSI || bits > 8) + if (ec_busy_polling || bits > 8) acpi_ec_burst_disable(ec); switch (result) { @@ -1002,7 +1157,8 @@ static struct acpi_ec *make_acpi_ec(void) init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); spin_lock_init(&ec->lock); - INIT_WORK(&ec->work, acpi_ec_gpe_poller); + INIT_WORK(&ec->work, acpi_ec_event_handler); + ec->timestamp = jiffies; return ec; } @@ -1237,30 +1393,13 @@ static int ec_validate_ecdt(const struct dmi_system_id *id) return 0; } -/* MSI EC needs special treatment, enable it */ -static int ec_flag_msi(const struct dmi_system_id *id) -{ - pr_debug("Detected MSI hardware, enabling workarounds.\n"); - EC_FLAGS_MSI = 1; - EC_FLAGS_VALIDATE_ECDT = 1; - return 0; -} - -/* - * Clevo M720 notebook actually works ok with IRQ mode, if we lifted - * the GPE storm threshold back to 20 - */ -static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) -{ - pr_debug("Setting the EC GPE storm threshold to 20\n"); - ec_storm_threshold = 20; - return 0; -} - +#if 0 /* - * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for - * which case, we complete the QR_EC without issuing it to the firmware. - * https://bugzilla.kernel.org/show_bug.cgi?id=86211 + * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is + * not set, in which case we complete the QR_EC without issuing it to the + * firmware.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=82611 + * https://bugzilla.kernel.org/show_bug.cgi?id=97381 */ static int ec_flag_query_handshake(const struct dmi_system_id *id) { @@ -1268,6 +1407,7 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id) EC_FLAGS_QUERY_HANDSHAKE = 1; return 0; } +#endif /* * On some hardware it is necessary to clear events accumulated by the EC during @@ -1290,6 +1430,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id) { pr_debug("Detected system needing EC poll on resume.\n"); EC_FLAGS_CLEAR_ON_RESUME = 1; + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; return 0; } @@ -1299,29 +1440,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, - { - ec_flag_msi, "Quanta hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), - DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL}, - { - ec_flag_msi, "Quanta hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), - DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL}, - { - ec_flag_msi, "Clevo W350etq", { - DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."), - DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL}, + ec_validate_ecdt, "MSI MS-171F", { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, @@ -1329,10 +1450,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, { - ec_enlarge_storm_threshold, "CLEVO hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), - DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, - { ec_skip_dsdt_scan, "HP Folio 13", { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL}, @@ -1343,9 +1460,6 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_clear_on_resume, "Samsung hardware", { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, - { - ec_flag_query_handshake, "Acer hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL}, {}, }; @@ -1427,6 +1541,43 @@ error: return -ENODEV; } +static int param_set_event_clearing(const char *val, struct kernel_param *kp) +{ + int result = 0; + + if (!strncmp(val, "status", sizeof("status") - 1)) { + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; + pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n"); + } else if (!strncmp(val, "query", sizeof("query") - 1)) { + ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY; + pr_info("Assuming SCI_EVT clearing on QR_EC writes\n"); + } else if (!strncmp(val, "event", sizeof("event") - 1)) { + ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT; + pr_info("Assuming SCI_EVT clearing on event reads\n"); + } else + result = -EINVAL; + return result; +} + +static int param_get_event_clearing(char *buffer, struct kernel_param *kp) +{ + switch (ec_event_clearing) { + case ACPI_EC_EVT_TIMING_STATUS: + return sprintf(buffer, "status"); + case ACPI_EC_EVT_TIMING_QUERY: + return sprintf(buffer, "query"); + case ACPI_EC_EVT_TIMING_EVENT: + return sprintf(buffer, "event"); + 
default: + return sprintf(buffer, "invalid"); + } + return 0; +} + +module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing, + NULL, 0644); +MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing"); + static struct acpi_driver acpi_ec_driver = { .name = "ec", .class = ACPI_EC_CLASS, diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 7a36f02598a6..bea0bbaafa97 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -158,8 +158,9 @@ static int fan_get_state(struct acpi_device *device, unsigned long *state) if (result) return result; - *state = (acpi_state == ACPI_STATE_D3_COLD ? 0 : - (acpi_state == ACPI_STATE_D0 ? 1 : -1)); + *state = acpi_state == ACPI_STATE_D3_COLD + || acpi_state == ACPI_STATE_D3_HOT ? + 0 : (acpi_state == ACPI_STATE_D0 ? 1 : -1); return 0; } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 39c485b0c25c..b9657af751d1 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/acpi.h> +#include <linux/dma-mapping.h> #include "internal.h" @@ -167,6 +168,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; + bool coherent; if (has_acpi_companion(dev)) { if (acpi_dev) { @@ -223,6 +225,9 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); + if (acpi_check_dma(acpi_dev, &coherent)) + arch_setup_dma_ops(dev, 0, 0, NULL, coherent); + acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index aafe3ca829c2..a322710b5ba4 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -27,7 +27,7 @@ #include <linux/acpi.h> #include <acpi/hed.h> -static struct acpi_device_id acpi_hed_ids[] = { +static const struct acpi_device_id acpi_hed_ids[] = { {"PNP0C33", 0}, {"", 0}, }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index ba4a61e964be..787c629bc9b4 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -138,6 +138,8 @@ struct acpi_ec { struct transaction *curr; spinlock_t lock; struct work_struct work; + unsigned long timestamp; + unsigned long nr_pending_queries; }; extern struct acpi_ec *first_ec; @@ -182,15 +184,10 @@ static inline void suspend_nvs_restore(void) {} #endif /*-------------------------------------------------------------------------- - Video - -------------------------------------------------------------------------- */ -#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) -bool acpi_osi_is_win8(void); -#endif - -/*-------------------------------------------------------------------------- Device properties -------------------------------------------------------------------------- */ +#define ACPI_DT_NAMESPACE_HID "PRP0001" + void acpi_init_properties(struct acpi_device *adev); void acpi_free_properties(struct acpi_device *adev); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 7ccba395c9dd..a5dc9034efee 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -175,11 +175,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, if (!addr || !length) return; - /* Resources are never freed */ - if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) - request_region(addr, length, desc); - else if (gas->space_id == 
ACPI_ADR_SPACE_SYSTEM_MEMORY) - request_mem_region(addr, length, desc); + acpi_reserve_region(addr, length, gas->space_id, 0, desc); } static void __init acpi_reserve_resources(void) @@ -540,7 +536,7 @@ static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; acpi_status acpi_os_predefined_override(const struct acpi_predefined_names *init_val, - acpi_string * new_val) + char **new_val) { if (!init_val || !new_val) return AE_BAD_PARAMETER; @@ -1684,6 +1680,12 @@ int acpi_resources_are_enforced(void) } EXPORT_SYMBOL(acpi_resources_are_enforced); +bool acpi_osi_is_win8(void) +{ + return acpi_gbl_osi_data >= ACPI_OSI_WIN_8; +} +EXPORT_SYMBOL(acpi_osi_is_win8); + /* * Deallocate the memory for a spinlock. */ diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index b1def411c0b8..304eccb0ae5c 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -44,7 +44,6 @@ ACPI_MODULE_NAME("pci_irq"); struct acpi_prt_entry { - struct list_head list; struct acpi_pci_id id; u8 pin; acpi_handle link; @@ -163,7 +162,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev, { int segment = pci_domain_nr(dev->bus); int bus = dev->bus->number; - int device = PCI_SLOT(dev->devfn); + int device = pci_ari_enabled(dev->bus) ? 0 : PCI_SLOT(dev->devfn); struct acpi_prt_entry *entry; if (((prt->address >> 16) & 0xffff) != device || diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index e0bcfb642b52..93eac53b5110 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -684,7 +684,8 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) } } - *state = ACPI_STATE_D3_COLD; + *state = device->power.states[ACPI_STATE_D3_COLD].flags.valid ? + ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT; return 0; } @@ -710,8 +711,6 @@ int acpi_power_transition(struct acpi_device *device, int state) || (device->power.state > ACPI_STATE_D3_COLD)) return -ENODEV; - /* TBD: Resources must be ordered. */ - /* * First we reference all power resources required in the target list * (e.g. so the device doesn't lose power while transitioning). 
Then, @@ -761,6 +760,25 @@ static void acpi_power_sysfs_remove(struct acpi_device *device) device_remove_file(&device->dev, &dev_attr_resource_in_use); } +static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource) +{ + mutex_lock(&power_resource_list_lock); + + if (!list_empty(&acpi_power_resource_list)) { + struct acpi_power_resource *r; + + list_for_each_entry(r, &acpi_power_resource_list, list_node) + if (r->order > resource->order) { + list_add_tail(&resource->list_node, &r->list_node); + goto out; + } + } + list_add_tail(&resource->list_node, &acpi_power_resource_list); + + out: + mutex_unlock(&power_resource_list_lock); +} + int acpi_add_power_resource(acpi_handle handle) { struct acpi_power_resource *resource; @@ -811,9 +829,7 @@ int acpi_add_power_resource(acpi_handle handle) if (!device_create_file(&device->dev, &dev_attr_resource_in_use)) device->remove = acpi_power_sysfs_remove; - mutex_lock(&power_resource_list_lock); - list_add(&resource->list_node, &acpi_power_resource_list); - mutex_unlock(&power_resource_list_lock); + acpi_power_add_resource_to_list(resource); acpi_device_add_finalize(device); return 0; @@ -844,7 +860,22 @@ void acpi_resume_power_resources(void) && resource->ref_count) { dev_info(&resource->device.dev, "Turning ON\n"); __acpi_power_on(resource); - } else if (state == ACPI_POWER_RESOURCE_STATE_ON + } + + mutex_unlock(&resource->resource_lock); + } + list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) { + int result, state; + + mutex_lock(&resource->resource_lock); + + result = acpi_power_get_state(resource->device.handle, &state); + if (result) { + mutex_unlock(&resource->resource_lock); + continue; + } + + if (state == ACPI_POWER_RESOURCE_STATE_ON && !resource->ref_count) { dev_info(&resource->device.dev, "Turning OFF\n"); __acpi_power_off(resource); diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index b1ec78b8a645..33a38d604630 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -184,7 +184,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) phys_cpuid_t phys_id; phys_id = map_mat_entry(handle, type, acpi_id); - if (phys_id == PHYS_CPUID_INVALID) + if (invalid_phys_cpuid(phys_id)) phys_id = map_madt_entry(type, acpi_id); return phys_id; @@ -196,7 +196,7 @@ int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) int i; #endif - if (phys_id == PHYS_CPUID_INVALID) { + if (invalid_phys_cpuid(phys_id)) { /* * On UP processor, there is no _MAT or MADT table. * So above phys_id is always set to PHYS_CPUID_INVALID. @@ -215,12 +215,12 @@ int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) * Ignores phys_id and always returns 0 for the processor * handle with acpi id 0 if nr_cpu_ids is 1. * This should be the case if SMP tables are not found. - * Return -1 for other CPU's handle. + * Return -EINVAL for other CPU's handle. 
*/ if (nr_cpu_ids <= 1 && acpi_id == 0) return acpi_id; else - return -1; + return -EINVAL; } #ifdef CONFIG_SMP @@ -233,7 +233,7 @@ int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) if (phys_id == 0) return phys_id; #endif - return -1; + return -ENODEV; } int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 39e0c8e36244..d540f42c9232 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -94,7 +94,7 @@ static int set_max_cstate(const struct dmi_system_id *id) return 0; } -static struct dmi_system_id processor_power_dmi_table[] = { +static const struct dmi_system_id processor_power_dmi_table[] = { { set_max_cstate, "Clevo 5600D", { DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index e5dd80800930..7cfbda4d7c51 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -52,10 +52,7 @@ static bool __init processor_physically_present(acpi_handle handle) type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; cpuid = acpi_get_cpuid(handle, type, acpi_id); - if (cpuid == -1) - return false; - - return true; + return !invalid_logical_cpuid(cpuid); } static void acpi_set_pdc_bits(u32 *buf) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 0d083736e25b..7836e2e980f4 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -79,50 +79,51 @@ static bool acpi_properties_format_valid(const union acpi_object *properties) static void acpi_init_of_compatible(struct acpi_device *adev) { const union acpi_object *of_compatible; - struct acpi_hardware_id *hwid; - bool acpi_of = false; int ret; - /* - * Check if the special PRP0001 ACPI ID is present and in that - * case we fill in Device Tree compatible properties for this - * device. - */ - list_for_each_entry(hwid, &adev->pnp.ids, list) { - if (!strcmp(hwid->id, "PRP0001")) { - acpi_of = true; - break; - } - } - - if (!acpi_of) - return; - ret = acpi_dev_get_property_array(adev, "compatible", ACPI_TYPE_STRING, &of_compatible); if (ret) { ret = acpi_dev_get_property(adev, "compatible", ACPI_TYPE_STRING, &of_compatible); if (ret) { - acpi_handle_warn(adev->handle, - "PRP0001 requires compatible property\n"); + if (adev->parent + && adev->parent->flags.of_compatible_ok) + goto out; + return; } } adev->data.of_compatible = of_compatible; + + out: + adev->flags.of_compatible_ok = 1; } void acpi_init_properties(struct acpi_device *adev) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; + bool acpi_of = false; + struct acpi_hardware_id *hwid; const union acpi_object *desc; acpi_status status; int i; + /* + * Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in + * Device Tree compatible properties for this device.
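+ * (ACPI_DT_NAMESPACE_HID is "PRP0001"; see the internal.h hunk above.)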
+ */ + list_for_each_entry(hwid, &adev->pnp.ids, list) { + if (!strcmp(hwid->id, ACPI_DT_NAMESPACE_HID)) { + acpi_of = true; + break; + } + } + status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL, &buf, ACPI_TYPE_PACKAGE); if (ACPI_FAILURE(status)) - return; + goto out; desc = buf.pointer; if (desc->package.count % 2) @@ -156,13 +157,20 @@ void acpi_init_properties(struct acpi_device *adev) adev->data.pointer = buf.pointer; adev->data.properties = properties; - acpi_init_of_compatible(adev); - return; + if (acpi_of) + acpi_init_of_compatible(adev); + + goto out; } fail: - dev_warn(&adev->dev, "Returned _DSD data is not valid, skipping\n"); + dev_dbg(&adev->dev, "Returned _DSD data is not valid, skipping\n"); ACPI_FREE(buf.pointer); + + out: + if (acpi_of && !adev->flags.of_compatible_ok) + acpi_handle_info(adev->handle, + ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n"); } void acpi_free_properties(struct acpi_device *adev) diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 8244f013f210..fcb7807ea8b7 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -26,6 +26,7 @@ #include <linux/device.h> #include <linux/export.h> #include <linux/ioport.h> +#include <linux/list.h> #include <linux/slab.h> #ifdef CONFIG_X86 @@ -621,3 +622,162 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares, return (type & types) ? 0 : 1; } EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type); + +struct reserved_region { + struct list_head node; + u64 start; + u64 end; +}; + +static LIST_HEAD(reserved_io_regions); +static LIST_HEAD(reserved_mem_regions); + +static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags, + char *desc) +{ + unsigned int length = end - start + 1; + struct resource *res; + + res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ? + request_region(start, length, desc) : + request_mem_region(start, length, desc); + if (!res) + return -EIO; + + res->flags &= ~flags; + return 0; +} + +static int add_region_before(u64 start, u64 end, u8 space_id, + unsigned long flags, char *desc, + struct list_head *head) +{ + struct reserved_region *reg; + int error; + + reg = kmalloc(sizeof(*reg), GFP_KERNEL); + if (!reg) + return -ENOMEM; + + error = request_range(start, end, space_id, flags, desc); + if (error) + return error; + + reg->start = start; + reg->end = end; + list_add_tail(®->node, head); + return 0; +} + +/** + * acpi_reserve_region - Reserve an I/O or memory region as a system resource. + * @start: Starting address of the region. + * @length: Length of the region. + * @space_id: Identifier of address space to reserve the region from. + * @flags: Resource flags to clear for the region after requesting it. + * @desc: Region description (for messages). + * + * Reserve an I/O or memory region as a system resource to prevent others from + * using it. If the new region overlaps with one of the regions (in the given + * address space) already reserved by this routine, only the non-overlapping + * parts of it will be reserved. + * + * Returned is either 0 (success) or a negative error code indicating a resource + * reservation problem. It is the code of the first encountered error, but the + * routine doesn't abort until it has attempted to request all of the parts of + * the new region that don't overlap with other regions reserved previously. + * + * The resources requested by this routine are never released. 
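+ * + * A minimal usage sketch (hypothetical caller and label, not part of this + * patch): reserving one byte of I/O port space while clearing no resource + * flags would look like + * + *	ret = acpi_reserve_region(0x62, 1, ACPI_ADR_SPACE_SYSTEM_IO, + *				  0, "EC data port"); + * + * which is the shape of the call now made from acpi_request_region() in + * the osl.c hunk above.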
+ */ +int acpi_reserve_region(u64 start, unsigned int length, u8 space_id, + unsigned long flags, char *desc) +{ + struct list_head *regions; + struct reserved_region *reg; + u64 end = start + length - 1; + int ret = 0, error = 0; + + if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) + regions = &reserved_io_regions; + else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + regions = &reserved_mem_regions; + else + return -EINVAL; + + if (list_empty(regions)) + return add_region_before(start, end, space_id, flags, desc, regions); + + list_for_each_entry(reg, regions, node) + if (reg->start == end + 1) { + /* The new region can be prepended to this one. */ + ret = request_range(start, end, space_id, flags, desc); + if (!ret) + reg->start = start; + + return ret; + } else if (reg->start > end) { + /* No overlap. Add the new region here and get out. */ + return add_region_before(start, end, space_id, flags, + desc, ®->node); + } else if (reg->end == start - 1) { + goto combine; + } else if (reg->end >= start) { + goto overlap; + } + + /* The new region goes after the last existing one. */ + return add_region_before(start, end, space_id, flags, desc, regions); + + overlap: + /* + * The new region overlaps an existing one. + * + * The head part of the new region immediately preceding the existing + * overlapping one can be combined with it right away. + */ + if (reg->start > start) { + error = request_range(start, reg->start - 1, space_id, flags, desc); + if (error) + ret = error; + else + reg->start = start; + } + + combine: + /* + * The new region is adjacent to an existing one. If it extends beyond + * that region all the way to the next one, it is possible to combine + * all three of them. + */ + while (reg->end < end) { + struct reserved_region *next = NULL; + u64 a = reg->end + 1, b = end; + + if (!list_is_last(®->node, regions)) { + next = list_next_entry(reg, node); + if (next->start <= end) + b = next->start - 1; + } + error = request_range(a, b, space_id, flags, desc); + if (!error) { + if (next && next->start == b + 1) { + reg->end = next->end; + list_del(&next->node); + kfree(next); + } else { + reg->end = end; + break; + } + } else if (next) { + if (!ret) + ret = error; + + reg = next; + } else { + break; + } + } + + return ret ? ret : error; +} +EXPORT_SYMBOL_GPL(acpi_reserve_region); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 03141aa4ea95..2649a068671d 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -11,6 +11,7 @@ #include <linux/kthread.h> #include <linux/dmi.h> #include <linux/nls.h> +#include <linux/dma-mapping.h> #include <asm/pgtable.h> @@ -135,12 +136,13 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, struct acpi_hardware_id *id; /* - * Since we skip PRP0001 from the modalias below, 0 should be returned - * if PRP0001 is the only ACPI/PNP ID in the device's list. + * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should + * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the + * device's list. 
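+ * + * For illustration with hypothetical IDs: a device listing "INT1234" and + * ACPI_DT_NAMESPACE_HID yields "acpi:INT1234:" here, while its _DSD + * "compatible" strings are exposed separately by create_of_modalias() in + * the "of:NnameTCcompatible" form described below.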
*/ count = 0; list_for_each_entry(id, &acpi_dev->pnp.ids, list) - if (strcmp(id->id, "PRP0001")) + if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) count++; if (!count) @@ -153,7 +155,7 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, size -= len; list_for_each_entry(id, &acpi_dev->pnp.ids, list) { - if (!strcmp(id->id, "PRP0001")) + if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) continue; count = snprintf(&modalias[len], size, "%s:", id->id); @@ -177,7 +179,8 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias, * @size: Size of the buffer. * * Expose DT compatible modalias as of:NnameTCcompatible. This function should - * only be called for devices having PRP0001 in their list of ACPI/PNP IDs. + * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of + * ACPI/PNP IDs. */ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias, int size) @@ -980,9 +983,9 @@ static void acpi_device_remove_files(struct acpi_device *dev) * @adev: ACPI device object to match. * @of_match_table: List of device IDs to match against. * - * If @dev has an ACPI companion which has the special PRP0001 device ID in its - * list of identifiers and a _DSD object with the "compatible" property, use - * that property to match against the given list of identifiers. + * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of + * identifiers and a _DSD object with the "compatible" property, use that + * property to match against the given list of identifiers. */ static bool acpi_of_match_device(struct acpi_device *adev, const struct of_device_id *of_match_table) @@ -1038,14 +1041,14 @@ static const struct acpi_device_id *__acpi_match_device( return id; /* - * Next, check the special "PRP0001" ID and try to match the + * Next, check ACPI_DT_NAMESPACE_HID and try to match the * "compatible" property if found. * * The id returned by the below is not valid, but the only * caller passing non-NULL of_ids here is only interested in * whether or not the return value is NULL. */ - if (!strcmp("PRP0001", hwid->id) + if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) && acpi_of_match_device(device, of_ids)) return id; } @@ -1671,7 +1674,7 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, static void acpi_wakeup_gpe_init(struct acpi_device *device) { - struct acpi_device_id button_device_ids[] = { + static const struct acpi_device_id button_device_ids[] = { {"PNP0C0C", 0}, {"PNP0C0D", 0}, {"PNP0C0E", 0}, @@ -1766,15 +1769,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) if (acpi_has_method(device->handle, pathname)) ps->flags.explicit_set = 1; - /* - * State is valid if there are means to put the device into it. - * D3hot is only valid if _PR3 present. - */ - if (!list_empty(&ps->resources) - || (ps->flags.explicit_set && state < ACPI_STATE_D3_HOT)) { + /* State is valid if there are means to put the device into it. 
*/ + if (!list_empty(&ps->resources) || ps->flags.explicit_set) ps->flags.valid = 1; - ps->flags.os_accessible = 1; - } ps->power = -1; /* Unknown - driver assigned */ ps->latency = -1; /* Unknown - driver assigned */ @@ -1810,21 +1807,13 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_init_power_state(device, i); INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - /* Set defaults for D0 and D3 states (always valid) */ + /* Set defaults for D0 and D3hot states (always valid) */ device->power.states[ACPI_STATE_D0].flags.valid = 1; device->power.states[ACPI_STATE_D0].power = 100; - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - device->power.states[ACPI_STATE_D3_COLD].power = 0; - - /* Set D3cold's explicit_set flag if _PS3 exists. */ - if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) - device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; - - /* Presence of _PS3 or _PRx means we can put the device into D3 cold */ - if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set || - device->power.flags.power_resources) - device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; + device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; @@ -1947,6 +1936,62 @@ bool acpi_dock_match(acpi_handle handle) return acpi_has_method(handle, "_DCK"); } +static acpi_status +acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, + void **return_value) +{ + long *cap = context; + + if (acpi_has_method(handle, "_BCM") && + acpi_has_method(handle, "_BCL")) { + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " + "support\n")); + *cap |= ACPI_VIDEO_BACKLIGHT; + if (!acpi_has_method(handle, "_BQC")) + printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " + "cannot determine initial brightness\n"); + /* We have backlight support, no need to scan further */ + return AE_CTRL_TERMINATE; + } + return 0; +} + +/* Returns true if the ACPI object is a video device which can be + * handled by video.ko. + * The device will get a Linux specific CID added in scan.c to + * identify the device as an ACPI graphics device + * Be aware that the graphics device may not be physically present + * Use acpi_video_get_capabilities() to detect general ACPI video + * capabilities of present cards + */ +long acpi_is_video_device(acpi_handle handle) +{ + long video_caps = 0; + + /* Is this device able to support video switching ? */ + if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS")) + video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; + + /* Is this device able to retrieve a video ROM ? */ + if (acpi_has_method(handle, "_ROM")) + video_caps |= ACPI_VIDEO_ROM_AVAILABLE; + + /* Is this device able to configure which video head to be POSTed ? */ + if (acpi_has_method(handle, "_VPO") && + acpi_has_method(handle, "_GPD") && + acpi_has_method(handle, "_SPD")) + video_caps |= ACPI_VIDEO_DEVICE_POSTING; + + /* Only check for backlight functionality if one of the above hit. 
*/ + if (video_caps) + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, + &video_caps, NULL); + + return video_caps; +} +EXPORT_SYMBOL(acpi_is_video_device); + const char *acpi_device_hid(struct acpi_device *device) { struct acpi_hardware_id *hid; @@ -2109,6 +2154,39 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp) kfree(pnp->unique_id); } +static void acpi_init_coherency(struct acpi_device *adev) +{ + unsigned long long cca = 0; + acpi_status status; + struct acpi_device *parent = adev->parent; + + if (parent && parent->flags.cca_seen) { + /* + * From ACPI spec, OSPM will ignore _CCA if an ancestor + * already saw one. + */ + adev->flags.cca_seen = 1; + cca = parent->flags.coherent_dma; + } else { + status = acpi_evaluate_integer(adev->handle, "_CCA", + NULL, &cca); + if (ACPI_SUCCESS(status)) + adev->flags.cca_seen = 1; + else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED)) + /* + * If architecture does not specify that _CCA is + * required for DMA-able devices (e.g. x86), + * we default to _CCA=1. + */ + cca = 1; + else + acpi_handle_debug(adev->handle, + "ACPI device is missing _CCA.\n"); + } + + adev->flags.coherent_dma = cca; +} + void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, int type, unsigned long long sta) { @@ -2127,6 +2205,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device->flags.visited = false; device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); + acpi_init_coherency(device); } void acpi_device_add_finalize(struct acpi_device *device) @@ -2405,7 +2484,7 @@ static void acpi_default_enumeration(struct acpi_device *device) } static const struct acpi_device_id generic_device_ids[] = { - {"PRP0001", }, + {ACPI_DT_NAMESPACE_HID, }, + {"", }, }; @@ -2413,8 +2492,8 @@ static int acpi_generic_device_attach(struct acpi_device *adev, const struct acpi_device_id *not_used) { /* - * Since PRP0001 is the only ID handled here, the test below can be - * unconditional. + * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test + * below can be unconditional. */ if (adev->data.of_compatible) acpi_default_enumeration(adev); diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index cd49a3982b6a..67c548ad3764 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -712,3 +712,18 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs) return false; } EXPORT_SYMBOL(acpi_check_dsm); + +/* + * acpi_backlight= handling; this is done here rather than in video_detect.c + * because __setup cannot be used in modules. + */ +char acpi_video_backlight_string[16]; +EXPORT_SYMBOL(acpi_video_backlight_string); + +static int __init acpi_backlight(char *str) +{ + strlcpy(acpi_video_backlight_string, str, + sizeof(acpi_video_backlight_string)); + return 1; +} +__setup("acpi_backlight=", acpi_backlight); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c42feb2bacd0..815f75ef2411 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -1,106 +1,61 @@ /* + * Copyright (C) 2015 Red Hat Inc.
+ * Hans de Goede <hdegoede@redhat.com> * Copyright (C) 2008 SuSE Linux Products GmbH * Thomas Renninger <trenn@suse.de> * * May be copied or modified under the terms of the GNU General Public License * * video_detect.c: - * Provides acpi_is_video_device() for early scanning of ACPI devices in scan.c - * There a Linux specific (Spec does not provide a HID for video devices) is - * assigned - * * After PCI devices are glued with ACPI devices * acpi_get_pci_dev() can be called to identify ACPI graphics * devices for which a real graphics card is plugged in * - * Now acpi_video_get_capabilities() can be called to check which - * capabilities the graphics cards plugged in support. The check for general - * video capabilities will be triggered by the first caller of - * acpi_video_get_capabilities(NULL); which will happen when the first - * backlight switching supporting driver calls: - * acpi_video_backlight_support(); - * * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) * are available, video.ko should be used to handle the device. * * Otherwise vendor specific drivers like thinkpad_acpi, asus-laptop, * sony_acpi,... can take care about backlight brightness. * - * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) - * this file will not be compiled, acpi_video_get_capabilities() and - * acpi_video_backlight_support() will always return 0 and vendor specific - * drivers always can handle backlight. + * Backlight drivers can use acpi_video_get_backlight_type() to determine + * which driver should handle the backlight. * + * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) + * this file will not be compiled and acpi_video_get_backlight_type() will + * always return acpi_backlight_vendor. */ #include <linux/export.h> #include <linux/acpi.h> +#include <linux/backlight.h> #include <linux/dmi.h> +#include <linux/module.h> #include <linux/pci.h> - -#include "internal.h" +#include <linux/types.h> +#include <acpi/video.h> ACPI_MODULE_NAME("video"); #define _COMPONENT ACPI_VIDEO_COMPONENT -static long acpi_video_support; -static bool acpi_video_caps_checked; +void acpi_video_unregister_backlight(void); -static acpi_status -acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, - void **return_value) -{ - long *cap = context; +static bool backlight_notifier_registered; +static struct notifier_block backlight_nb; - if (acpi_has_method(handle, "_BCM") && - acpi_has_method(handle, "_BCL")) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight " - "support\n")); - *cap |= ACPI_VIDEO_BACKLIGHT; - if (!acpi_has_method(handle, "_BQC")) - printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " - "cannot determine initial brightness\n"); - /* We have backlight support, no need to scan further */ - return AE_CTRL_TERMINATE; - } - return 0; -} +static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; +static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef; -/* Returns true if the ACPI object is a video device which can be - * handled by video.ko. 
- * The device will get a Linux specific CID added in scan.c to - * identify the device as an ACPI graphics device - * Be aware that the graphics device may not be physically present - * Use acpi_video_get_capabilities() to detect general ACPI video - * capabilities of present cards - */ -long acpi_is_video_device(acpi_handle handle) +static void acpi_video_parse_cmdline(void) { - long video_caps = 0; - - /* Is this device able to support video switching ? */ - if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS")) - video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; - - /* Is this device able to retrieve a video ROM ? */ - if (acpi_has_method(handle, "_ROM")) - video_caps |= ACPI_VIDEO_ROM_AVAILABLE; - - /* Is this device able to configure which video head to be POSTed ? */ - if (acpi_has_method(handle, "_VPO") && - acpi_has_method(handle, "_GPD") && - acpi_has_method(handle, "_SPD")) - video_caps |= ACPI_VIDEO_DEVICE_POSTING; - - /* Only check for backlight functionality if one of the above hit. */ - if (video_caps) - acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, - ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL, - &video_caps, NULL); - - return video_caps; + if (!strcmp("vendor", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_vendor; + if (!strcmp("video", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_video; + if (!strcmp("native", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_native; + if (!strcmp("none", acpi_video_backlight_string)) + acpi_backlight_cmdline = acpi_backlight_none; } -EXPORT_SYMBOL(acpi_is_video_device); static acpi_status find_video(acpi_handle handle, u32 lvl, void *context, void **rv) @@ -109,7 +64,7 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) struct pci_dev *dev; struct acpi_device *acpi_dev; - const struct acpi_device_id video_ids[] = { + static const struct acpi_device_id video_ids[] = { {ACPI_VIDEO_HID, 0}, {"", 0}, }; @@ -130,11 +85,23 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) { - acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; + acpi_backlight_dmi = acpi_backlight_vendor; + return 0; +} + +static int video_detect_force_video(const struct dmi_system_id *d) +{ + acpi_backlight_dmi = acpi_backlight_video; return 0; } -static struct dmi_system_id video_detect_dmi_table[] = { +static int video_detect_force_native(const struct dmi_system_id *d) +{ + acpi_backlight_dmi = acpi_backlight_native; + return 0; +} + +static const struct dmi_system_id video_detect_dmi_table[] = { /* On Samsung X360, the BIOS will set a flag (VDRV) if generic * ACPI backlight device is used. This flag will definitively break * the backlight interface (even the vendor interface) untill next @@ -174,137 +141,209 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), }, }, + + /* + * These models have a working acpi_video backlight control, and using + * native backlight causes a regression where backlight does not work + * when userspace is not handling brightness key events. 
Disable + * native_backlight on these to fix this: + * https://bugzilla.kernel.org/show_bug.cgi?id=81691 + */ + { + .callback = video_detect_force_video, + .ident = "ThinkPad T420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "ThinkPad T520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "ThinkPad X201s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"), + }, + }, + + /* The native backlight controls do not work on some older machines */ + { + /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ + .callback = video_detect_force_video, + .ident = "HP ENVY 15 Notebook", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), + }, + }, + { + .callback = video_detect_force_video, + .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, + "370R4E/370R4V/370R5E/3570RE/370R5V"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, + "3570R/370R/470R/450R/510R/4450RV"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 730U3E/740U3E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), + }, + }, + { + /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */ + .callback = video_detect_force_video, + .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, + "900X3C/900X3D/900X3E/900X4C/900X4D"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ + .callback = video_detect_force_video, + .ident = "Dell XPS15 L521X", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), + }, + }, + + /* Non win8 machines which need native backlight nevertheless */ + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */ + .callback = video_detect_force_native, + .ident = "Lenovo Ideapad Z570", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "102434U"), + }, + }, + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ + .callback = video_detect_force_native, + .ident = "Apple MacBook Pro 12,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"), + }, + }, { }, }; +static int acpi_video_backlight_notify(struct notifier_block *nb, + unsigned long val, void *bd) +{ + struct backlight_device *backlight = bd; + + /* A raw bl registering may change video -> native */ + if (backlight->props.type == BACKLIGHT_RAW && + val == BACKLIGHT_REGISTERED && + acpi_video_get_backlight_type() 
!= acpi_backlight_video) + acpi_video_unregister_backlight(); + + return NOTIFY_OK; +} + /* - * Returns the video capabilities of a specific ACPI graphics device + * Determine which type of backlight interface to use on this system; + * first check cmdline, then dmi quirks, then do autodetect. * - * if NULL is passed as argument all ACPI devices are enumerated and - * all graphics capabilities of physically present devices are - * summarized and returned. This is cached and done only once. + * The autodetect order is: + * 1) Is the acpi-video backlight interface supported -> + * no, use a vendor interface + * 2) Is this a win8 "ready" BIOS and do we have a native interface -> + * yes, use a native interface + * 3) Else use the acpi-video interface + * + * Arguably the native on win8 check should be done first, but that would + * be a behavior change, which may cause issues. */ -long acpi_video_get_capabilities(acpi_handle graphics_handle) +enum acpi_backlight_type acpi_video_get_backlight_type(void) { - long caps = 0; - struct acpi_device *tmp_dev; - acpi_status status; - - if (acpi_video_caps_checked && graphics_handle == NULL) - return acpi_video_support; - - if (!graphics_handle) { - /* Only do the global walk through all graphics devices once */ - acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, find_video, NULL, - &caps, NULL); - /* There might be boot param flags set already... */ - acpi_video_support |= caps; - acpi_video_caps_checked = 1; - /* Add blacklists here. Be careful to use the right *DMI* bits - * to still be able to override logic via boot params, e.g.: - * - * if (dmi_name_in_vendors("XY")) { - * acpi_video_support |= - * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; - *} - */ + static DEFINE_MUTEX(init_mutex); + static bool init_done; + static long video_caps; + /* Parse cmdline, dmi and acpi only once */ + mutex_lock(&init_mutex); + if (!init_done) { + acpi_video_parse_cmdline(); dmi_check_system(video_detect_dmi_table); - } else { - status = acpi_bus_get_device(graphics_handle, &tmp_dev); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "Invalid device")); - return 0; - } - acpi_walk_namespace(ACPI_TYPE_DEVICE, graphics_handle, + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_video, NULL, - &caps, NULL); + &video_caps, NULL); + backlight_nb.notifier_call = acpi_video_backlight_notify; + backlight_nb.priority = 0; + if (backlight_register_notifier(&backlight_nb) == 0) + backlight_notifier_registered = true; + init_done = true; } - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "We have 0x%lX video support %s %s\n", - graphics_handle ? caps : acpi_video_support, - graphics_handle ? "on device " : "in general", - graphics_handle ? acpi_device_bid(tmp_dev) : "")); - return caps; -} -EXPORT_SYMBOL(acpi_video_get_capabilities); + mutex_unlock(&init_mutex); -static void acpi_video_caps_check(void) -{ - /* - * We must check whether the ACPI graphics device is physically plugged - * in. Therefore this must be called after binding PCI and ACPI devices - */ - if (!acpi_video_caps_checked) - acpi_video_get_capabilities(NULL); -} + if (acpi_backlight_cmdline != acpi_backlight_undef) + return acpi_backlight_cmdline; -bool acpi_osi_is_win8(void) -{ - return acpi_gbl_osi_data >= ACPI_OSI_WIN_8; -} -EXPORT_SYMBOL(acpi_osi_is_win8); + if (acpi_backlight_dmi != acpi_backlight_undef) + return acpi_backlight_dmi; -/* Promote the vendor interface instead of the generic video module.
- * This function allow DMI blacklists to be implemented by externals - * platform drivers instead of putting a big blacklist in video_detect.c - * After calling this function you will probably want to call - * acpi_video_unregister() to make sure the video module is not loaded - */ -void acpi_video_dmi_promote_vendor(void) -{ - acpi_video_caps_check(); - acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; -} -EXPORT_SYMBOL(acpi_video_dmi_promote_vendor); - -/* To be called when a driver who previously promoted the vendor - * interface */ -void acpi_video_dmi_demote_vendor(void) -{ - acpi_video_caps_check(); - acpi_video_support &= ~ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; -} -EXPORT_SYMBOL(acpi_video_dmi_demote_vendor); - -/* Returns true if video.ko can do backlight switching */ -int acpi_video_backlight_support(void) -{ - acpi_video_caps_check(); - - /* First check for boot param -> highest prio */ - if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR) - return 0; - else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO) - return 1; + if (!(video_caps & ACPI_VIDEO_BACKLIGHT)) + return acpi_backlight_vendor; - /* Then check for DMI blacklist -> second highest prio */ - if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VENDOR) - return 0; - else if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_DMI_VIDEO) - return 1; + if (acpi_osi_is_win8() && backlight_device_registered(BACKLIGHT_RAW)) + return acpi_backlight_native; - /* Then go the default way */ - return acpi_video_support & ACPI_VIDEO_BACKLIGHT; + return acpi_backlight_video; } -EXPORT_SYMBOL(acpi_video_backlight_support); +EXPORT_SYMBOL(acpi_video_get_backlight_type); /* - * Use acpi_backlight=vendor/video to force that backlight switching - * is processed by vendor specific acpi drivers or video.ko driver. + * Set the preferred backlight interface type based on DMI info. 
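+ * (e.g. a platform driver's DMI quirk callback may simply call + * acpi_video_set_dmi_backlight_type(acpi_backlight_vendor) and be done.)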
+ * This function allows DMI blacklists to be implemented by external + * platform drivers instead of putting a big blacklist in video_detect.c */ -static int __init acpi_backlight(char *str) +void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) { - if (str == NULL || *str == '\0') - return 1; - else { - if (!strcmp("vendor", str)) - acpi_video_support |= - ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; - if (!strcmp("video", str)) - acpi_video_support |= - ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; - } - return 1; + acpi_backlight_dmi = type; + /* Remove acpi-video backlight interface if it is no longer desired */ + if (acpi_video_get_backlight_type() != acpi_backlight_video) + acpi_video_unregister_backlight(); +} +EXPORT_SYMBOL(acpi_video_set_dmi_backlight_type); + +void __exit acpi_video_detect_exit(void) +{ + if (backlight_notifier_registered) + backlight_unregister_notifier(&backlight_nb); } -__setup("acpi_backlight=", acpi_backlight); diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 1cb8544598d5..f94a6ccfe787 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 7fdd0172605a..acef9f9f759a 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -15,6 +15,7 @@ #include <linux/clkdev.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/pm_runtime.h> #ifdef CONFIG_PM @@ -67,7 +68,8 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) } else { clk_prepare(ce->clk); ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); + dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); } } @@ -93,7 +95,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id, return -ENOMEM; } } else { - if (IS_ERR(ce->clk) || !__clk_get(clk)) { + if (IS_ERR(clk) || !__clk_get(clk)) { kfree(ce); return -ENOENT; } @@ -367,6 +369,43 @@ static int pm_clk_notify(struct notifier_block *nb, return 0; } +int pm_clk_runtime_suspend(struct device *dev) +{ + int ret; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_generic_runtime_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend device\n"); + return ret; + } + + ret = pm_clk_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend clock\n"); + pm_generic_runtime_resume(dev); + return ret; + } + + return 0; +} + +int pm_clk_runtime_resume(struct device *dev) +{ + int ret; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_clk_resume(dev); + if (ret) { + dev_err(dev, "failed to resume clock\n"); + return ret; + } + + return pm_generic_runtime_resume(dev); +} + #else /* !CONFIG_PM */ /** diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 2327613d4539..cdd547bd67df 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -181,7 +181,7 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) genpd->cpuidle_data->idle_state->exit_latency = usecs64; } -static int genpd_power_on(struct generic_pm_domain *genpd) +static int genpd_power_on(struct generic_pm_domain *genpd, bool timed) { ktime_t time_start; s64 elapsed_ns; @@ -190,6 +190,9 @@ static int 
genpd_power_on(struct generic_pm_domain *genpd) if (!genpd->power_on) return 0; + if (!timed) + return genpd->power_on(genpd); + time_start = ktime_get(); ret = genpd->power_on(genpd); if (ret) @@ -208,7 +211,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd) return ret; } -static int genpd_power_off(struct generic_pm_domain *genpd) +static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) { ktime_t time_start; s64 elapsed_ns; @@ -217,6 +220,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd) if (!genpd->power_off) return 0; + if (!timed) + return genpd->power_off(genpd); + time_start = ktime_get(); ret = genpd->power_off(genpd); if (ret == -EBUSY) @@ -305,7 +311,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) } } - ret = genpd_power_on(genpd); + ret = genpd_power_on(genpd, true); if (ret) goto err; @@ -615,7 +621,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) * the pm_genpd_poweron() restore power for us (this shouldn't * happen very often). */ - ret = genpd_power_off(genpd); + ret = genpd_power_off(genpd, true); if (ret == -EBUSY) { genpd_set_active(genpd); goto out; @@ -827,6 +833,7 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. + * @timed: True if latency measurements are allowed. * * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. @@ -836,7 +843,8 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, * executed sequentially, so it is guaranteed that it will never run twice in * parallel). */ -static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) +static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd, + bool timed) { struct gpd_link *link; @@ -847,26 +855,28 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) || atomic_read(&genpd->sd_count) > 0) return; - genpd_power_off(genpd); + genpd_power_off(genpd, timed); genpd->status = GPD_STATE_POWER_OFF; list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); - pm_genpd_sync_poweroff(link->master); + pm_genpd_sync_poweroff(link->master, timed); } } /** * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters. * @genpd: PM domain to power on. + * @timed: True if latency measurements are allowed. * * This function is only called in "noirq" and "syscore" stages of system power * transitions, so it need not acquire locks (all of the "noirq" callbacks are * executed sequentially, so it is guaranteed that it will never run twice in * parallel). */ -static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) +static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, + bool timed) { struct gpd_link *link; @@ -874,11 +884,11 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd) return; list_for_each_entry(link, &genpd->slave_links, slave_node) { - pm_genpd_sync_poweron(link->master); + pm_genpd_sync_poweron(link->master, timed); genpd_sd_counter_inc(link->master); } - genpd_power_on(genpd); + genpd_power_on(genpd, timed); genpd->status = GPD_STATE_ACTIVE; } @@ -1056,7 +1066,7 @@ static int pm_genpd_suspend_noirq(struct device *dev) * the same PM domain, so it is not necessary to use locking here. 
*/ genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd); + pm_genpd_sync_poweroff(genpd, true); return 0; } @@ -1086,7 +1096,7 @@ static int pm_genpd_resume_noirq(struct device *dev) * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. */ - pm_genpd_sync_poweron(genpd); + pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; return genpd_start_dev(genpd, dev); @@ -1300,7 +1310,7 @@ static int pm_genpd_restore_noirq(struct device *dev) * If the domain was off before the hibernation, make * sure it will be off going forward. */ - genpd_power_off(genpd); + genpd_power_off(genpd, true); return 0; } @@ -1309,7 +1319,7 @@ static int pm_genpd_restore_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - pm_genpd_sync_poweron(genpd); + pm_genpd_sync_poweron(genpd, true); return genpd_start_dev(genpd, dev); } @@ -1367,9 +1377,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend) if (suspend) { genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd); + pm_genpd_sync_poweroff(genpd, false); } else { - pm_genpd_sync_poweron(genpd); + pm_genpd_sync_poweron(genpd, false); genpd->suspended_count--; } } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 3d874eca7104..30b7bbfdc558 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -24,6 +24,7 @@ #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/pm-trace.h> +#include <linux/pm_wakeirq.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/async.h> @@ -587,6 +588,7 @@ void dpm_resume_noirq(pm_message_t state) async_synchronize_full(); dpm_show_time(starttime, state, "noirq"); resume_device_irqs(); + device_wakeup_disarm_wake_irqs(); cpuidle_resume(); trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); } @@ -920,9 +922,7 @@ static void device_complete(struct device *dev, pm_message_t state) if (callback) { pm_dev_dbg(dev, state, info); - trace_device_pm_callback_start(dev, info, state.event); callback(dev); - trace_device_pm_callback_end(dev, 0); } device_unlock(dev); @@ -954,7 +954,9 @@ void dpm_complete(pm_message_t state) list_move(&dev->power.entry, &list); mutex_unlock(&dpm_list_mtx); + trace_device_pm_callback_start(dev, "", state.event); device_complete(dev, state); + trace_device_pm_callback_end(dev, 0); mutex_lock(&dpm_list_mtx); put_device(dev); @@ -1104,6 +1106,7 @@ int dpm_suspend_noirq(pm_message_t state) trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); cpuidle_pause(); + device_wakeup_arm_wake_irqs(); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); pm_transition = state; @@ -1585,11 +1588,8 @@ static int device_prepare(struct device *dev, pm_message_t state) callback = dev->driver->pm->prepare; } - if (callback) { - trace_device_pm_callback_start(dev, info, state.event); + if (callback) ret = callback(dev); - trace_device_pm_callback_end(dev, ret); - } device_unlock(dev); @@ -1631,7 +1631,9 @@ int dpm_prepare(pm_message_t state) get_device(dev); mutex_unlock(&dpm_list_mtx); + trace_device_pm_callback_start(dev, "", state.event); error = device_prepare(dev, state); + trace_device_pm_callback_end(dev, error); mutex_lock(&dpm_list_mtx); if (error) { diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index b6b8a273c5da..f1a5d95e7b20 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -20,6 +20,46 @@ static inline void pm_runtime_early_init(struct device *dev) extern 
void pm_runtime_init(struct device *dev); extern void pm_runtime_remove(struct device *dev); +struct wake_irq { + struct device *dev; + int irq; + bool dedicated_irq:1; +}; + +extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); +extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); + +#ifdef CONFIG_PM_SLEEP + +extern int device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq); +extern void device_wakeup_detach_irq(struct device *dev); +extern void device_wakeup_arm_wake_irqs(void); +extern void device_wakeup_disarm_wake_irqs(void); + +#else + +static inline int +device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq) +{ + return 0; +} + +static inline void device_wakeup_detach_irq(struct device *dev) +{ +} + +static inline void device_wakeup_arm_wake_irqs(void) +{ +} + +static inline void device_wakeup_disarm_wake_irqs(void) +{ +} + +#endif /* CONFIG_PM_SLEEP */ + /* * sysfs.c */ @@ -52,6 +92,14 @@ static inline void wakeup_sysfs_remove(struct device *dev) {} static inline int pm_qos_sysfs_add(struct device *dev) { return 0; } static inline void pm_qos_sysfs_remove(struct device *dev) {} +static inline void dev_pm_arm_wake_irq(struct wake_irq *wirq) +{ +} + +static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq) +{ +} + #endif #ifdef CONFIG_PM_SLEEP diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 5070c4fe8542..e1a10a03df8e 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/export.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> #include <trace/events/rpm.h> #include "power.h" @@ -514,6 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_suspend); + dev_pm_enable_wake_irq(dev); retval = rpm_callback(callback, dev); if (retval) goto fail; @@ -552,6 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) return retval; fail: + dev_pm_disable_wake_irq(dev); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); @@ -734,13 +737,16 @@ static int rpm_resume(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_resume); + dev_pm_disable_wake_irq(dev); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_cancel_pending(dev); + dev_pm_enable_wake_irq(dev); } else { no_callback: __update_runtime_status(dev, RPM_ACTIVE); + pm_runtime_mark_last_busy(dev); if (parent) atomic_inc(&parent->power.child_count); } diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c new file mode 100644 index 000000000000..7470004ca810 --- /dev/null +++ b/drivers/base/power/wakeirq.c @@ -0,0 +1,273 @@ +/* + * wakeirq.c - Device wakeirq helper functions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> + +#include "power.h" + +/** + * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ + * @dev: Device entry + * @irq: Device wake-up capable interrupt + * @wirq: Wake irq specific data + * + * Internal function to attach either a device IO interrupt or a + * dedicated wake-up interrupt as a wake IRQ. + */ +static int dev_pm_attach_wake_irq(struct device *dev, int irq, + struct wake_irq *wirq) +{ + unsigned long flags; + int err; + + if (!dev || !wirq) + return -EINVAL; + + spin_lock_irqsave(&dev->power.lock, flags); + if (dev_WARN_ONCE(dev, dev->power.wakeirq, + "wake irq already initialized\n")) { + spin_unlock_irqrestore(&dev->power.lock, flags); + return -EEXIST; + } + + dev->power.wakeirq = wirq; + spin_unlock_irqrestore(&dev->power.lock, flags); + + err = device_wakeup_attach_irq(dev, wirq); + if (err) + return err; + + return 0; +} + +/** + * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ + * @dev: Device entry + * @irq: Device IO interrupt + * + * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets + * automatically configured for wake-up from suspend based + * on the device specific sysfs wakeup entry. Typically called + * during driver probe after calling device_init_wakeup(). + */ +int dev_pm_set_wake_irq(struct device *dev, int irq) +{ + struct wake_irq *wirq; + int err; + + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); + if (!wirq) + return -ENOMEM; + + wirq->dev = dev; + wirq->irq = irq; + + err = dev_pm_attach_wake_irq(dev, irq, wirq); + if (err) + kfree(wirq); + + return err; +} +EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq); + +/** + * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ + * @dev: Device entry + * + * Detach a device wake IRQ and free resources. + * + * Note that it's OK for drivers to call this without calling + * dev_pm_set_wake_irq() as all the driver instances may not have + * a wake IRQ configured. This avoids adding wake IRQ specific + * checks into the drivers. + */ +void dev_pm_clear_wake_irq(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + unsigned long flags; + + if (!wirq) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + dev->power.wakeirq = NULL; + spin_unlock_irqrestore(&dev->power.lock, flags); + + device_wakeup_detach_irq(dev); + if (wirq->dedicated_irq) + free_irq(wirq->irq, wirq); + kfree(wirq); +} +EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); + +/** + * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts + * @irq: Device specific dedicated wake-up interrupt + * @_wirq: Wake IRQ data + * + * Some devices have a separate wake-up interrupt in addition to the + * device IO interrupt. The wake-up interrupt signals that a device + * should be woken up from its idle state. This handler uses device + * specific pm_runtime functions to wake the device, and then it's + * up to the device to do whatever it needs to. Note that as the + * device may need to restore context and start up regulators, we + * use a threaded IRQ. + * + * Also note that we are not resending the lost device interrupts. + * We assume that the wake-up interrupt just needs to wake up the + * device, and then the device's pm_runtime_resume() can deal with the + * situation.
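+ *
+ * Note also that the dedicated IRQ is requested with IRQF_ONESHOT
+ * below, so it stays masked until this threaded handler has run.
+ *
+ * A minimal consumer sketch (hypothetical driver; the "wakeup"
+ * interrupt name is an assumption, not part of this API):
+ *
+ *	irq = platform_get_irq_byname(pdev, "wakeup");
+ *	if (irq > 0)
+ *		err = dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);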
+ */ +static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) +{ + struct wake_irq *wirq = _wirq; + int res; + + /* We don't want RPM_ASYNC or RPM_NOWAIT here */ + res = pm_runtime_resume(wirq->dev); + if (res < 0) + dev_warn(wirq->dev, + "wake IRQ with no resume: %i\n", res); + + return IRQ_HANDLED; +} + +/** + * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has + * a dedicated wake-up interrupt in addition to the device IO + * interrupt. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq() + * functions. + */ +int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +{ + struct wake_irq *wirq; + int err; + + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); + if (!wirq) + return -ENOMEM; + + wirq->dev = dev; + wirq->irq = irq; + wirq->dedicated_irq = true; + irq_set_status_flags(irq, IRQ_NOAUTOEN); + + /* + * Consumer device may need to power up and restore state + * so we use a threaded irq. + */ + err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq, + IRQF_ONESHOT, dev_name(dev), wirq); + if (err) + goto err_free; + + err = dev_pm_attach_wake_irq(dev, irq, wirq); + if (err) + goto err_free_irq; + + return err; + +err_free_irq: + free_irq(irq, wirq); +err_free: + kfree(wirq); + + return err; +} +EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); + +/** + * dev_pm_enable_wake_irq - Enable device wake-up interrupt + * @dev: Device + * + * Called from the bus code or the device driver for + * runtime_suspend() to enable the wake-up interrupt while + * the device is running. + * + * Note that for runtime_suspend() the wake-up interrupts + * should be unconditionally enabled, unlike for suspend() + * where enabling is conditional. + */ +void dev_pm_enable_wake_irq(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (wirq && wirq->dedicated_irq) + enable_irq(wirq->irq); +} +EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); + +/** + * dev_pm_disable_wake_irq - Disable device wake-up interrupt + * @dev: Device + * + * Called from the bus code or the device driver for + * runtime_resume() to disable the wake-up interrupt while + * the device is running. + */ +void dev_pm_disable_wake_irq(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (wirq && wirq->dedicated_irq) + disable_irq_nosync(wirq->irq); +} +EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); + +/** + * dev_pm_arm_wake_irq - Arm device wake-up + * @wirq: Device wake-up interrupt + * + * Sets up the wake-up event conditionally based on the + * device_may_wakeup(). + */ +void dev_pm_arm_wake_irq(struct wake_irq *wirq) +{ + if (!wirq) + return; + + if (device_may_wakeup(wirq->dev)) + enable_irq_wake(wirq->irq); +} + +/** + * dev_pm_disarm_wake_irq - Disarm device wake-up + * @wirq: Device wake-up interrupt + * + * Clears up the wake-up event conditionally based on the + * device_may_wakeup().
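+ *
+ * Like dev_pm_arm_wake_irq(), this is not called by drivers directly:
+ * the PM core invokes it for each wakeup source from
+ * device_wakeup_disarm_wake_irqs() during noirq resume.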
+ */ +void dev_pm_disarm_wake_irq(struct wake_irq *wirq) +{ + if (!wirq) + return; + + if (device_may_wakeup(wirq->dev)) + disable_irq_wake(wirq->irq); +} diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 77262009f89d..40f71603378c 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -14,6 +14,7 @@ #include <linux/suspend.h> #include <linux/seq_file.h> #include <linux/debugfs.h> +#include <linux/pm_wakeirq.h> #include <trace/events/power.h> #include "power.h" @@ -56,6 +57,11 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +static struct wakeup_source deleted_ws = { + .name = "deleted", + .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), +}; + /** * wakeup_source_prepare - Prepare a new wakeup source for initialization. * @ws: Wakeup source to prepare. @@ -107,6 +113,34 @@ void wakeup_source_drop(struct wakeup_source *ws) } EXPORT_SYMBOL_GPL(wakeup_source_drop); +/* + * Record the statistics of a wakeup_source being deleted into a dummy wakeup_source. + */ +static void wakeup_source_record(struct wakeup_source *ws) +{ + unsigned long flags; + + spin_lock_irqsave(&deleted_ws.lock, flags); + + if (ws->event_count) { + deleted_ws.total_time = + ktime_add(deleted_ws.total_time, ws->total_time); + deleted_ws.prevent_sleep_time = + ktime_add(deleted_ws.prevent_sleep_time, + ws->prevent_sleep_time); + deleted_ws.max_time = + ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ? + deleted_ws.max_time : ws->max_time; + deleted_ws.event_count += ws->event_count; + deleted_ws.active_count += ws->active_count; + deleted_ws.relax_count += ws->relax_count; + deleted_ws.expire_count += ws->expire_count; + deleted_ws.wakeup_count += ws->wakeup_count; + } + + spin_unlock_irqrestore(&deleted_ws.lock, flags); +} + /** * wakeup_source_destroy - Destroy a struct wakeup_source object. * @ws: Wakeup source to destroy. @@ -119,6 +153,7 @@ void wakeup_source_destroy(struct wakeup_source *ws) return; wakeup_source_drop(ws); + wakeup_source_record(ws); kfree(ws->name); kfree(ws); } @@ -239,6 +274,97 @@ int device_wakeup_enable(struct device *dev) EXPORT_SYMBOL_GPL(device_wakeup_enable); /** + * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source + * @dev: Device to handle + * @wakeirq: Device specific wakeirq entry + * + * Attach a device wakeirq to the wakeup source so the device + * wake IRQ can be configured automatically for suspend and + * resume. + */ +int device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq) +{ + struct wakeup_source *ws; + int ret = 0; + + spin_lock_irq(&dev->power.lock); + ws = dev->power.wakeup; + if (!ws) { + dev_err(dev, "forgot to call device_init_wakeup?\n"); + ret = -EINVAL; + goto unlock; + } + + if (ws->wakeirq) { + ret = -EEXIST; + goto unlock; + } + + ws->wakeirq = wakeirq; + +unlock: + spin_unlock_irq(&dev->power.lock); + + return ret; +} + +/** + * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source + * @dev: Device to handle + * + * Removes a device wakeirq from the wakeup source. + */ +void device_wakeup_detach_irq(struct device *dev) +{ + struct wakeup_source *ws; + + spin_lock_irq(&dev->power.lock); + ws = dev->power.wakeup; + if (!ws) + goto unlock; + + ws->wakeirq = NULL; + +unlock: + spin_unlock_irq(&dev->power.lock); +} + +/** + * device_wakeup_arm_wake_irqs(void) + * + * Iterates over the list of device wakeirqs to arm them.
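+ * Called from dpm_suspend_noirq(), just before device interrupts are
+ * suspended.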
+ */ +void device_wakeup_arm_wake_irqs(void) +{ + struct wakeup_source *ws; + + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->wakeirq) + dev_pm_arm_wake_irq(ws->wakeirq); + } + rcu_read_unlock(); +} + +/** + * device_wakeup_disarm_wake_irqs(void) + * + * Iterates over the list of device wakeirqs to disarm them. + */ +void device_wakeup_disarm_wake_irqs(void) +{ + struct wakeup_source *ws; + + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->wakeirq) + dev_pm_disarm_wake_irq(ws->wakeirq); + } + rcu_read_unlock(); +} + +/** * device_wakeup_detach - Detach a device's wakeup source object from it. * @dev: Device to detach the wakeup source object from. * @@ -351,6 +477,20 @@ int device_set_wakeup_enable(struct device *dev, bool enable) } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); +/** + * wakeup_source_not_registered - validate the given wakeup source. + * @ws: Wakeup source to be validated. + */ +static bool wakeup_source_not_registered(struct wakeup_source *ws) +{ + /* + * Use timer struct to check if the given source is initialized + * by wakeup_source_add. + */ + return ws->timer.function != pm_wakeup_timer_fn || + ws->timer.data != (unsigned long)ws; +} + /* * The functions below use the observation that each wakeup event starts a * period in which the system should not be suspended. The moment this period @@ -391,6 +531,10 @@ static void wakeup_source_activate(struct wakeup_source *ws) { unsigned int cec; + if (WARN_ONCE(wakeup_source_not_registered(ws), + "unregistered wakeup source\n")) + return; + /* * active wakeup source should bring the system * out of PM_SUSPEND_FREEZE state @@ -894,6 +1038,8 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused) print_wakeup_source_stats(m, ws); rcu_read_unlock(); + print_wakeup_source_stats(m, &deleted_ws); + return 0; } diff --git a/drivers/base/property.c b/drivers/base/property.c index 1d0b116cae95..e645852396ba 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -14,6 +14,7 @@ #include <linux/export.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/property.h> /** @@ -519,3 +520,16 @@ unsigned int device_get_child_node_count(struct device *dev) return count; } EXPORT_SYMBOL_GPL(device_get_child_node_count); + +bool device_dma_is_coherent(struct device *dev) +{ + bool coherent = false; + + if (IS_ENABLED(CONFIG_OF) && dev->of_node) + coherent = of_dma_is_coherent(dev->of_node); + else + acpi_check_dma(ACPI_COMPANION(dev), &coherent); + + return coherent; +} +EXPORT_SYMBOL_GPL(device_dma_is_coherent); diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 6491f45200a7..8b7d7f8e5851 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -61,7 +61,7 @@ static DEVICE_ATTR_RO(physical_package_id); define_id_show_func(core_id); static DEVICE_ATTR_RO(core_id); -define_siblings_show_func(thread_siblings, thread_cpumask); +define_siblings_show_func(thread_siblings, sibling_cpumask); static DEVICE_ATTR_RO(thread_siblings); static DEVICE_ATTR_RO(thread_siblings_list); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index ff20f192b0f6..0422c47261c3 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -139,8 +139,6 @@ static struct board_type products[] = { {0x3214103C, "Smart Array E200i", &SA5_access}, {0x3215103C, "Smart Array E200i", &SA5_access}, {0x3237103C, "Smart Array E500", &SA5_access}, - {0x3223103C, "Smart Array P800", &SA5_access}, -
{0x3234103C, "Smart Array P400", &SA5_access}, {0x323D103C, "Smart Array P700m", &SA5_access}, }; @@ -574,8 +572,6 @@ static void cciss_procinit(ctlr_info_t *h) /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { - 0x324a103C, /* Smart Array P712m */ - 0x324b103C, /* SmartArray P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ @@ -586,12 +582,32 @@ static u32 unresettable_controller[] = { 0x3215103C, /* Smart Array E200i */ 0x3237103C, /* Smart Array E500 */ 0x323D103C, /* Smart Array P700m */ + 0x40800E11, /* Smart Array 5i */ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ + 0x40700E11, /* Smart Array 5300 */ + 0x40820E11, /* Smart Array 532 */ + 0x40830E11, /* Smart Array 5312 */ + 0x409A0E11, /* Smart Array 641 */ + 0x409B0E11, /* Smart Array 642 */ + 0x40910E11, /* Smart Array 6i */ }; /* List of controllers which cannot even be soft reset */ static u32 soft_unresettable_controller[] = { + 0x40800E11, /* Smart Array 5i */ + 0x40700E11, /* Smart Array 5300 */ + 0x40820E11, /* Smart Array 532 */ + 0x40830E11, /* Smart Array 5312 */ + 0x409A0E11, /* Smart Array 641 */ + 0x409B0E11, /* Smart Array 642 */ + 0x40910E11, /* Smart Array 6i */ + /* Exclude 640x boards. These are two pci devices in one slot + * which share a battery backed cache module. One controls the + * cache, the other accesses the cache through the one that controls + * it. If we reset the one controlling the cache, the other will + * likely not be happy. Just forbid resetting this conjoined mess. + */ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ }; @@ -4667,8 +4683,7 @@ static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) */ cciss_lookup_board_id(pdev, &board_id); if (!ctlr_is_resettable(board_id)) { - dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " - "due to shared cache module."); + dev_warn(&pdev->dev, "Controller not resettable\n"); return -ENODEV; } diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index ecd845cd28d8..1537302e56e3 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = { .show_info = cciss_scsi_show_info, .queuecommand = cciss_scsi_queue_command, .this_id = 7, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */ .eh_device_reset_handler= cciss_eh_device_reset_handler, diff --git a/drivers/block/pmem.c b/drivers/block/pmem.c index eabf4a8d0085..095dfaadcaa5 100644 --- a/drivers/block/pmem.c +++ b/drivers/block/pmem.c @@ -139,11 +139,11 @@ static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res) } /* - * Map the memory as non-cachable, as we can't write back the contents + * Map the memory as write-through, as we can't write back the contents * of the CPU caches in case of a crash. 
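+ * With a write-through mapping, stores reach the backing memory
+ * immediately while reads may still be served from cache, which is
+ * what keeps the contents recoverable after a crash.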
*/ err = -ENOMEM; - pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size); + pmem->virt_addr = ioremap_wt(pmem->phys_addr, pmem->size); if (!pmem->virt_addr) goto out_release_region; diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 6f047dcb94c2..c43c3d2baf73 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -57,6 +57,7 @@ #include <linux/of_address.h> #include <linux/debugfs.h> #include <linux/log2.h> +#include <linux/memblock.h> #include <linux/syscore_ops.h> /* @@ -152,13 +153,39 @@ struct mvebu_mbus_state { static struct mvebu_mbus_state mbus_state; +/* + * We provide two variants of the mv_mbus_dram_info() function: + * + * - The normal one, where the described DRAM ranges may overlap with + * the I/O windows, but for which the DRAM ranges are guaranteed to + * have a power of two size. Such ranges are suitable for the DMA + * masters that only DMA between the RAM and the device, which is + * actually all devices except the crypto engines. + * + * - The 'nooverlap' one, where the described DRAM ranges are + * guaranteed to not overlap with the I/O windows, but for which the + * DRAM ranges will not have power of two sizes. They will only be + * aligned on a 64 KB boundary, and have a size multiple of 64 + * KB. Such ranges are suitable for the DMA masters that DMA between + * the crypto SRAM (which is mapped through an I/O window) and a + * device. This is the case for the crypto engines. + */ + static struct mbus_dram_target_info mvebu_mbus_dram_info; +static struct mbus_dram_target_info mvebu_mbus_dram_info_nooverlap; + const struct mbus_dram_target_info *mv_mbus_dram_info(void) { return &mvebu_mbus_dram_info; } EXPORT_SYMBOL_GPL(mv_mbus_dram_info); +const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void) +{ + return &mvebu_mbus_dram_info_nooverlap; +} +EXPORT_SYMBOL_GPL(mv_mbus_dram_info_nooverlap); + /* Checks whether the given window has remap capability */ static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus, const int win) @@ -576,6 +603,95 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) return MVEBU_MBUS_NO_REMAP; } +/* + * Use the memblock information to find the MBus bridge hole in the + * physical address space. + */ +static void __init +mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) +{ + struct memblock_region *r; + uint64_t s = 0; + + for_each_memblock(memory, r) { + /* + * This part of the memory is above 4 GB, so we don't + * care for the MBus bridge hole. + */ + if (r->base >= 0x100000000ULL) + continue; + + /* + * The MBus bridge hole is at the end of the RAM under + * the 4 GB limit. + */ + if (r->base + r->size > s) + s = r->base + r->size; + } + + *start = s; + *end = 0x100000000ULL; +} + +/* + * This function fills in the mvebu_mbus_dram_info_nooverlap data + * structure, by looking at the mvebu_mbus_dram_info data, and + * removing the parts of it that overlap with I/O windows. + */ +static void __init +mvebu_mbus_setup_cpu_target_nooverlap(struct mvebu_mbus_state *mbus) +{ + uint64_t mbus_bridge_base, mbus_bridge_end; + int cs_nooverlap = 0; + int i; + + mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); + + for (i = 0; i < mvebu_mbus_dram_info.num_cs; i++) { + struct mbus_dram_window *w; + u64 base, size, end; + + w = &mvebu_mbus_dram_info.cs[i]; + base = w->base; + size = w->size; + end = base + size; + + /* + * The CS is fully enclosed inside the MBus bridge + * area, so ignore it. 
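+ * For instance, with 2 GB of RAM the bridge hole spans
+ * [0x80000000, 0x100000000), so a 1 GB CS based at 0x80000000 falls
+ * entirely inside it and is skipped.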
+ */ + if (base >= mbus_bridge_base && end <= mbus_bridge_end) + continue; + + /* + * Beginning of CS overlaps with end of MBus, raise CS + * base address, and shrink its size. + */ + if (base >= mbus_bridge_base && end > mbus_bridge_end) { + size -= mbus_bridge_end - base; + base = mbus_bridge_end; + } + + /* + * End of CS overlaps with beginning of MBus, shrink + * CS size. + */ + if (base < mbus_bridge_base && end > mbus_bridge_base) + size -= end - mbus_bridge_base; + + w = &mvebu_mbus_dram_info_nooverlap.cs[cs_nooverlap++]; + w->cs_index = i; + w->mbus_attr = 0xf & ~(1 << i); + if (mbus->hw_io_coherency) + w->mbus_attr |= ATTR_HW_COHERENCY; + w->base = base; + w->size = size; + } + + mvebu_mbus_dram_info_nooverlap.mbus_dram_target_id = TARGET_DDR; + mvebu_mbus_dram_info_nooverlap.num_cs = cs_nooverlap; +} + static void __init mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) { @@ -964,6 +1080,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, mvebu_mbus_disable_window(mbus, win); mbus->soc->setup_cpu_target(mbus); + mvebu_mbus_setup_cpu_target_nooverlap(mbus); if (is_coherent) writel(UNIT_SYNC_BARRIER_ALL, diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index ebee57d715d2..5012e3ad1225 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -301,7 +301,7 @@ static int omap_l3_probe(struct platform_device *pdev) return ret; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP /** * l3_resume_noirq() - resume function for l3_noc @@ -347,7 +347,7 @@ static int l3_resume_noirq(struct device *dev) } static const struct dev_pm_ops l3_dev_pm_ops = { - .resume_noirq = l3_resume_noirq, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq) }; #define L3_DEV_PM_OPS (&l3_dev_pm_ops) diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index a3bebef255ad..0c98a9d51a24 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -33,7 +33,7 @@ #include <asm/io.h> #include <asm/msr.h> #include <asm/cpufeature.h> -#include <asm/i387.h> +#include <asm/fpu/api.h> diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 8dd48a2be911..fc061f7c2bd1 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -532,9 +532,8 @@ static int reader_config(struct pcmcia_device *link, int devno) fail_rc = pcmcia_enable_device(link); if (fail_rc != 0) { - dev_printk(KERN_INFO, &link->dev, - "pcmcia_enable_device failed 0x%x\n", - fail_rc); + dev_info(&link->dev, "pcmcia_enable_device failed 0x%x\n", + fail_rc); goto cs_release; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 9cd6968e2f92..d0da5d852d41 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait); static struct fasync_struct *fasync; +static DEFINE_SPINLOCK(random_ready_list_lock); +static LIST_HEAD(random_ready_list); + /********************************************************************** * * OS independent entropy store. 
Here are the functions which handle @@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f) f->count++; } +static void process_random_ready_list(void) +{ + unsigned long flags; + struct random_ready_callback *rdy, *tmp; + + spin_lock_irqsave(&random_ready_list_lock, flags); + list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { + struct module *owner = rdy->owner; + + list_del_init(&rdy->list); + rdy->func(rdy); + module_put(owner); + } + spin_unlock_irqrestore(&random_ready_list_lock, flags); +} + /* * Credit (or debit) the entropy store with n bits of entropy. * Use credit_entropy_bits_safe() if the value comes from userspace @@ -660,7 +679,8 @@ retry: r->entropy_total = 0; if (r == &nonblocking_pool) { prandom_reseed_late(); - wake_up_interruptible(&urandom_init_wait); + process_random_ready_list(); + wake_up_all(&urandom_init_wait); pr_notice("random: %s pool is initialized\n", r->name); } } @@ -1245,6 +1265,64 @@ void get_random_bytes(void *buf, int nbytes) EXPORT_SYMBOL(get_random_bytes); /* + * Add a callback function that will be invoked when the nonblocking + * pool is initialised. + * + * returns: 0 if callback is successfully added + * -EALREADY if pool is already initialised (callback not called) + * -ENOENT if module for callback is not alive + */ +int add_random_ready_callback(struct random_ready_callback *rdy) +{ + struct module *owner; + unsigned long flags; + int err = -EALREADY; + + if (likely(nonblocking_pool.initialized)) + return err; + + owner = rdy->owner; + if (!try_module_get(owner)) + return -ENOENT; + + spin_lock_irqsave(&random_ready_list_lock, flags); + if (nonblocking_pool.initialized) + goto out; + + owner = NULL; + + list_add(&rdy->list, &random_ready_list); + err = 0; + +out: + spin_unlock_irqrestore(&random_ready_list_lock, flags); + + module_put(owner); + + return err; +} +EXPORT_SYMBOL(add_random_ready_callback); + +/* + * Delete a previously registered readiness callback function. + */ +void del_random_ready_callback(struct random_ready_callback *rdy) +{ + unsigned long flags; + struct module *owner = NULL; + + spin_lock_irqsave(&random_ready_list_lock, flags); + if (!list_empty(&rdy->list)) { + list_del_init(&rdy->list); + owner = rdy->owner; + } + spin_unlock_irqrestore(&random_ready_list_lock, flags); + + module_put(owner); +} +EXPORT_SYMBOL(del_random_ready_callback); + +/* * This function will use the architecture-specific hardware random * number generator if it is available. 
The arch-specific hw RNG will * almost certainly be faster than what we can do in software, but it diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index bfa1e64e267d..9b13a303d3f8 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -242,14 +242,12 @@ static int s2mps11_clk_probe(struct platform_device *pdev) goto err_reg; } - s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk, + s2mps11_clk->lookup = clkdev_create(s2mps11_clk->clk, s2mps11_name(s2mps11_clk), NULL); if (!s2mps11_clk->lookup) { ret = -ENOMEM; goto err_lup; } - - clkdev_add(s2mps11_clk->lookup); } for (i = 0; i < S2MPS11_CLKS_NUM; i++) { diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 1fcb6ef2cdac..c0eaf0973bd2 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -177,7 +177,7 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id) if (!cl) goto out; - clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id); + clk = __clk_create_clk(cl->clk_hw, dev_id, con_id); if (IS_ERR(clk)) goto out; @@ -215,18 +215,26 @@ void clk_put(struct clk *clk) } EXPORT_SYMBOL(clk_put); -void clkdev_add(struct clk_lookup *cl) +static void __clkdev_add(struct clk_lookup *cl) { mutex_lock(&clocks_mutex); list_add_tail(&cl->node, &clocks); mutex_unlock(&clocks_mutex); } + +void clkdev_add(struct clk_lookup *cl) +{ + if (!cl->clk_hw) + cl->clk_hw = __clk_get_hw(cl->clk); + __clkdev_add(cl); +} EXPORT_SYMBOL(clkdev_add); -void __init clkdev_add_table(struct clk_lookup *cl, size_t num) +void clkdev_add_table(struct clk_lookup *cl, size_t num) { mutex_lock(&clocks_mutex); while (num--) { + cl->clk_hw = __clk_get_hw(cl->clk); list_add_tail(&cl->node, &clocks); cl++; } @@ -243,7 +251,7 @@ struct clk_lookup_alloc { }; static struct clk_lookup * __init_refok -vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, +vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, va_list ap) { struct clk_lookup_alloc *cla; @@ -252,7 +260,7 @@ vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, if (!cla) return NULL; - cla->cl.clk = clk; + cla->cl.clk_hw = hw; if (con_id) { strlcpy(cla->con_id, con_id, sizeof(cla->con_id)); cla->cl.con_id = cla->con_id; @@ -266,6 +274,19 @@ vclkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, return &cla->cl; } +static struct clk_lookup * +vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt, + va_list ap) +{ + struct clk_lookup *cl; + + cl = vclkdev_alloc(hw, con_id, dev_fmt, ap); + if (cl) + __clkdev_add(cl); + + return cl; +} + struct clk_lookup * __init_refok clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) { @@ -273,28 +294,49 @@ clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...) va_list ap; va_start(ap, dev_fmt); - cl = vclkdev_alloc(clk, con_id, dev_fmt, ap); + cl = vclkdev_alloc(__clk_get_hw(clk), con_id, dev_fmt, ap); va_end(ap); return cl; } EXPORT_SYMBOL(clkdev_alloc); -int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, - struct device *dev) +/** + * clkdev_create - allocate and add a clkdev lookup structure + * @clk: struct clk to associate with all clk_lookups + * @con_id: connection ID string on device + * @dev_fmt: format string describing device name + * + * Returns a clk_lookup structure, which can be later unregistered and + * freed. + */ +struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, + const char *dev_fmt, ...) 
{ - struct clk *r = clk_get(dev, id); + struct clk_lookup *cl; + va_list ap; + + va_start(ap, dev_fmt); + cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap); + va_end(ap); + + return cl; +} +EXPORT_SYMBOL_GPL(clkdev_create); + +int clk_add_alias(const char *alias, const char *alias_dev_name, + const char *con_id, struct device *dev) +{ + struct clk *r = clk_get(dev, con_id); struct clk_lookup *l; if (IS_ERR(r)) return PTR_ERR(r); - l = clkdev_alloc(r, alias, alias_dev_name); + l = clkdev_create(r, alias, "%s", alias_dev_name); clk_put(r); - if (!l) - return -ENODEV; - clkdev_add(l); - return 0; + + return l ? 0 : -ENODEV; } EXPORT_SYMBOL(clk_add_alias); @@ -334,15 +376,10 @@ int clk_register_clkdev(struct clk *clk, const char *con_id, return PTR_ERR(clk); va_start(ap, dev_fmt); - cl = vclkdev_alloc(clk, con_id, dev_fmt, ap); + cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap); va_end(ap); - if (!cl) - return -ENOMEM; - - clkdev_add(cl); - - return 0; + return cl ? 0 : -ENOMEM; } EXPORT_SYMBOL(clk_register_clkdev); @@ -365,8 +402,8 @@ int clk_register_clkdevs(struct clk *clk, struct clk_lookup *cl, size_t num) return PTR_ERR(clk); for (i = 0; i < num; i++, cl++) { - cl->clk = clk; - clkdev_add(cl); + cl->clk_hw = __clk_get_hw(clk); + __clkdev_add(cl); } return 0; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 51d7865fdddb..32164ba3d36a 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -106,6 +106,16 @@ config CLKSRC_EFM32 Support to use the timers of EFM32 SoCs as clock source and clock event device. +config CLKSRC_LPC32XX + bool + select CLKSRC_MMIO + select CLKSRC_OF + +config CLKSRC_STM32 + bool "Clocksource for STM32 SoCs" if !ARCH_STM32 + depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST) + select CLKSRC_MMIO + config ARM_ARCH_TIMER bool select CLKSRC_OF if OF @@ -139,6 +149,13 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK help Use ARM global timer clock source as sched_clock +config ARMV7M_SYSTICK + bool + select CLKSRC_OF if OF + select CLKSRC_MMIO + help + This option enables support for the ARMv7M system timer unit + config ATMEL_PIT select CLKSRC_OF if OF def_bool SOC_AT91SAM9 || SOC_SAMA5 diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 5b85f6adb258..1831a588b988 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -36,7 +36,9 @@ obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o +obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o +obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o @@ -45,6 +47,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o +obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c new file mode 100644 index 000000000000..addfd2c64f54 --- /dev/null +++ b/drivers/clocksource/armv7m_systick.c @@ -0,0 +1,79 @@ +/* + * Copyright (C) Maxime Coquelin 2015 + * Author: Maxime Coquelin
<mcoquelin.stm32@gmail.com> + * License terms: GNU General Public License (GPL), version 2 + */ + +#include <linux/kernel.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/clk.h> +#include <linux/bitops.h> + +#define SYST_CSR 0x00 +#define SYST_RVR 0x04 +#define SYST_CVR 0x08 +#define SYST_CALIB 0x0c + +#define SYST_CSR_ENABLE BIT(0) + +#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF + +static void __init system_timer_of_register(struct device_node *np) +{ + struct clk *clk = NULL; + void __iomem *base; + u32 rate; + int ret; + + base = of_iomap(np, 0); + if (!base) { + pr_warn("system-timer: invalid base address\n"); + return; + } + + ret = of_property_read_u32(np, "clock-frequency", &rate); + if (ret) { + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) + goto out_unmap; + + ret = clk_prepare_enable(clk); + if (ret) + goto out_clk_put; + + rate = clk_get_rate(clk); + if (!rate) + goto out_clk_disable; + } + + writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR); + writel_relaxed(SYST_CSR_ENABLE, base + SYST_CSR); + + ret = clocksource_mmio_init(base + SYST_CVR, "arm_system_timer", rate, + 200, 24, clocksource_mmio_readl_down); + if (ret) { + pr_err("failed to init clocksource (%d)\n", ret); + if (clk) + goto out_clk_disable; + else + goto out_unmap; + } + + pr_info("ARM System timer initialized as clocksource\n"); + + return; + +out_clk_disable: + clk_disable_unprepare(clk); +out_clk_put: + clk_put(clk); +out_unmap: + iounmap(base); + pr_warn("ARM System timer register failed (%d)\n", ret); +} + +CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick", + system_timer_of_register); diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c index 2c9c993727c8..4c2ba59897e8 100644 --- a/drivers/clocksource/asm9260_timer.c +++ b/drivers/clocksource/asm9260_timer.c @@ -178,7 +178,7 @@ static void __init asm9260_timer_init(struct device_node *np) unsigned long rate; priv.base = of_io_request_and_map(np, 0, np->name); - if (!priv.base) + if (IS_ERR(priv.base)) panic("%s: unable to map resource", np->name); clk = of_clk_get(np, 0); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 83564c9cfdbe..935b05936dbd 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -209,7 +209,7 @@ static void exynos4_frc_resume(struct clocksource *cs) exynos4_mct_frc_start(); } -struct clocksource mct_frc = { +static struct clocksource mct_frc = { .name = "mct-frc", .rating = 400, .read = exynos4_frc_read, @@ -413,7 +413,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode, } } -static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) +static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) { struct clock_event_device *evt = &mevt->evt; @@ -426,12 +426,8 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) exynos4_mct_tick_stop(mevt); /* Clear the MCT tick interrupt */ - if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) { + if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); - return 1; - } else { - return 0; - } } static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) @@ -564,18 +560,6 @@ out_irq: free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); } -void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1) -{ - mct_irqs[MCT_G0_IRQ] = 
irq_g0; - mct_irqs[MCT_L0_IRQ] = irq_l0; - mct_irqs[MCT_L1_IRQ] = irq_l1; - mct_int_type = MCT_INT_SPI; - - exynos4_timer_resources(NULL, base); - exynos4_clocksource_init(); - exynos4_clockevent_init(); -} - static void __init mct_init_dt(struct device_node *np, unsigned int int_type) { u32 nr_irqs, i; diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 098c542e5c53..cba2d015564c 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c @@ -40,8 +40,6 @@ #define GPT_HZ 32768 -#define MSM_DGT_SHIFT 5 - static void __iomem *event_base; static void __iomem *sts_base; @@ -232,7 +230,6 @@ err: register_current_timer_delay(&msm_delay_timer); } -#ifdef CONFIG_ARCH_QCOM static void __init msm_dt_timer_init(struct device_node *np) { u32 freq; @@ -285,59 +282,3 @@ static void __init msm_dt_timer_init(struct device_node *np) } CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init); CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init); -#else - -static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source, - u32 sts) -{ - void __iomem *base; - - base = ioremap(addr, SZ_256); - if (!base) { - pr_err("Failed to map timer base\n"); - return -ENOMEM; - } - event_base = base + event; - source_base = base + source; - if (sts) - sts_base = base + sts; - - return 0; -} - -static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs) -{ - /* - * Shift timer count down by a constant due to unreliable lower bits - * on some targets. - */ - return msm_read_timer_count(cs) >> MSM_DGT_SHIFT; -} - -void __init msm7x01_timer_init(void) -{ - struct clocksource *cs = &msm_clocksource; - - if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0)) - return; - cs->read = msm_read_timer_count_shift; - cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)); - /* 600 KHz */ - msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7, - false); -} - -void __init msm7x30_timer_init(void) -{ - if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80)) - return; - msm_timer_init(24576000 / 4, 32, 1, false); -} - -void __init qsd8x50_timer_init(void) -{ - if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34)) - return; - msm_timer_init(19200000 / 4, 32, 7, false); -} -#endif diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c new file mode 100644 index 000000000000..a1c06a2bc77c --- /dev/null +++ b/drivers/clocksource/time-lpc32xx.c @@ -0,0 +1,272 @@ +/* + * Clocksource driver for NXP LPC32xx/18xx/43xx timer + * + * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com> + * + * Based on: + * time-efm32 Copyright (C) 2013 Pengutronix + * mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ * + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/sched_clock.h> + +#define LPC32XX_TIMER_IR 0x000 +#define LPC32XX_TIMER_IR_MR0INT BIT(0) +#define LPC32XX_TIMER_TCR 0x004 +#define LPC32XX_TIMER_TCR_CEN BIT(0) +#define LPC32XX_TIMER_TCR_CRST BIT(1) +#define LPC32XX_TIMER_TC 0x008 +#define LPC32XX_TIMER_PR 0x00c +#define LPC32XX_TIMER_MCR 0x014 +#define LPC32XX_TIMER_MCR_MR0I BIT(0) +#define LPC32XX_TIMER_MCR_MR0R BIT(1) +#define LPC32XX_TIMER_MCR_MR0S BIT(2) +#define LPC32XX_TIMER_MR0 0x018 +#define LPC32XX_TIMER_CTCR 0x070 + +struct lpc32xx_clock_event_ddata { + struct clock_event_device evtdev; + void __iomem *base; +}; + +/* Needed for the sched clock */ +static void __iomem *clocksource_timer_counter; + +static u64 notrace lpc32xx_read_sched_clock(void) +{ + return readl(clocksource_timer_counter); +} + +static int lpc32xx_clkevt_next_event(unsigned long delta, + struct clock_event_device *evtdev) +{ + struct lpc32xx_clock_event_ddata *ddata = + container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); + + /* + * Place timer in reset and program the delta in the prescale + * register (PR). When the prescale counter matches the value + * in PR the counter register is incremented and the compare + * match will trigger. After setup the timer is released from + * reset and enabled. + */ + writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); + writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR); + writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); + + return 0; +} + +static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev) +{ + struct lpc32xx_clock_event_ddata *ddata = + container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); + + /* Disable the timer */ + writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR); + + return 0; +} + +static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev) +{ + /* + * When using oneshot, we must also disable the timer + * to wait for the first call to set_next_event(). 
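+ * The clockevents core will then program the first expiry through
+ * lpc32xx_clkevt_next_event() above.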
+ */ + return lpc32xx_clkevt_shutdown(evtdev); +} + +static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id) +{ + struct lpc32xx_clock_event_ddata *ddata = dev_id; + + /* Clear match on channel 0 */ + writel_relaxed(LPC32XX_TIMER_IR_MR0INT, ddata->base + LPC32XX_TIMER_IR); + + ddata->evtdev.event_handler(&ddata->evtdev); + + return IRQ_HANDLED; +} + +static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = { + .evtdev = { + .name = "lpc3220 clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = lpc32xx_clkevt_next_event, + .set_state_shutdown = lpc32xx_clkevt_shutdown, + .set_state_oneshot = lpc32xx_clkevt_oneshot, + }, +}; + +static int __init lpc32xx_clocksource_init(struct device_node *np) +{ + void __iomem *base; + unsigned long rate; + struct clk *clk; + int ret; + + clk = of_clk_get_by_name(np, "timerclk"); + if (IS_ERR(clk)) { + pr_err("clock get failed (%lu)\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("clock enable failed (%d)\n", ret); + goto err_clk_enable; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("unable to map registers\n"); + ret = -EADDRNOTAVAIL; + goto err_iomap; + } + + /* + * Disable and reset timer then set it to free running timer + * mode (CTCR) with no prescaler (PR) or match operations (MCR). + * After setup the timer is released from reset and enabled. + */ + writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR); + writel_relaxed(0, base + LPC32XX_TIMER_PR); + writel_relaxed(0, base + LPC32XX_TIMER_MCR); + writel_relaxed(0, base + LPC32XX_TIMER_CTCR); + writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR); + + rate = clk_get_rate(clk); + ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer", + rate, 300, 32, clocksource_mmio_readl_up); + if (ret) { + pr_err("failed to init clocksource (%d)\n", ret); + goto err_clocksource_init; + } + + clocksource_timer_counter = base + LPC32XX_TIMER_TC; + sched_clock_register(lpc32xx_read_sched_clock, 32, rate); + + return 0; + +err_clocksource_init: + iounmap(base); +err_iomap: + clk_disable_unprepare(clk); +err_clk_enable: + clk_put(clk); + return ret; +} + +static int __init lpc32xx_clockevent_init(struct device_node *np) +{ + void __iomem *base; + unsigned long rate; + struct clk *clk; + int ret, irq; + + clk = of_clk_get_by_name(np, "timerclk"); + if (IS_ERR(clk)) { + pr_err("clock get failed (%lu)\n", PTR_ERR(clk)); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("clock enable failed (%d)\n", ret); + goto err_clk_enable; + } + + base = of_iomap(np, 0); + if (!base) { + pr_err("unable to map registers\n"); + ret = -EADDRNOTAVAIL; + goto err_iomap; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + pr_err("get irq failed\n"); + ret = -ENOENT; + goto err_irq; + } + + /* + * Disable timer and clear any pending interrupt (IR) on match + * channel 0 (MR0). Configure a compare match value of 1 on MR0 + * and enable interrupt, reset on match and stop on match (MCR). 
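+ *
+ * With MR0 fixed at 1, the one-shot delta is encoded in the prescale
+ * register instead: lpc32xx_clkevt_next_event() writes it to PR, and
+ * the compare match fires once the prescale counter has counted that
+ * far.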
+ */ + writel_relaxed(0, base + LPC32XX_TIMER_TCR); + writel_relaxed(0, base + LPC32XX_TIMER_CTCR); + writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR); + writel_relaxed(1, base + LPC32XX_TIMER_MR0); + writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R + + LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR); + + rate = clk_get_rate(clk); + lpc32xx_clk_event_ddata.base = base; + clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev, + rate, 1, -1); + + ret = request_irq(irq, lpc32xx_clock_event_handler, + IRQF_TIMER | IRQF_IRQPOLL, "lpc3220 clockevent", + &lpc32xx_clk_event_ddata); + if (ret) { + pr_err("request irq failed\n"); + goto err_irq; + } + + return 0; + +err_irq: + iounmap(base); +err_iomap: + clk_disable_unprepare(clk); +err_clk_enable: + clk_put(clk); + return ret; +} + +/* + * This function ensures that we end up with exactly one clocksource and + * one clock_event_device. + */ +static void __init lpc32xx_timer_init(struct device_node *np) +{ + static int has_clocksource, has_clockevent; + int ret; + + if (!has_clocksource) { + ret = lpc32xx_clocksource_init(np); + if (!ret) { + has_clocksource = 1; + return; + } + } + + if (!has_clockevent) { + ret = lpc32xx_clockevent_init(np); + if (!ret) { + has_clockevent = 1; + return; + } + } +} +CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init); diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c index b9efd30513d5..c97d1980c0f8 100644 --- a/drivers/clocksource/timer-integrator-ap.c +++ b/drivers/clocksource/timer-integrator-ap.c @@ -166,7 +166,7 @@ static void __init integrator_ap_timer_init_of(struct device_node *node) struct device_node *sec_node; base = of_io_request_and_map(node, 0, "integrator-timer"); - if (!base) + if (IS_ERR(base)) return; clk = of_clk_get(node, 0); diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c new file mode 100644 index 000000000000..a97e8b50701c --- /dev/null +++ b/drivers/clocksource/timer-stm32.c @@ -0,0 +1,184 @@ +/* + * Copyright (C) Maxime Coquelin 2015 + * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com> + * License terms: GNU General Public License (GPL), version 2 + * + * Inspired by time-efm32.c from Uwe Kleine-Koenig + */ + +#include <linux/kernel.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/clk.h> +#include <linux/reset.h> + +#define TIM_CR1 0x00 +#define TIM_DIER 0x0c +#define TIM_SR 0x10 +#define TIM_EGR 0x14 +#define TIM_PSC 0x28 +#define TIM_ARR 0x2c + +#define TIM_CR1_CEN BIT(0) +#define TIM_CR1_OPM BIT(3) +#define TIM_CR1_ARPE BIT(7) + +#define TIM_DIER_UIE BIT(0) + +#define TIM_SR_UIF BIT(0) + +#define TIM_EGR_UG BIT(0) + +struct stm32_clock_event_ddata { + struct clock_event_device evtdev; + unsigned periodic_top; + void __iomem *base; +}; + +static void stm32_clock_event_set_mode(enum clock_event_mode mode, + struct clock_event_device *evtdev) +{ + struct stm32_clock_event_ddata *data = + container_of(evtdev, struct stm32_clock_event_ddata, evtdev); + void *base = data->base; + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + writel_relaxed(data->periodic_top, base + TIM_ARR); + writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1); + break; + + case CLOCK_EVT_MODE_ONESHOT: + default: + writel_relaxed(0, base + TIM_CR1); + break; + } +} + +static int
stm32_clock_event_set_next_event(unsigned long evt, + struct clock_event_device *evtdev) +{ + struct stm32_clock_event_ddata *data = + container_of(evtdev, struct stm32_clock_event_ddata, evtdev); + + writel_relaxed(evt, data->base + TIM_ARR); + writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN, + data->base + TIM_CR1); + + return 0; +} + +static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id) +{ + struct stm32_clock_event_ddata *data = dev_id; + + writel_relaxed(0, data->base + TIM_SR); + + data->evtdev.event_handler(&data->evtdev); + + return IRQ_HANDLED; +} + +static struct stm32_clock_event_ddata clock_event_ddata = { + .evtdev = { + .name = "stm32 clockevent", + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .set_mode = stm32_clock_event_set_mode, + .set_next_event = stm32_clock_event_set_next_event, + .rating = 200, + }, +}; + +static void __init stm32_clockevent_init(struct device_node *np) +{ + struct stm32_clock_event_ddata *data = &clock_event_ddata; + struct clk *clk; + struct reset_control *rstc; + unsigned long rate, max_delta; + int irq, ret, bits, prescaler = 1; + + clk = of_clk_get(np, 0); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + pr_err("failed to get clock for clockevent (%d)\n", ret); + goto err_clk_get; + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("failed to enable timer clock for clockevent (%d)\n", + ret); + goto err_clk_enable; + } + + rate = clk_get_rate(clk); + + rstc = of_reset_control_get(np, NULL); + if (!IS_ERR(rstc)) { + reset_control_assert(rstc); + reset_control_deassert(rstc); + } + + data->base = of_iomap(np, 0); + if (!data->base) { + pr_err("failed to map registers for clockevent\n"); + goto err_iomap; + } + + irq = irq_of_parse_and_map(np, 0); + if (!irq) { + pr_err("%s: failed to get irq.\n", np->full_name); + goto err_get_irq; + } + + /* Detect whether the timer is 16 or 32 bits */ + writel_relaxed(~0U, data->base + TIM_ARR); + max_delta = readl_relaxed(data->base + TIM_ARR); + if (max_delta == ~0U) { + prescaler = 1; + bits = 32; + } else { + prescaler = 1024; + bits = 16; + } + writel_relaxed(0, data->base + TIM_ARR); + + writel_relaxed(prescaler - 1, data->base + TIM_PSC); + writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR); + writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER); + writel_relaxed(0, data->base + TIM_SR); + + data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ); + + clockevents_config_and_register(&data->evtdev, + DIV_ROUND_CLOSEST(rate, prescaler), + 0x1, max_delta); + + ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER, + "stm32 clockevent", data); + if (ret) { + pr_err("%s: failed to request irq.\n", np->full_name); + goto err_get_irq; + } + + pr_info("%s: STM32 clockevent driver initialized (%d bits)\n", + np->full_name, bits); + + return; + +err_get_irq: + iounmap(data->base); +err_iomap: + clk_disable_unprepare(clk); +err_clk_enable: + clk_put(clk); +err_clk_get: + return; +} + +CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init); diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 28aa4b7bb602..0ffb4ea7c925 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -324,7 +324,7 @@ static void __init sun5i_timer_init(struct device_node *node) int irq; timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); - if (!timer_base) + if (IS_ERR(timer_base)) panic("Can't map registers"); irq = irq_of_parse_and_map(node, 0); diff --git a/drivers/cpufreq/Kconfig.arm 
b/drivers/cpufreq/Kconfig.arm index 4f3dbc8cf729..611cb09239eb 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -5,7 +5,7 @@ # big LITTLE core layer and glue drivers config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" - depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK + depends on (ARM_CPU_TOPOLOGY || ARM64) && HAVE_CLK select PM_OPP help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index b0c18ed8d83f..0136dfcdabf0 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -699,13 +699,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) dmi_check_system(sw_any_bug_dmi_table); if (bios_with_sw_any_bug && !policy_is_shared(policy)) { policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; - cpumask_copy(policy->cpus, cpu_core_mask(cpu)); + cpumask_copy(policy->cpus, topology_core_cpumask(cpu)); } if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { cpumask_clear(policy->cpus); cpumask_set_cpu(cpu, policy->cpus); - cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu)); + cpumask_copy(data->freqdomain_cpus, + topology_sibling_cpumask(cpu)); policy->shared_type = CPUFREQ_SHARED_TYPE_HW; pr_info_once(PFX "overriding BIOS provided _PSD data\n"); } diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index e1a6ba66a7f5..f1e42f8ce0fc 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -31,7 +31,6 @@ #include <linux/slab.h> #include <linux/topology.h> #include <linux/types.h> -#include <asm/bL_switcher.h> #include "arm_big_little.h" @@ -41,12 +40,16 @@ #define MAX_CLUSTERS 2 #ifdef CONFIG_BL_SWITCHER +#include <asm/bL_switcher.h> static bool bL_switching_enabled; #define is_bL_switching_enabled() bL_switching_enabled #define set_switching_enabled(x) (bL_switching_enabled = (x)) #else #define is_bL_switching_enabled() false #define set_switching_enabled(x) do { } while (0) +#define bL_switch_request(...) do { } while (0) +#define bL_switcher_put_enabled() do { } while (0) +#define bL_switcher_get_enabled() do { } while (0) #endif #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq) @@ -186,6 +189,15 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate) mutex_unlock(&cluster_lock[old_cluster]); } + /* + * FIXME: clk_set_rate has to handle the case where clk_change_rate + * can fail due to hardware or firmware issues. Until the clk core + * layer is fixed, we can check here. In most of the cases we will + * be reading only the cached value anyway. This needs to be removed + * once clk core is fixed. 
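+ * The comparison below reports -EIO when the rate read back differs
+ * from the one that was requested.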
+ */ + if (bL_cpufreq_get_rate(cpu) != new_rate) + return -EIO; return 0; } @@ -322,7 +334,6 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev) static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) { u32 cluster = raw_cpu_to_cluster(cpu_dev->id); - char name[14] = "cpu-cluster."; int ret; if (freq_table[cluster]) @@ -342,8 +353,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) goto free_opp_table; } - name[12] = cluster + '0'; - clk[cluster] = clk_get(cpu_dev, name); + clk[cluster] = clk_get(cpu_dev, NULL); if (!IS_ERR(clk[cluster])) { dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", __func__, clk[cluster], freq_table[cluster], @@ -506,6 +516,7 @@ static struct cpufreq_driver bL_cpufreq_driver = { .attr = cpufreq_generic_attr, }; +#ifdef CONFIG_BL_SWITCHER static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb, unsigned long action, void *_arg) { @@ -538,6 +549,20 @@ static struct notifier_block bL_switcher_notifier = { .notifier_call = bL_cpufreq_switcher_notifier, }; +static int __bLs_register_notifier(void) +{ + return bL_switcher_register_notifier(&bL_switcher_notifier); +} + +static int __bLs_unregister_notifier(void) +{ + return bL_switcher_unregister_notifier(&bL_switcher_notifier); +} +#else +static int __bLs_register_notifier(void) { return 0; } +static int __bLs_unregister_notifier(void) { return 0; } +#endif + int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) { int ret, i; @@ -555,8 +580,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) arm_bL_ops = ops; - ret = bL_switcher_get_enabled(); - set_switching_enabled(ret); + set_switching_enabled(bL_switcher_get_enabled()); for (i = 0; i < MAX_CLUSTERS; i++) mutex_init(&cluster_lock[i]); @@ -567,7 +591,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) __func__, ops->name, ret); arm_bL_ops = NULL; } else { - ret = bL_switcher_register_notifier(&bL_switcher_notifier); + ret = __bLs_register_notifier(); if (ret) { cpufreq_unregister_driver(&bL_cpufreq_driver); arm_bL_ops = NULL; @@ -591,7 +615,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops) } bL_switcher_get_enabled(); - bL_switcher_unregister_notifier(&bL_switcher_notifier); + __bLs_unregister_notifier(); cpufreq_unregister_driver(&bL_cpufreq_driver); bL_switcher_put_enabled(); pr_info("%s: Un-registered platform driver: %s\n", __func__, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index bab67db54b7e..528a82bf5038 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -416,6 +416,7 @@ static struct platform_driver dt_cpufreq_platdrv = { }; module_platform_driver(dt_cpufreq_platdrv); +MODULE_ALIAS("platform:cpufreq-dt"); MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); MODULE_DESCRIPTION("Generic cpufreq driver"); diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index a2258090b58b..db69eeb501a7 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -414,7 +414,7 @@ static int nforce2_detect_chipset(void) * nforce2_init - initializes the nForce2 CPUFreq driver * * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported - * devices, -EINVAL on problems during initiatization, and zero on + * devices, -EINVAL on problems during initialization, and zero on * success. 
 */ static int __init nforce2_init(void) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8ae655c364f4..b612411655f9 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -31,10 +31,62 @@ #include <linux/tick.h> #include <trace/events/power.h> -/* Macros to iterate over lists */ -/* Iterate over online CPUs policies */ static LIST_HEAD(cpufreq_policy_list); -#define for_each_policy(__policy) \ + +static inline bool policy_is_inactive(struct cpufreq_policy *policy) +{ + return cpumask_empty(policy->cpus); +} + +static bool suitable_policy(struct cpufreq_policy *policy, bool active) +{ + return active == !policy_is_inactive(policy); +} + +/* Finds next active/inactive policy */ +static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy, + bool active) +{ + do { + policy = list_next_entry(policy, policy_list); + + /* No more policies in the list */ + if (&policy->policy_list == &cpufreq_policy_list) + return NULL; + } while (!suitable_policy(policy, active)); + + return policy; +} + +static struct cpufreq_policy *first_policy(bool active) +{ + struct cpufreq_policy *policy; + + /* No policies in the list */ + if (list_empty(&cpufreq_policy_list)) + return NULL; + + policy = list_first_entry(&cpufreq_policy_list, typeof(*policy), + policy_list); + + if (!suitable_policy(policy, active)) + policy = next_policy(policy, active); + + return policy; +} + +/* Macros to iterate over CPU policies */ +#define for_each_suitable_policy(__policy, __active) \ + for (__policy = first_policy(__active); \ + __policy; \ + __policy = next_policy(__policy, __active)) + +#define for_each_active_policy(__policy) \ + for_each_suitable_policy(__policy, true) +#define for_each_inactive_policy(__policy) \ + for_each_suitable_policy(__policy, false) + +#define for_each_policy(__policy) \ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) /* Iterate over governors */ @@ -49,13 +101,9 @@ static LIST_HEAD(cpufreq_governor_list); */ static struct cpufreq_driver *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); -static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback); static DEFINE_RWLOCK(cpufreq_driver_lock); DEFINE_MUTEX(cpufreq_governor_lock); -/* This one keeps track of the previously set governor of a removed CPU */ -static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); - /* Flag to suspend/resume CPUFreq governors */ static bool cpufreq_suspended; @@ -178,7 +226,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy, policy->cpuinfo.transition_latency = transition_latency; /* - * The driver only supports the SMP configuartion where all processors + * The driver only supports the SMP configuration where all processors * share the clock and voltage and clock. */ cpumask_setall(policy->cpus); @@ -187,10 +235,18 @@ int cpufreq_generic_init(struct cpufreq_policy *policy, } EXPORT_SYMBOL_GPL(cpufreq_generic_init); -unsigned int cpufreq_generic_get(unsigned int cpu) +/* Only for cpufreq core internal use */ +struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) { struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; +} + +unsigned int cpufreq_generic_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + if (!policy || IS_ERR(policy->clk)) { pr_err("%s: No %s associated to cpu: %d\n", __func__, policy ?
"clk" : "policy", cpu); @@ -201,18 +257,29 @@ unsigned int cpufreq_generic_get(unsigned int cpu) } EXPORT_SYMBOL_GPL(cpufreq_generic_get); -/* Only for cpufreq core internal use */ -struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) -{ - return per_cpu(cpufreq_cpu_data, cpu); -} - +/** + * cpufreq_cpu_get: returns policy for a cpu and marks it busy. + * + * @cpu: cpu to find policy for. + * + * This returns policy for 'cpu', returns NULL if it doesn't exist. + * It also increments the kobject reference count to mark it busy and so would + * require a corresponding call to cpufreq_cpu_put() to decrement it back. + * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be + * freed as that depends on the kobj count. + * + * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a + * valid policy is found. This is done to make sure the driver doesn't get + * unregistered while the policy is being used. + * + * Return: A valid policy on success, otherwise NULL on failure. + */ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { struct cpufreq_policy *policy = NULL; unsigned long flags; - if (cpu >= nr_cpu_ids) + if (WARN_ON(cpu >= nr_cpu_ids)) return NULL; if (!down_read_trylock(&cpufreq_rwsem)) @@ -223,7 +290,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) if (cpufreq_driver) { /* get the CPU */ - policy = per_cpu(cpufreq_cpu_data, cpu); + policy = cpufreq_cpu_get_raw(cpu); if (policy) kobject_get(&policy->kobj); } @@ -237,6 +304,16 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) } EXPORT_SYMBOL_GPL(cpufreq_cpu_get); +/** + * cpufreq_cpu_put: Decrements the usage count of a policy + * + * @policy: policy earlier returned by cpufreq_cpu_get(). + * + * This decrements the kobject reference count incremented earlier by calling + * cpufreq_cpu_get(). + * + * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get(). + */ void cpufreq_cpu_put(struct cpufreq_policy *policy) { kobject_put(&policy->kobj); @@ -798,11 +875,18 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr, down_write(&policy->rwsem); + /* Updating inactive policies is invalid, so avoid doing that. 
*/ + if (unlikely(policy_is_inactive(policy))) { + ret = -EBUSY; + goto unlock_policy_rwsem; + } + if (fattr->store) ret = fattr->store(policy, buf, count); else ret = -EIO; +unlock_policy_rwsem: up_write(&policy->rwsem); up_read(&cpufreq_rwsem); @@ -873,28 +957,67 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr) } EXPORT_SYMBOL(cpufreq_sysfs_remove_file); -/* symlink affected CPUs */ +static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) +{ + struct device *cpu_dev; + + pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu); + + if (!policy) + return 0; + + cpu_dev = get_cpu_device(cpu); + if (WARN_ON(!cpu_dev)) + return 0; + + return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); +} + +static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu) +{ + struct device *cpu_dev; + + pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu); + + cpu_dev = get_cpu_device(cpu); + if (WARN_ON(!cpu_dev)) + return; + + sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); +} + +/* Add/remove symlinks for all related CPUs */ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) { unsigned int j; int ret = 0; - for_each_cpu(j, policy->cpus) { - struct device *cpu_dev; - - if (j == policy->cpu) + /* Some related CPUs might not be present (physically hotplugged) */ + for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { + if (j == policy->kobj_cpu) continue; - pr_debug("Adding link for CPU: %u\n", j); - cpu_dev = get_cpu_device(j); - ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, - "cpufreq"); + ret = add_cpu_dev_symlink(policy, j); if (ret) break; } + return ret; } +static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) +{ + unsigned int j; + + /* Some related CPUs might not be present (physically hotplugged) */ + for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { + if (j == policy->kobj_cpu) + continue; + + remove_cpu_dev_symlink(policy, j); + } +} + static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, struct device *dev) { @@ -937,7 +1060,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy) memcpy(&new_policy, policy, sizeof(*policy)); /* Update governor of new_policy to the governor used before hotplug */ - gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); + gov = find_governor(policy->last_governor); if (gov) pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, policy->cpu); @@ -963,7 +1086,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu, struct device *dev) { int ret = 0; - unsigned long flags; + + /* Has this CPU been taken care of already? 
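+ * (i.e. it is already set in policy->cpus, so there is nothing left
+ * to set up)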
*/ + if (cpumask_test_cpu(cpu, policy->cpus)) + return 0; if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); @@ -974,13 +1100,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, } down_write(&policy->rwsem); - - write_lock_irqsave(&cpufreq_driver_lock, flags); - cpumask_set_cpu(cpu, policy->cpus); - per_cpu(cpufreq_cpu_data, cpu) = policy; - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - up_write(&policy->rwsem); if (has_target()) { @@ -994,7 +1114,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, } } - return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); + return 0; } static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) @@ -1003,20 +1123,25 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu) unsigned long flags; read_lock_irqsave(&cpufreq_driver_lock, flags); - - policy = per_cpu(cpufreq_cpu_data_fallback, cpu); - + policy = per_cpu(cpufreq_cpu_data, cpu); read_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (policy) - policy->governor = NULL; + if (likely(policy)) { + /* Policy should be inactive here */ + WARN_ON(!policy_is_inactive(policy)); + + down_write(&policy->rwsem); + policy->cpu = cpu; + up_write(&policy->rwsem); + } return policy; } -static struct cpufreq_policy *cpufreq_policy_alloc(void) +static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) { struct cpufreq_policy *policy; + int ret; policy = kzalloc(sizeof(*policy), GFP_KERNEL); if (!policy) @@ -1028,6 +1153,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) goto err_free_cpumask; + ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, + "cpufreq"); + if (ret) { + pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); + goto err_free_rcpumask; + } + INIT_LIST_HEAD(&policy->policy_list); init_rwsem(&policy->rwsem); spin_lock_init(&policy->transition_lock); @@ -1035,8 +1167,15 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void) init_completion(&policy->kobj_unregister); INIT_WORK(&policy->update, handle_update); + policy->cpu = dev->id; + + /* Set this once on allocation */ + policy->kobj_cpu = dev->id; + return policy; +err_free_rcpumask: + free_cpumask_var(policy->related_cpus); err_free_cpumask: free_cpumask_var(policy->cpus); err_free_policy: @@ -1045,18 +1184,20 @@ err_free_policy: return NULL; } -static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) +static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify) { struct kobject *kobj; struct completion *cmp; - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_REMOVE_POLICY, policy); + if (notify) + blocking_notifier_call_chain(&cpufreq_policy_notifier_list, + CPUFREQ_REMOVE_POLICY, policy); - down_read(&policy->rwsem); + down_write(&policy->rwsem); + cpufreq_remove_dev_symlink(policy); kobj = &policy->kobj; cmp = &policy->kobj_unregister; - up_read(&policy->rwsem); + up_write(&policy->rwsem); kobject_put(kobj); /* @@ -1069,68 +1210,64 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) pr_debug("wait complete\n"); } -static void cpufreq_policy_free(struct cpufreq_policy *policy) -{ - free_cpumask_var(policy->related_cpus); - free_cpumask_var(policy->cpus); - kfree(policy); -} - -static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu, - struct device *cpu_dev) +static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) { - 
int ret; - - if (WARN_ON(cpu == policy->cpu)) - return 0; + unsigned long flags; + int cpu; - /* Move kobject to the new policy->cpu */ - ret = kobject_move(&policy->kobj, &cpu_dev->kobj); - if (ret) { - pr_err("%s: Failed to move kobj: %d\n", __func__, ret); - return ret; - } + /* Remove policy from list */ + write_lock_irqsave(&cpufreq_driver_lock, flags); + list_del(&policy->policy_list); - down_write(&policy->rwsem); - policy->cpu = cpu; - up_write(&policy->rwsem); + for_each_cpu(cpu, policy->related_cpus) + per_cpu(cpufreq_cpu_data, cpu) = NULL; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); - return 0; + cpufreq_policy_put_kobj(policy, notify); + free_cpumask_var(policy->related_cpus); + free_cpumask_var(policy->cpus); + kfree(policy); } -static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) +/** + * cpufreq_add_dev - add a CPU device + * + * Adds the cpufreq interface for a CPU device. + * + * The Oracle says: try running cpufreq registration/unregistration concurrently + * with cpu hotplugging and all hell will break loose. Tried to clean this + * mess up, but more thorough testing is needed. - Mathieu + */ +static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { unsigned int j, cpu = dev->id; int ret = -ENOMEM; struct cpufreq_policy *policy; unsigned long flags; - bool recover_policy = cpufreq_suspended; - - if (cpu_is_offline(cpu)) - return 0; + bool recover_policy = !sif; pr_debug("adding CPU %u\n", cpu); - /* check whether a different CPU already registered this - * CPU because it is in the same boat. */ - policy = cpufreq_cpu_get_raw(cpu); - if (unlikely(policy)) - return 0; + /* + * Only possible if 'cpu' wasn't physically present earlier and we are + * here from the subsys_interface add callback. A hotplug notifier will + * follow and we will handle it like logical CPU hotplug then. For now, + * just create the sysfs link. + */ + if (cpu_is_offline(cpu)) + return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); if (!down_read_trylock(&cpufreq_rwsem)) return 0; - /* Check if this cpu was hot-unplugged earlier and has siblings */ - read_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_policy(policy) { - if (cpumask_test_cpu(cpu, policy->related_cpus)) { - read_unlock_irqrestore(&cpufreq_driver_lock, flags); - ret = cpufreq_add_policy_cpu(policy, cpu, dev); - up_read(&cpufreq_rwsem); - return ret; - } + /* Check if this CPU already has a policy to manage it */ + policy = per_cpu(cpufreq_cpu_data, cpu); + if (policy && !policy_is_inactive(policy)) { + WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); + ret = cpufreq_add_policy_cpu(policy, cpu, dev); + up_read(&cpufreq_rwsem); + return ret; } - read_unlock_irqrestore(&cpufreq_driver_lock, flags); /* * Restore the saved policy when doing light-weight init and fall back @@ -1139,22 +1276,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; if (!policy) { recover_policy = false; - policy = cpufreq_policy_alloc(); + policy = cpufreq_policy_alloc(dev); if (!policy) goto nomem_out; } - /- - * In the resume path, since we restore a saved policy, the assignment - * to policy->cpu is like an update of the existing policy, rather than - * the creation of a brand new one. So we need to perform this update - * by invoking update_policy_cpu().
- */ - if (recover_policy && cpu != policy->cpu) - WARN_ON(update_policy_cpu(policy, cpu, dev)); - else - policy->cpu = cpu; - cpumask_copy(policy->cpus, cpumask_of(cpu)); /* call driver. From then on the cpufreq must be able @@ -1181,21 +1307,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) policy->user_policy.min = policy->min; policy->user_policy.max = policy->max; - /* prepare interface data */ - ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, - &dev->kobj, "cpufreq"); - if (ret) { - pr_err("%s: failed to init policy->kobj: %d\n", - __func__, ret); - goto err_init_policy_kobj; - } + write_lock_irqsave(&cpufreq_driver_lock, flags); + for_each_cpu(j, policy->related_cpus) + per_cpu(cpufreq_cpu_data, j) = policy; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); } - write_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu(j, policy->cpus) - per_cpu(cpufreq_cpu_data, j) = policy; - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { policy->cur = cpufreq_driver->get(policy->cpu); if (!policy->cur) { @@ -1253,11 +1370,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) goto err_out_unregister; blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_CREATE_POLICY, policy); - } - write_lock_irqsave(&cpufreq_driver_lock, flags); - list_add(&policy->policy_list, &cpufreq_policy_list); - write_unlock_irqrestore(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); + list_add(&policy->policy_list, &cpufreq_policy_list); + write_unlock_irqrestore(&cpufreq_driver_lock, flags); + } cpufreq_init_policy(policy); @@ -1281,68 +1398,28 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) err_out_unregister: err_get_freq: - write_lock_irqsave(&cpufreq_driver_lock, flags); - for_each_cpu(j, policy->cpus) - per_cpu(cpufreq_cpu_data, j) = NULL; - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - - if (!recover_policy) { - kobject_put(&policy->kobj); - wait_for_completion(&policy->kobj_unregister); - } -err_init_policy_kobj: up_write(&policy->rwsem); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); err_set_policy_cpu: - if (recover_policy) { - /* Do not leave stale fallback data behind. */ - per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; - cpufreq_policy_put_kobj(policy); - } - cpufreq_policy_free(policy); - + cpufreq_policy_free(policy, recover_policy); nomem_out: up_read(&cpufreq_rwsem); return ret; } -/** - * cpufreq_add_dev - add a CPU device - * - * Adds the cpufreq interface for a CPU device. - * - * The Oracle says: try running cpufreq registration/unregistration concurrently - * with with cpu hotplugging and all hell will break loose. Tried to clean this - * mess up, but more thorough testing is needed. 
- Mathieu - */ -static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) -{ - return __cpufreq_add_dev(dev, sif); -} - static int __cpufreq_remove_dev_prepare(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = dev->id, cpus; - int ret; - unsigned long flags; + unsigned int cpu = dev->id; + int ret = 0; struct cpufreq_policy *policy; pr_debug("%s: unregistering CPU %u\n", __func__, cpu); - write_lock_irqsave(&cpufreq_driver_lock, flags); - - policy = per_cpu(cpufreq_cpu_data, cpu); - - /* Save the policy somewhere when doing a light-weight tear-down */ - if (cpufreq_suspended) - per_cpu(cpufreq_cpu_data_fallback, cpu) = policy; - - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - + policy = cpufreq_cpu_get_raw(cpu); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); return -EINVAL; @@ -1354,108 +1431,75 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, pr_err("%s: Failed to stop governor\n", __func__); return ret; } - - strncpy(per_cpu(cpufreq_cpu_governor, cpu), - policy->governor->name, CPUFREQ_NAME_LEN); } - down_read(&policy->rwsem); - cpus = cpumask_weight(policy->cpus); - up_read(&policy->rwsem); + down_write(&policy->rwsem); + cpumask_clear_cpu(cpu, policy->cpus); - if (cpu != policy->cpu) { - sysfs_remove_link(&dev->kobj, "cpufreq"); - } else if (cpus > 1) { + if (policy_is_inactive(policy)) { + if (has_target()) + strncpy(policy->last_governor, policy->governor->name, + CPUFREQ_NAME_LEN); + } else if (cpu == policy->cpu) { /* Nominate new CPU */ - int new_cpu = cpumask_any_but(policy->cpus, cpu); - struct device *cpu_dev = get_cpu_device(new_cpu); + policy->cpu = cpumask_any(policy->cpus); + } + up_write(&policy->rwsem); - sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); - ret = update_policy_cpu(policy, new_cpu, cpu_dev); - if (ret) { - if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj, - "cpufreq")) - pr_err("%s: Failed to restore kobj link to cpu:%d\n", - __func__, cpu_dev->id); - return ret; - } + /* Start governor again for active policy */ + if (!policy_is_inactive(policy)) { + if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); + if (!ret) + ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - if (!cpufreq_suspended) - pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", - __func__, new_cpu, cpu); + if (ret) + pr_err("%s: Failed to start governor\n", __func__); + } } else if (cpufreq_driver->stop_cpu) { cpufreq_driver->stop_cpu(policy); } - return 0; + return ret; } static int __cpufreq_remove_dev_finish(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = dev->id, cpus; + unsigned int cpu = dev->id; int ret; - unsigned long flags; - struct cpufreq_policy *policy; - - write_lock_irqsave(&cpufreq_driver_lock, flags); - policy = per_cpu(cpufreq_cpu_data, cpu); - per_cpu(cpufreq_cpu_data, cpu) = NULL; - write_unlock_irqrestore(&cpufreq_driver_lock, flags); + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); return -EINVAL; } - down_write(&policy->rwsem); - cpus = cpumask_weight(policy->cpus); - - if (cpus > 1) - cpumask_clear_cpu(cpu, policy->cpus); - up_write(&policy->rwsem); + /* Only proceed for inactive policies */ + if (!policy_is_inactive(policy)) + return 0; /* If cpu is last user of policy, free policy */ - if (cpus == 1) { - if (has_target()) { - ret = __cpufreq_governor(policy, - CPUFREQ_GOV_POLICY_EXIT); - if (ret) { - pr_err("%s: Failed to exit governor\n", - __func__); 
- return ret; - } - } - - if (!cpufreq_suspended) - cpufreq_policy_put_kobj(policy); - - /* - * Perform the ->exit() even during light-weight tear-down, - * since this is a core component, and is essential for the - * subsequent light-weight ->init() to succeed. - */ - if (cpufreq_driver->exit) - cpufreq_driver->exit(policy); - - /* Remove policy from list of active policies */ - write_lock_irqsave(&cpufreq_driver_lock, flags); - list_del(&policy->policy_list); - write_unlock_irqrestore(&cpufreq_driver_lock, flags); - - if (!cpufreq_suspended) - cpufreq_policy_free(policy); - } else if (has_target()) { - ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); - if (!ret) - ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); - + if (has_target()) { + ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); if (ret) { - pr_err("%s: Failed to start governor\n", __func__); + pr_err("%s: Failed to exit governor\n", __func__); return ret; } } + /* + * Perform the ->exit() even during light-weight tear-down, + * since this is a core component, and is essential for the + * subsequent light-weight ->init() to succeed. + */ + if (cpufreq_driver->exit) + cpufreq_driver->exit(policy); + + /* Free the policy only if the driver is getting removed. */ + if (sif) + cpufreq_policy_free(policy, true); + return 0; } @@ -1469,8 +1513,33 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) unsigned int cpu = dev->id; int ret; - if (cpu_is_offline(cpu)) + /* + * Only possible if 'cpu' is getting physically removed now. A hotplug + * notifier should have already been called and we just need to remove + * link or free policy here. + */ + if (cpu_is_offline(cpu)) { + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + struct cpumask mask; + + if (!policy) + return 0; + + cpumask_copy(&mask, policy->related_cpus); + cpumask_clear_cpu(cpu, &mask); + + /* + * Free policy only if all policy->related_cpus are removed + * physically. + */ + if (cpumask_intersects(&mask, cpu_present_mask)) { + remove_cpu_dev_symlink(policy, cpu); + return 0; + } + + cpufreq_policy_free(policy, true); return 0; + } ret = __cpufreq_remove_dev_prepare(dev, sif); @@ -1567,6 +1636,10 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy) ret_freq = cpufreq_driver->get(policy->cpu); + /* Updating inactive policies is invalid, so avoid doing that. */ + if (unlikely(policy_is_inactive(policy))) + return ret_freq; + if (ret_freq && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { /* verify no discrepancy between actual and @@ -1656,7 +1729,7 @@ void cpufreq_suspend(void) pr_debug("%s: Suspending Governors\n", __func__); - for_each_policy(policy) { + for_each_active_policy(policy) { if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) pr_err("%s: Failed to stop governor for policy: %p\n", __func__, policy); @@ -1690,7 +1763,7 @@ void cpufreq_resume(void) pr_debug("%s: Resuming Governors\n", __func__); - for_each_policy(policy) { + for_each_active_policy(policy) { if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) pr_err("%s: Failed to resume driver: %p\n", __func__, policy); @@ -1891,7 +1964,7 @@ static int __target_index(struct cpufreq_policy *policy, * Failed after setting to intermediate freq? Driver should have * reverted back to initial frequency and so should we. Check * here for intermediate_freq instead of get_intermediate, in - * case we have't switched to intermediate freq at all. + * case we haven't switched to intermediate freq at all. 
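+ * If we did switch, freqs.old below is set to the intermediate
+ * frequency so the revert starts from the right value.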
*/ if (unlikely(retval && intermediate_freq)) { freqs.old = intermediate_freq; @@ -2092,7 +2165,8 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor) { - int cpu; + struct cpufreq_policy *policy; + unsigned long flags; if (!governor) return; @@ -2100,12 +2174,15 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) if (cpufreq_disabled()) return; - for_each_present_cpu(cpu) { - if (cpu_online(cpu)) - continue; - if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) - strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); + /* clear last_governor for all inactive policies */ + read_lock_irqsave(&cpufreq_driver_lock, flags); + for_each_inactive_policy(policy) { + if (!strcmp(policy->last_governor, governor->name)) { + policy->governor = NULL; + strcpy(policy->last_governor, "\0"); + } } + read_unlock_irqrestore(&cpufreq_driver_lock, flags); mutex_lock(&cpufreq_governor_mutex); list_del(&governor->governor_list); @@ -2304,7 +2381,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, if (dev) { switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: - __cpufreq_add_dev(dev, NULL); + cpufreq_add_dev(dev, NULL); break; case CPU_DOWN_PREPARE: @@ -2316,7 +2393,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, break; case CPU_DOWN_FAILED: - __cpufreq_add_dev(dev, NULL); + cpufreq_add_dev(dev, NULL); break; } } @@ -2336,7 +2413,7 @@ static int cpufreq_boost_set_sw(int state) struct cpufreq_policy *policy; int ret = -EINVAL; - for_each_policy(policy) { + for_each_active_policy(policy) { freq_table = cpufreq_frequency_get_table(policy->cpu); if (freq_table) { ret = cpufreq_frequency_table_cpuinfo(policy, diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 25a70d06c5bf..c86a10c30912 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -148,6 +148,10 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val, return 0; } +static struct notifier_block cs_cpufreq_notifier_block = { + .notifier_call = dbs_cpufreq_notifier, +}; + /************************** sysfs interface ************************/ static struct common_dbs_data cs_dbs_cdata; @@ -317,7 +321,7 @@ static struct attribute_group cs_attr_group_gov_pol = { /************************** sysfs end ************************/ -static int cs_init(struct dbs_data *dbs_data) +static int cs_init(struct dbs_data *dbs_data, bool notify) { struct cs_dbs_tuners *tuners; @@ -336,25 +340,25 @@ static int cs_init(struct dbs_data *dbs_data) dbs_data->tuners = tuners; dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); - mutex_init(&dbs_data->mutex); + + if (notify) + cpufreq_register_notifier(&cs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + return 0; } -static void cs_exit(struct dbs_data *dbs_data) +static void cs_exit(struct dbs_data *dbs_data, bool notify) { + if (notify) + cpufreq_unregister_notifier(&cs_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + kfree(dbs_data->tuners); } define_get_cpu_dbs_routines(cs_cpu_dbs_info); -static struct notifier_block cs_cpufreq_notifier_block = { - .notifier_call = dbs_cpufreq_notifier, -}; - -static struct cs_ops cs_ops = { - .notifier_block = &cs_cpufreq_notifier_block, -}; - static struct common_dbs_data cs_dbs_cdata = { .governor = GOV_CONSERVATIVE, .attr_group_gov_sys = &cs_attr_group_gov_sys, @@ -363,9 +367,9 @@ static struct common_dbs_data 
cs_dbs_cdata = { .get_cpu_dbs_info_s = get_cpu_dbs_info_s, .gov_dbs_timer = cs_dbs_timer, .gov_check_cpu = cs_check_cpu, - .gov_ops = &cs_ops, .init = cs_init, .exit = cs_exit, + .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex), }; static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 1b44496b2d2b..57a39f8a92b7 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -239,211 +239,242 @@ static void set_sampling_rate(struct dbs_data *dbs_data, } } -int cpufreq_governor_dbs(struct cpufreq_policy *policy, - struct common_dbs_data *cdata, unsigned int event) +static int cpufreq_governor_init(struct cpufreq_policy *policy, + struct dbs_data *dbs_data, + struct common_dbs_data *cdata) { - struct dbs_data *dbs_data; - struct od_cpu_dbs_info_s *od_dbs_info = NULL; - struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; - struct od_ops *od_ops = NULL; - struct od_dbs_tuners *od_tuners = NULL; - struct cs_dbs_tuners *cs_tuners = NULL; - struct cpu_dbs_common_info *cpu_cdbs; - unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; - int io_busy = 0; - int rc; - - if (have_governor_per_policy()) - dbs_data = policy->governor_data; - else - dbs_data = cdata->gdbs_data; - - WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)); - - switch (event) { - case CPUFREQ_GOV_POLICY_INIT: - if (have_governor_per_policy()) { - WARN_ON(dbs_data); - } else if (dbs_data) { - dbs_data->usage_count++; - policy->governor_data = dbs_data; - return 0; - } - - dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); - if (!dbs_data) { - pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__); - return -ENOMEM; - } + unsigned int latency; + int ret; - dbs_data->cdata = cdata; - dbs_data->usage_count = 1; - rc = cdata->init(dbs_data); - if (rc) { - pr_err("%s: POLICY_INIT: init() failed\n", __func__); - kfree(dbs_data); - return rc; - } + if (dbs_data) { + if (WARN_ON(have_governor_per_policy())) + return -EINVAL; + dbs_data->usage_count++; + policy->governor_data = dbs_data; + return 0; + } - if (!have_governor_per_policy()) - WARN_ON(cpufreq_get_global_kobject()); + dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); + if (!dbs_data) + return -ENOMEM; - rc = sysfs_create_group(get_governor_parent_kobj(policy), - get_sysfs_attr(dbs_data)); - if (rc) { - cdata->exit(dbs_data); - kfree(dbs_data); - return rc; - } + dbs_data->cdata = cdata; + dbs_data->usage_count = 1; - policy->governor_data = dbs_data; + ret = cdata->init(dbs_data, !policy->governor->initialized); + if (ret) + goto free_dbs_data; - /* policy latency is in ns. Convert it to us first */ - latency = policy->cpuinfo.transition_latency / 1000; - if (latency == 0) - latency = 1; + /* policy latency is in ns. 
Convert it to us first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; - /* Bring kernel and HW constraints together */ - dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, - MIN_LATENCY_MULTIPLIER * latency); - set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, + /* Bring kernel and HW constraints together */ + dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, latency * LATENCY_MULTIPLIER)); - if ((cdata->governor == GOV_CONSERVATIVE) && - (!policy->governor->initialized)) { - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; - - cpufreq_register_notifier(cs_ops->notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); + if (!have_governor_per_policy()) { + if (WARN_ON(cpufreq_get_global_kobject())) { + ret = -EINVAL; + goto cdata_exit; } + cdata->gdbs_data = dbs_data; + } - if (!have_governor_per_policy()) - cdata->gdbs_data = dbs_data; + ret = sysfs_create_group(get_governor_parent_kobj(policy), + get_sysfs_attr(dbs_data)); + if (ret) + goto put_kobj; - return 0; - case CPUFREQ_GOV_POLICY_EXIT: - if (!--dbs_data->usage_count) { - sysfs_remove_group(get_governor_parent_kobj(policy), - get_sysfs_attr(dbs_data)); + policy->governor_data = dbs_data; - if (!have_governor_per_policy()) - cpufreq_put_global_kobject(); + return 0; - if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && - (policy->governor->initialized == 1)) { - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; +put_kobj: + if (!have_governor_per_policy()) { + cdata->gdbs_data = NULL; + cpufreq_put_global_kobject(); + } +cdata_exit: + cdata->exit(dbs_data, !policy->governor->initialized); +free_dbs_data: + kfree(dbs_data); + return ret; +} + +static void cpufreq_governor_exit(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) +{ + struct common_dbs_data *cdata = dbs_data->cdata; - cpufreq_unregister_notifier(cs_ops->notifier_block, - CPUFREQ_TRANSITION_NOTIFIER); - } + policy->governor_data = NULL; + if (!--dbs_data->usage_count) { + sysfs_remove_group(get_governor_parent_kobj(policy), + get_sysfs_attr(dbs_data)); - cdata->exit(dbs_data); - kfree(dbs_data); + if (!have_governor_per_policy()) { cdata->gdbs_data = NULL; + cpufreq_put_global_kobject(); } - policy->governor_data = NULL; - return 0; + cdata->exit(dbs_data, policy->governor->initialized == 1); + kfree(dbs_data); } +} - cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); +static int cpufreq_governor_start(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) +{ + struct common_dbs_data *cdata = dbs_data->cdata; + unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu; + struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); + int io_busy = 0; + + if (!policy->cur) + return -EINVAL; + + if (cdata->governor == GOV_CONSERVATIVE) { + struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { - cs_tuners = dbs_data->tuners; - cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); sampling_rate = cs_tuners->sampling_rate; ignore_nice = cs_tuners->ignore_nice_load; } else { - od_tuners = dbs_data->tuners; - od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); + struct od_dbs_tuners *od_tuners = dbs_data->tuners; + sampling_rate = od_tuners->sampling_rate; ignore_nice = od_tuners->ignore_nice_load; - od_ops = dbs_data->cdata->gov_ops; io_busy = od_tuners->io_is_busy; } - switch (event) { - case CPUFREQ_GOV_START: - if 
(!policy->cur) - return -EINVAL; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j); + unsigned int prev_load; - mutex_lock(&dbs_data->mutex); + j_cdbs->cpu = j; + j_cdbs->cur_policy = policy; + j_cdbs->prev_cpu_idle = + get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy); - for_each_cpu(j, policy->cpus) { - struct cpu_dbs_common_info *j_cdbs = - dbs_data->cdata->get_cpu_cdbs(j); - unsigned int prev_load; + prev_load = (unsigned int)(j_cdbs->prev_cpu_wall - + j_cdbs->prev_cpu_idle); + j_cdbs->prev_load = 100 * prev_load / + (unsigned int)j_cdbs->prev_cpu_wall; - j_cdbs->cpu = j; - j_cdbs->cur_policy = policy; - j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, - &j_cdbs->prev_cpu_wall, io_busy); + if (ignore_nice) + j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; - prev_load = (unsigned int) - (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle); - j_cdbs->prev_load = 100 * prev_load / - (unsigned int) j_cdbs->prev_cpu_wall; + mutex_init(&j_cdbs->timer_mutex); + INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer); + } - if (ignore_nice) - j_cdbs->prev_cpu_nice = - kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + if (cdata->governor == GOV_CONSERVATIVE) { + struct cs_cpu_dbs_info_s *cs_dbs_info = + cdata->get_cpu_dbs_info_s(cpu); - mutex_init(&j_cdbs->timer_mutex); - INIT_DEFERRABLE_WORK(&j_cdbs->work, - dbs_data->cdata->gov_dbs_timer); - } + cs_dbs_info->down_skip = 0; + cs_dbs_info->enable = 1; + cs_dbs_info->requested_freq = policy->cur; + } else { + struct od_ops *od_ops = cdata->gov_ops; + struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu); - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { - cs_dbs_info->down_skip = 0; - cs_dbs_info->enable = 1; - cs_dbs_info->requested_freq = policy->cur; - } else { - od_dbs_info->rate_mult = 1; - od_dbs_info->sample_type = OD_NORMAL_SAMPLE; - od_ops->powersave_bias_init_cpu(cpu); - } + od_dbs_info->rate_mult = 1; + od_dbs_info->sample_type = OD_NORMAL_SAMPLE; + od_ops->powersave_bias_init_cpu(cpu); + } - mutex_unlock(&dbs_data->mutex); + /* Initiate timer time stamp */ + cpu_cdbs->time_stamp = ktime_get(); - /* Initiate timer time stamp */ - cpu_cdbs->time_stamp = ktime_get(); + gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate), + true); + return 0; +} - gov_queue_work(dbs_data, policy, - delay_for_sampling_rate(sampling_rate), true); - break; +static void cpufreq_governor_stop(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) +{ + struct common_dbs_data *cdata = dbs_data->cdata; + unsigned int cpu = policy->cpu; + struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); - case CPUFREQ_GOV_STOP: - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) - cs_dbs_info->enable = 0; + if (cdata->governor == GOV_CONSERVATIVE) { + struct cs_cpu_dbs_info_s *cs_dbs_info = + cdata->get_cpu_dbs_info_s(cpu); - gov_cancel_work(dbs_data, policy); + cs_dbs_info->enable = 0; + } - mutex_lock(&dbs_data->mutex); - mutex_destroy(&cpu_cdbs->timer_mutex); - cpu_cdbs->cur_policy = NULL; + gov_cancel_work(dbs_data, policy); - mutex_unlock(&dbs_data->mutex); + mutex_destroy(&cpu_cdbs->timer_mutex); + cpu_cdbs->cur_policy = NULL; +} - break; +static void cpufreq_governor_limits(struct cpufreq_policy *policy, + struct dbs_data *dbs_data) +{ + struct common_dbs_data *cdata = dbs_data->cdata; + unsigned int cpu = policy->cpu; + struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu); + + if (!cpu_cdbs->cur_policy) + return; + + 
mutex_lock(&cpu_cdbs->timer_mutex); + if (policy->max < cpu_cdbs->cur_policy->cur) + __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max, + CPUFREQ_RELATION_H); + else if (policy->min > cpu_cdbs->cur_policy->cur) + __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min, + CPUFREQ_RELATION_L); + dbs_check_cpu(dbs_data, cpu); + mutex_unlock(&cpu_cdbs->timer_mutex); +} + +int cpufreq_governor_dbs(struct cpufreq_policy *policy, + struct common_dbs_data *cdata, unsigned int event) +{ + struct dbs_data *dbs_data; + int ret = 0; + /* Lock governor to block concurrent initialization of governor */ + mutex_lock(&cdata->mutex); + + if (have_governor_per_policy()) + dbs_data = policy->governor_data; + else + dbs_data = cdata->gdbs_data; + + if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) { + ret = -EINVAL; + goto unlock; + } + + switch (event) { + case CPUFREQ_GOV_POLICY_INIT: + ret = cpufreq_governor_init(policy, dbs_data, cdata); + break; + case CPUFREQ_GOV_POLICY_EXIT: + cpufreq_governor_exit(policy, dbs_data); + break; + case CPUFREQ_GOV_START: + ret = cpufreq_governor_start(policy, dbs_data); + break; + case CPUFREQ_GOV_STOP: + cpufreq_governor_stop(policy, dbs_data); + break; case CPUFREQ_GOV_LIMITS: - mutex_lock(&dbs_data->mutex); - if (!cpu_cdbs->cur_policy) { - mutex_unlock(&dbs_data->mutex); - break; - } - mutex_lock(&cpu_cdbs->timer_mutex); - if (policy->max < cpu_cdbs->cur_policy->cur) - __cpufreq_driver_target(cpu_cdbs->cur_policy, - policy->max, CPUFREQ_RELATION_H); - else if (policy->min > cpu_cdbs->cur_policy->cur) - __cpufreq_driver_target(cpu_cdbs->cur_policy, - policy->min, CPUFREQ_RELATION_L); - dbs_check_cpu(dbs_data, cpu); - mutex_unlock(&cpu_cdbs->timer_mutex); - mutex_unlock(&dbs_data->mutex); + cpufreq_governor_limits(policy, dbs_data); break; } - return 0; + +unlock: + mutex_unlock(&cdata->mutex); + + return ret; } EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index cc401d147e72..34736f5e869d 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h @@ -208,11 +208,16 @@ struct common_dbs_data { void *(*get_cpu_dbs_info_s)(int cpu); void (*gov_dbs_timer)(struct work_struct *work); void (*gov_check_cpu)(int cpu, unsigned int load); - int (*init)(struct dbs_data *dbs_data); - void (*exit)(struct dbs_data *dbs_data); + int (*init)(struct dbs_data *dbs_data, bool notify); + void (*exit)(struct dbs_data *dbs_data, bool notify); /* Governor specific ops, see below */ void *gov_ops; + + /* + * Protects governor's data (struct dbs_data and struct common_dbs_data) + */ + struct mutex mutex; }; /* Governor Per policy data */ @@ -221,9 +226,6 @@ struct dbs_data { unsigned int min_sampling_rate; int usage_count; void *tuners; - - /* dbs_mutex protects dbs_enable in governor start/stop */ - struct mutex mutex; }; /* Governor specific ops, will be passed to dbs_data->gov_ops */ @@ -234,10 +236,6 @@ struct od_ops { void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq); }; -struct cs_ops { - struct notifier_block *notifier_block; -}; - static inline int delay_for_sampling_rate(unsigned int sampling_rate) { int delay = usecs_to_jiffies(sampling_rate); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index ad3f38fd3eb9..3c1e10f2304c 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -475,7 +475,7 @@ static struct attribute_group od_attr_group_gov_pol = { 
/************************** sysfs end ************************/ -static int od_init(struct dbs_data *dbs_data) +static int od_init(struct dbs_data *dbs_data, bool notify) { struct od_dbs_tuners *tuners; u64 idle_time; @@ -513,11 +513,10 @@ static int od_init(struct dbs_data *dbs_data) tuners->io_is_busy = should_io_be_busy(); dbs_data->tuners = tuners; - mutex_init(&dbs_data->mutex); return 0; } -static void od_exit(struct dbs_data *dbs_data) +static void od_exit(struct dbs_data *dbs_data, bool notify) { kfree(dbs_data->tuners); } @@ -541,6 +540,7 @@ static struct common_dbs_data od_dbs_cdata = { .gov_ops = &od_ops, .init = od_init, .exit = od_exit, + .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex), }; static void od_set_powersave_bias(unsigned int powersave_bias) diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 1d723dc8880c..3488c9c175eb 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c @@ -144,7 +144,7 @@ module_param(max_duration, int, 0444); /** - * we can detect a core multipiler from dir0_lsb + * we can detect a core multiplier from dir0_lsb * from GX1 datasheet p.56, * MULT[3:0]: * 0000 = SYSCLK multiplied by 4 (test only) @@ -346,7 +346,7 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy) /* it needs to be assured that at least one supported frequency is * within policy->min and policy->max. If it is not, policy->max - * needs to be increased until one freuqency is supported. + * needs to be increased until one frequency is supported. * policy->min may not be decreased, though. This way we guarantee a * specific processing capacity. */ diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6414661ac1c4..15ada47bb720 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -26,6 +26,7 @@ #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/acpi.h> +#include <linux/vmalloc.h> #include <trace/events/power.h> #include <asm/div64.h> @@ -48,9 +49,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y) return ((int64_t)x * (int64_t)y) >> FRAC_BITS; } -static inline int32_t div_fp(int32_t x, int32_t y) +static inline int32_t div_fp(s64 x, s64 y) { - return div_s64((int64_t)x << FRAC_BITS, y); + return div64_s64((int64_t)x << FRAC_BITS, y); } static inline int ceiling_fp(int32_t x) @@ -68,6 +69,7 @@ struct sample { int32_t core_pct_busy; u64 aperf; u64 mperf; + u64 tsc; int freq; ktime_t time; }; @@ -109,6 +111,7 @@ struct cpudata { ktime_t last_sample_time; u64 prev_aperf; u64 prev_mperf; + u64 prev_tsc; struct sample sample; }; @@ -396,7 +399,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, update_turbo_state(); if (limits.turbo_disabled) { - pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n"); return -EPERM; } @@ -484,7 +487,7 @@ static void __init intel_pstate_sysfs_expose_params(void) static void intel_pstate_hwp_enable(void) { hwp_active++; - pr_info("intel_pstate HWP enabled\n"); + pr_info("intel_pstate: HWP enabled\n"); wrmsrl( MSR_PM_ENABLE, 0x1); } @@ -535,7 +538,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) val |= vid; - wrmsrl(MSR_IA32_PERF_CTL, val); + wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); } #define BYT_BCLK_FREQS 5 @@ -704,19 +707,20 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); } -static 
void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) +static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) { int max_perf, min_perf; - update_turbo_state(); - - intel_pstate_get_min_max(cpu, &min_perf, &max_perf); + if (force) { + update_turbo_state(); - pstate = clamp_t(int, pstate, min_perf, max_perf); + intel_pstate_get_min_max(cpu, &min_perf, &max_perf); - if (pstate == cpu->pstate.current_pstate) - return; + pstate = clamp_t(int, pstate, min_perf, max_perf); + if (pstate == cpu->pstate.current_pstate) + return; + } trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); cpu->pstate.current_pstate = pstate; @@ -733,7 +737,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (pstate_funcs.get_vid) pstate_funcs.get_vid(cpu); - intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false); } static inline void intel_pstate_calc_busy(struct cpudata *cpu) @@ -756,23 +760,28 @@ static inline void intel_pstate_sample(struct cpudata *cpu) { u64 aperf, mperf; unsigned long flags; + u64 tsc; local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + tsc = native_read_tsc(); local_irq_restore(flags); cpu->last_sample_time = cpu->sample.time; cpu->sample.time = ktime_get(); cpu->sample.aperf = aperf; cpu->sample.mperf = mperf; + cpu->sample.tsc = tsc; cpu->sample.aperf -= cpu->prev_aperf; cpu->sample.mperf -= cpu->prev_mperf; + cpu->sample.tsc -= cpu->prev_tsc; intel_pstate_calc_busy(cpu); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; + cpu->prev_tsc = tsc; } static inline void intel_hwp_set_sample_time(struct cpudata *cpu) @@ -794,7 +803,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) { int32_t core_busy, max_pstate, current_pstate, sample_ratio; - u32 duration_us; + s64 duration_us; u32 sample_time; /* @@ -821,8 +830,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu) * to adjust our busyness. 
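+ * (duration_us is an s64 now, so a long stall between samples no
+ * longer overflows the computed delta)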
*/ sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC; - duration_us = (u32) ktime_us_delta(cpu->sample.time, - cpu->last_sample_time); + duration_us = ktime_us_delta(cpu->sample.time, + cpu->last_sample_time); if (duration_us > sample_time * 3) { sample_ratio = div_fp(int_tofp(sample_time), int_tofp(duration_us)); @@ -837,6 +846,10 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) int32_t busy_scaled; struct _pid *pid; signed int ctl; + int from; + struct sample *sample; + + from = cpu->pstate.current_pstate; pid = &cpu->pid; busy_scaled = intel_pstate_get_scaled_busy(cpu); @@ -844,7 +857,17 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) ctl = pid_calc(pid, busy_scaled); /* Negative values of ctl increase the pstate and vice versa */ - intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); + intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true); + + sample = &cpu->sample; + trace_pstate_sample(fp_toint(sample->core_pct_busy), + fp_toint(busy_scaled), + from, + cpu->pstate.current_pstate, + sample->mperf, + sample->aperf, + sample->tsc, + sample->freq); } static void intel_hwp_timer_func(unsigned long __data) @@ -858,21 +881,11 @@ static void intel_hwp_timer_func(unsigned long __data) static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; - struct sample *sample; intel_pstate_sample(cpu); - sample = &cpu->sample; - intel_pstate_adjust_busy_pstate(cpu); - trace_pstate_sample(fp_toint(sample->core_pct_busy), - fp_toint(intel_pstate_get_scaled_busy(cpu)), - cpu->pstate.current_pstate, - sample->mperf, - sample->aperf, - sample->freq); - intel_pstate_set_sample_time(cpu); } @@ -935,7 +948,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) add_timer_on(&cpu->timer, cpunum); - pr_debug("Intel pstate controlling: cpu %d\n", cpunum); + pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); return 0; } @@ -1001,13 +1014,13 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) int cpu_num = policy->cpu; struct cpudata *cpu = all_cpu_data[cpu_num]; - pr_info("intel_pstate CPU %d exiting\n", cpu_num); + pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); del_timer_sync(&all_cpu_data[cpu_num]->timer); if (hwp_active) return; - intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false); } static int intel_pstate_cpu_init(struct cpufreq_policy *policy) diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index 529cfd92158f..5dd95dab580d 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -172,7 +172,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) unsigned int i; #ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); + cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu)); #endif /* Errata workaround */ diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index f9ce7e4bf0fe..5c035d04d827 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -57,13 +57,6 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); static struct cpufreq_driver cpufreq_amd64_driver; -#ifndef CONFIG_SMP -static inline const struct cpumask *cpu_core_mask(int cpu) -{ - return cpumask_of(0); -} -#endif - /* Return a frequency in MHz, given an input fid */ static u32 find_freq_from_fid(u32 fid) { @@ -620,7 +613,7 @@ static int fill_powernow_table(struct 
powernow_k8_data *data, pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); data->powernow_table = powernow_table; - if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) + if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu) print_basics(data); for (j = 0; j < data->numps; j++) @@ -784,7 +777,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) CPUFREQ_TABLE_END; data->powernow_table = powernow_table; - if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) + if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu) print_basics(data); /* notify BIOS that we exist */ @@ -1090,7 +1083,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) if (rc != 0) goto err_out_exit_acpi; - cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); + cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu)); data->available_cores = pol->cpus; /* min/max the cpu is capable of */ diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index e24269ab4e9b..1d99c97defa9 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c @@ -56,7 +56,7 @@ module_param(pxa27x_maxfreq, uint, 0); MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz" "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)"); -typedef struct { +struct pxa_freqs { unsigned int khz; unsigned int membus; unsigned int cccr; @@ -64,7 +64,7 @@ typedef struct { unsigned int cclkcfg; int vmin; int vmax; -} pxa_freqs_t; +}; /* Define the refresh period in mSec for the SDRAM and the number of rows */ #define SDRAM_TREF 64 /* standard 64ms SDRAM */ @@ -86,7 +86,7 @@ static unsigned int sdram_rows; /* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ #define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS -static pxa_freqs_t pxa255_run_freqs[] = +static const struct pxa_freqs pxa255_run_freqs[] = { /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ @@ -98,7 +98,7 @@ static pxa_freqs_t pxa255_run_freqs[] = }; /* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ -static pxa_freqs_t pxa255_turbo_freqs[] = +static const struct pxa_freqs pxa255_turbo_freqs[] = { /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ @@ -153,7 +153,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table ((HT) ? CCLKCFG_HALFTURBO : 0) | \ ((T) ? 
CCLKCFG_TURBO : 0)) -static pxa_freqs_t pxa27x_freqs[] = { +static struct pxa_freqs pxa27x_freqs[] = { {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 }, {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 }, {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 }, @@ -171,7 +171,7 @@ extern unsigned get_clk_frequency_khz(int info); #ifdef CONFIG_REGULATOR -static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) +static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) { int ret = 0; int vmin, vmax; @@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void) } } #else -static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) +static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq) { return 0; } @@ -211,7 +211,7 @@ static void __init pxa_cpufreq_init_voltages(void) { } #endif static void find_freq_tables(struct cpufreq_frequency_table **freq_table, - pxa_freqs_t **pxa_freqs) + const struct pxa_freqs **pxa_freqs) { if (cpu_is_pxa25x()) { if (!pxa255_turbo_table) { @@ -270,7 +270,7 @@ static unsigned int pxa_cpufreq_get(unsigned int cpu) static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) { struct cpufreq_frequency_table *pxa_freqs_table; - pxa_freqs_t *pxa_freq_settings; + const struct pxa_freqs *pxa_freq_settings; unsigned long flags; unsigned int new_freq_cpu, new_freq_mem; unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; @@ -361,7 +361,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy) int i; unsigned int freq; struct cpufreq_frequency_table *pxa255_freq_table; - pxa_freqs_t *pxa255_freqs; + const struct pxa_freqs *pxa255_freqs; /* try to guess pxa27x cpu */ if (cpu_is_pxa27x()) diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 88b21ae0d6b0..358f0752c31e 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -27,11 +27,11 @@ /** * struct cpu_data - * @parent: the parent node of cpu clock + * @pclk: the parent clock of cpu * @table: frequency table */ struct cpu_data { - struct device_node *parent; + struct clk **pclk; struct cpufreq_frequency_table *table; }; @@ -196,7 +196,7 @@ static void freq_table_sort(struct cpufreq_frequency_table *freq_table, static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) { - struct device_node *np; + struct device_node *np, *pnode; int i, count, ret; u32 freq, mask; struct clk *clk; @@ -219,17 +219,23 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_nomem2; } - data->parent = of_parse_phandle(np, "clocks", 0); - if (!data->parent) { + pnode = of_parse_phandle(np, "clocks", 0); + if (!pnode) { pr_err("%s: could not get clock information\n", __func__); goto err_nomem2; } - count = of_property_count_strings(data->parent, "clock-names"); + count = of_property_count_strings(pnode, "clock-names"); + data->pclk = kcalloc(count, sizeof(struct clk *), GFP_KERNEL); + if (!data->pclk) { + pr_err("%s: no memory\n", __func__); + goto err_node; + } + table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL); if (!table) { pr_err("%s: no memory\n", __func__); - goto err_node; + goto err_pclk; } if (fmask) @@ -238,7 +244,8 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) mask = 0x0; for (i = 0; i < count; i++) { - clk = of_clk_get(data->parent, i); + clk = of_clk_get(pnode, i); + data->pclk[i] = clk; freq = clk_get_rate(clk); /* * the clock is valid if its frequency is not masked @@ 
-273,13 +280,16 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = u64temp + 1; of_node_put(np); + of_node_put(pnode); return 0; err_nomem1: kfree(table); +err_pclk: + kfree(data->pclk); err_node: - of_node_put(data->parent); + of_node_put(pnode); err_nomem2: policy->driver_data = NULL; kfree(data); @@ -293,7 +303,7 @@ static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct cpu_data *data = policy->driver_data; - of_node_put(data->parent); + kfree(data->pclk); kfree(data->table); kfree(data); policy->driver_data = NULL; @@ -307,7 +317,7 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy, struct clk *parent; struct cpu_data *data = policy->driver_data; - parent = of_clk_get(data->parent, data->table[index].driver_data); + parent = data->pclk[data->table[index].driver_data]; return clk_set_parent(policy->clk, parent); } diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index e56d632a8b21..37555c6b86a7 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); + cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu)); #endif policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 59372077ec7c..1e3ef5ec4784 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -29,18 +29,25 @@ struct cpuidle_driver powernv_idle_driver = { static int max_idle_state; static struct cpuidle_state *cpuidle_state_table; +static u64 snooze_timeout; +static bool snooze_timeout_en; static int snooze_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { + u64 snooze_exit_time; + local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); + snooze_exit_time = get_tb() + snooze_timeout; ppc64_runlatch_off(); while (!need_resched()) { HMT_low(); HMT_very_low(); + if (snooze_timeout_en && get_tb() > snooze_exit_time) + break; } HMT_medium(); @@ -252,6 +259,11 @@ static int powernv_idle_probe(void) cpuidle_state_table = powernv_states; /* Device tree can indicate more idle states */ max_idle_state = powernv_add_idle_states(); + if (max_idle_state > 1) { + snooze_timeout_en = true; + snooze_timeout = powernv_states[1].target_residency * + tb_ticks_per_usec; + } } else return -ENODEV; diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index bb9e2b6f3ecc..07135e009d8b 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -27,6 +27,8 @@ struct cpuidle_driver pseries_idle_driver = { static int max_idle_state; static struct cpuidle_state *cpuidle_state_table; +static u64 snooze_timeout; +static bool snooze_timeout_en; static inline void idle_loop_prolog(unsigned long *in_purr) { @@ -58,14 +60,18 @@ static int snooze_loop(struct cpuidle_device *dev, int index) { unsigned long in_purr; + u64 snooze_exit_time; idle_loop_prolog(&in_purr); local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); + snooze_exit_time = get_tb() + snooze_timeout; while (!need_resched()) { HMT_low(); HMT_very_low(); + if (snooze_timeout_en && get_tb() > snooze_exit_time) + break; } HMT_medium(); @@ -244,6 +250,11 @@ static int pseries_idle_probe(void) } else return -ENODEV; + if 
(max_idle_state > 1) { + snooze_timeout_en = true; + snooze_timeout = cpuidle_state_table[1].target_residency * + tb_ticks_per_usec; + } return 0; } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 61c417b9e53f..e8e2775c3821 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -65,7 +65,7 @@ int cpuidle_play_dead(void) return -ENODEV; /* Find lowest-power state that supports long-term idle */ - for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--) + for (i = drv->state_count - 1; i >= 0; i--) if (drv->states[i].enter_dead) return drv->states[i].enter_dead(dev, i); @@ -73,16 +73,21 @@ int cpuidle_play_dead(void) } static int find_deepest_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev, bool freeze) + struct cpuidle_device *dev, + unsigned int max_latency, + unsigned int forbidden_flags, + bool freeze) { unsigned int latency_req = 0; - int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; + int i, ret = -ENXIO; - for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { + for (i = 0; i < drv->state_count; i++) { struct cpuidle_state *s = &drv->states[i]; struct cpuidle_state_usage *su = &dev->states_usage[i]; if (s->disabled || su->disable || s->exit_latency <= latency_req + || s->exit_latency > max_latency + || (s->flags & forbidden_flags) || (freeze && !s->enter_freeze)) continue; @@ -92,6 +97,7 @@ static int find_deepest_state(struct cpuidle_driver *drv, return ret; } +#ifdef CONFIG_SUSPEND /** * cpuidle_find_deepest_state - Find the deepest available idle state. * @drv: cpuidle driver for the given CPU. @@ -100,7 +106,7 @@ static int find_deepest_state(struct cpuidle_driver *drv, int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev) { - return find_deepest_state(drv, dev, false); + return find_deepest_state(drv, dev, UINT_MAX, 0, false); } static void enter_freeze_proper(struct cpuidle_driver *drv, @@ -139,18 +145,19 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) * that interrupts won't be enabled when it exits and allows the tick to * be frozen safely. */ - index = find_deepest_state(drv, dev, true); + index = find_deepest_state(drv, dev, UINT_MAX, 0, true); if (index >= 0) enter_freeze_proper(drv, dev, index); return index; } +#endif /* CONFIG_SUSPEND */ /** * cpuidle_enter_state - enter the state and update stats * @dev: cpuidle device for this cpu * @drv: cpuidle driver for this cpu - * @next_state: index into drv->states of the state to enter + * @index: index into the states table in @drv of the state to enter */ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -167,8 +174,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, * local timer will be shut down. If a local timer is used from another * CPU as a broadcast timer, this call may fail if it is not available. */ - if (broadcast && tick_broadcast_enter()) - return -EBUSY; + if (broadcast && tick_broadcast_enter()) { + index = find_deepest_state(drv, dev, target_state->exit_latency, + CPUIDLE_FLAG_TIMER_STOP, false); + if (index < 0) { + default_idle_call(); + return -EBUSY; + } + target_state = &drv->states[index]; + } + + /* Take note of the planned idle state. 
*/ + sched_idle_set_state(target_state); trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ktime_get(); @@ -178,6 +195,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, time_end = ktime_get(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + /* The cpu is no longer idle or about to enter idle. */ + sched_idle_set_state(NULL); + if (broadcast) { if (WARN_ON_ONCE(!irqs_disabled())) local_irq_disable(); @@ -249,7 +269,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ void cpuidle_reflect(struct cpuidle_device *dev, int index) { - if (cpuidle_curr_governor->reflect) + if (cpuidle_curr_governor->reflect && index >= 0) cpuidle_curr_governor->reflect(dev, index); } diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index b8a5fa15ca24..22e4463d1787 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -367,9 +367,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) static void menu_reflect(struct cpuidle_device *dev, int index) { struct menu_device *data = this_cpu_ptr(&menu_devices); + data->last_state_idx = index; - if (index >= 0) - data->needs_update = 1; + data->needs_update = 1; } /** diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 033c0c86f6ec..4044125fb5d5 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -162,10 +162,10 @@ config CRYPTO_GHASH_S390 config CRYPTO_DEV_MV_CESA tristate "Marvell's Cryptographic Engine" depends on PLAT_ORION - select CRYPTO_ALGAPI select CRYPTO_AES - select CRYPTO_BLKCIPHER2 + select CRYPTO_BLKCIPHER select CRYPTO_HASH + select SRAM help This driver allows you to utilize the Cryptographic Engines and Security Accelerator (CESA) which can be found on the Marvell Orion @@ -173,10 +173,27 @@ config CRYPTO_DEV_MV_CESA Currently the driver supports AES in ECB and CBC mode without DMA. +config CRYPTO_DEV_MARVELL_CESA + tristate "New Marvell's Cryptographic Engine driver" + depends on PLAT_ORION || ARCH_MVEBU + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_BLKCIPHER + select CRYPTO_HASH + select SRAM + help + This driver allows you to utilize the Cryptographic Engines and + Security Accelerator (CESA) which can be found on the Armada 370. + This driver supports CPU offload through DMA transfers. + + This driver is aimed at replacing the mv_cesa driver. This will only + happen once it has received proper testing. + config CRYPTO_DEV_NIAGARA2 tristate "Niagara2 Stream Processing Unit driver" select CRYPTO_DES - select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER + select CRYPTO_HASH depends on SPARC64 help Each core of a Niagara2 processor contains a Stream @@ -189,7 +206,6 @@ config CRYPTO_DEV_NIAGARA2 config CRYPTO_DEV_HIFN_795X tristate "Driver HIFN 795x crypto accelerator chips" select CRYPTO_DES - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG depends on PCI @@ -208,8 +224,10 @@ source drivers/crypto/caam/Kconfig config CRYPTO_DEV_TALITOS tristate "Talitos Freescale Security Engine (SEC)" - select CRYPTO_ALGAPI + select CRYPTO_AEAD select CRYPTO_AUTHENC + select CRYPTO_BLKCIPHER + select CRYPTO_HASH select HW_RANDOM depends on FSL_SOC help @@ -222,11 +240,29 @@ config CRYPTO_DEV_TALITOS To compile this driver as a module, choose M here: the module will be called talitos. 
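
Taken together, the cpuidle.c hunks above change what happens when tick_broadcast_enter() fails on entry to a timer-stopping state: rather than returning -EBUSY outright, cpuidle_enter_state() now retries find_deepest_state() with the planned state's exit latency as a cap and CPUIDLE_FLAG_TIMER_STOP forbidden, and only falls back to default_idle_call() when nothing qualifies. Below is a minimal userspace sketch of that selection logic; the struct and helper are simplified stand-ins for the kernel's cpuidle types, not the real API.

/*
 * Sketch of the broadcast-failure fallback shown in the cpuidle.c hunk.
 * Only the selection logic mirrors the patch; the types are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define CPUIDLE_FLAG_TIMER_STOP 0x01 /* state stops the local timer */

struct state {
	unsigned int exit_latency;
	unsigned int flags;
	bool disabled;
};

/* Mirrors find_deepest_state(): deepest enabled state whose exit latency
 * stays within max_latency and whose flags avoid forbidden; -1 if none. */
static int find_deepest(const struct state *states, int count,
			unsigned int max_latency, unsigned int forbidden)
{
	unsigned int latency_req = 0;
	int i, ret = -1;

	for (i = 0; i < count; i++) {
		const struct state *s = &states[i];

		if (s->disabled || s->exit_latency <= latency_req ||
		    s->exit_latency > max_latency || (s->flags & forbidden))
			continue;
		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

int main(void)
{
	struct state states[] = {
		{ .exit_latency = 1 },
		{ .exit_latency = 10 },
		{ .exit_latency = 100, .flags = CPUIDLE_FLAG_TIMER_STOP },
	};
	int index = 2;                 /* planned state stops the timer */
	bool broadcast_failed = true;  /* tick_broadcast_enter() != 0 */

	if (broadcast_failed) {
		/* Retry without timer-stopping states, capped at the
		 * latency of the state originally planned. */
		index = find_deepest(states, 3, states[index].exit_latency,
				     CPUIDLE_FLAG_TIMER_STOP);
		if (index < 0) {
			puts("no usable state: default_idle_call()");
			return 0;
		}
	}
	printf("entering state %d instead\n", index);
	return 0;
}

Run against the three states above, the fallback lands on state 1: deep enough to be worthwhile, shallow enough to keep the local timer alive.
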
+config CRYPTO_DEV_TALITOS1 + bool "SEC1 (SEC 1.0 and SEC Lite 1.2)" + depends on CRYPTO_DEV_TALITOS + depends on PPC_8xx || PPC_82xx + default y + help + Say 'Y' here to use the Freescale Security Engine (SEC) version 1.0 + found on MPC82xx or the Freescale Security Engine (SEC Lite) + version 1.2 found on MPC8xx + +config CRYPTO_DEV_TALITOS2 + bool "SEC2+ (SEC version 2.0 or upper)" + depends on CRYPTO_DEV_TALITOS + default y if !PPC_8xx + help + Say 'Y' here to use the Freescale Security Engine (SEC) + version 2 and following as found on MPC83xx, MPC85xx, etc ... + config CRYPTO_DEV_IXP4XX tristate "Driver for IXP4xx crypto hardware acceleration" depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE select CRYPTO_DES - select CRYPTO_ALGAPI + select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER help @@ -236,7 +272,6 @@ config CRYPTO_DEV_PPC4XX tristate "Driver AMCC PPC4xx crypto accelerator" depends on PPC && 4xx select CRYPTO_HASH - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help This option allows you to have support for AMCC crypto acceleration. @@ -257,7 +292,7 @@ config CRYPTO_DEV_OMAP_AES tristate "Support for OMAP AES hw engine" depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS select CRYPTO_AES - select CRYPTO_BLKCIPHER2 + select CRYPTO_BLKCIPHER help OMAP processors have AES module accelerator. Select this if you want to use the OMAP module for AES algorithms. @@ -266,7 +301,7 @@ config CRYPTO_DEV_OMAP_DES tristate "Support for OMAP DES3DES hw engine" depends on ARCH_OMAP2PLUS select CRYPTO_DES - select CRYPTO_BLKCIPHER2 + select CRYPTO_BLKCIPHER help OMAP processors have DES/3DES module accelerator. Select this if you want to use the OMAP module for DES and 3DES algorithms. Currently @@ -276,9 +311,10 @@ config CRYPTO_DEV_OMAP_DES config CRYPTO_DEV_PICOXCELL tristate "Support for picoXcell IPSEC and Layer2 crypto engines" depends on ARCH_PICOXCELL && HAVE_CLK + select CRYPTO_AEAD select CRYPTO_AES select CRYPTO_AUTHENC - select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER select CRYPTO_DES select CRYPTO_CBC select CRYPTO_ECB @@ -304,7 +340,6 @@ config CRYPTO_DEV_S5P tristate "Support for Samsung S5PV210/Exynos crypto accelerator" depends on ARCH_S5PV210 || ARCH_EXYNOS select CRYPTO_AES - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help This option allows you to have support for S5P crypto acceleration. @@ -312,11 +347,13 @@ config CRYPTO_DEV_S5P algorithms execution. config CRYPTO_DEV_NX - bool "Support for IBM Power7+ in-Nest cryptographic acceleration" - depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN - default n + bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration" + depends on PPC64 help - Support for Power7+ in-Nest cryptographic acceleration. + This enables support for the NX hardware cryptographic accelerator + coprocessor that is in IBM PowerPC P7+ or later processors. This + does not actually enable any drivers, it only allows you to select + which acceleration type (encryption and/or compression) to enable. if CRYPTO_DEV_NX source "drivers/crypto/nx/Kconfig" @@ -325,7 +362,6 @@ endif config CRYPTO_DEV_UX500 tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration" depends on ARCH_U8500 - select CRYPTO_ALGAPI help Driver for ST-Ericsson UX500 crypto engine. 
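
One pattern runs through all of these Kconfig hunks: the blanket "select CRYPTO_ALGAPI" is dropped and each driver instead selects the facade it actually registers against (CRYPTO_BLKCIPHER for block ciphers, CRYPTO_HASH for hashes, CRYPTO_AEAD for AEAD transforms), since those facades already pull in the core algapi code. The hypothetical skeleton below (all demo_ names are invented for illustration) sketches the registration call that creates the CRYPTO_BLKCIPHER requirement in this era's API; a driver registering through crypto_register_ahash() would need CRYPTO_HASH instead.

/*
 * Hypothetical driver skeleton: registering an ablkcipher is what makes
 * a driver depend on the CRYPTO_BLKCIPHER facade, so selecting
 * CRYPTO_ALGAPI on top of it was redundant.
 */
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>

static int demo_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		       unsigned int keylen)
{
	return 0; /* a real driver would program the engine here */
}

static int demo_crypt(struct ablkcipher_request *req)
{
	return 0; /* a real driver would queue the request here */
}

static struct crypto_alg demo_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-demo",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_ablkcipher		= {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= demo_setkey,
		.encrypt	= demo_crypt,
		.decrypt	= demo_crypt,
	},
};

static int __init demo_init(void)
{
	/* This call is the CRYPTO_BLKCIPHER dependency. */
	return crypto_register_alg(&demo_alg);
}

static void __exit demo_exit(void)
{
	crypto_unregister_alg(&demo_alg);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
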
@@ -343,10 +379,7 @@ config CRYPTO_DEV_BFIN_CRC config CRYPTO_DEV_ATMEL_AES tristate "Support for Atmel AES hw accelerator" depends on ARCH_AT91 - select CRYPTO_CBC - select CRYPTO_ECB select CRYPTO_AES - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER select AT_HDMAC help @@ -361,9 +394,6 @@ config CRYPTO_DEV_ATMEL_TDES tristate "Support for Atmel DES/TDES hw accelerator" depends on ARCH_AT91 select CRYPTO_DES - select CRYPTO_CBC - select CRYPTO_ECB - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help Some Atmel processors have DES/TDES hw accelerator. @@ -376,10 +406,7 @@ config CRYPTO_DEV_ATMEL_TDES config CRYPTO_DEV_ATMEL_SHA tristate "Support for Atmel SHA hw accelerator" depends on ARCH_AT91 - select CRYPTO_SHA1 - select CRYPTO_SHA256 - select CRYPTO_SHA512 - select CRYPTO_ALGAPI + select CRYPTO_HASH help Some Atmel processors have SHA1/SHA224/SHA256/SHA384/SHA512 hw accelerator. @@ -392,7 +419,6 @@ config CRYPTO_DEV_ATMEL_SHA config CRYPTO_DEV_CCP bool "Support for AMD Cryptographic Coprocessor" depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM - default n help The AMD Cryptographic Coprocessor provides hardware support for encryption, hashing and related operations. @@ -404,13 +430,11 @@ endif config CRYPTO_DEV_MXS_DCP tristate "Support for Freescale MXS DCP" depends on ARCH_MXS - select CRYPTO_SHA1 - select CRYPTO_SHA256 select CRYPTO_CBC select CRYPTO_ECB select CRYPTO_AES select CRYPTO_BLKCIPHER - select CRYPTO_ALGAPI + select CRYPTO_HASH help The Freescale i.MX23/i.MX28 has SHA1/SHA256 and AES128 CBC/ECB co-processor on the die. @@ -429,7 +453,6 @@ config CRYPTO_DEV_QCE select CRYPTO_CBC select CRYPTO_XTS select CRYPTO_CTR - select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER help This driver supports Qualcomm crypto engine accelerator @@ -439,7 +462,6 @@ config CRYPTO_DEV_QCE config CRYPTO_DEV_VMX bool "Support for VMX cryptographic acceleration instructions" depends on PPC64 - default n help Support for VMX cryptographic acceleration instructions. @@ -449,7 +471,6 @@ config CRYPTO_DEV_IMGTEC_HASH tristate "Imagination Technologies hardware hash accelerator" depends on MIPS || COMPILE_TEST depends on HAS_DMA - select CRYPTO_ALGAPI select CRYPTO_MD5 select CRYPTO_SHA1 select CRYPTO_SHA256 diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index fb84be7e6be5..e35c07a8da85 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o +obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o n2_crypto-y := n2_core.o n2_asm.o diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index e7555ff4cafd..e286e285aa8a 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -45,7 +45,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE config CRYPTO_DEV_FSL_CAAM_INTC bool "Job Ring interrupt coalescing" depends on CRYPTO_DEV_FSL_CAAM_JR - default n help Enable the Job Ring's interrupt coalescing feature. 
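
The caamalg.c hunks further down all repeat one sizing rule before each shared descriptor is built: if the descriptor text plus the key material fits within the 64-word descriptor buffer, the key is appended as immediate data (append_key_as_imm), otherwise it is referenced by DMA address (append_key). A standalone restatement of that check follows, assuming CAAM_CMD_SZ is 4 bytes per descriptor word as in the driver's desc_constr.h; the example descriptor length is illustrative only, since DESC_JOB_IO_LEN itself is not shown in this diff.

/*
 * Restatement of the keys_fit_inline test used throughout caamalg.c.
 * CAAM_CMD_SZ matches the driver; the sample lengths are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define CAAM_CMD_SZ		4			/* bytes per descriptor word */
#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* 64-word h/w buffer */

static bool keys_fit_inline(unsigned int desc_len_bytes,
			    unsigned int enckeylen)
{
	return desc_len_bytes + enckeylen <= CAAM_DESC_BYTES_MAX;
}

int main(void)
{
	/* e.g. DESC_GCM_ENC_LEN (19 words) plus an assumed 12-word
	 * job I/O overhead, checked against a 32-byte AES key */
	unsigned int desc_len = (3 + 16) * CAAM_CMD_SZ + 12 * CAAM_CMD_SZ;

	if (keys_fit_inline(desc_len, 32))
		puts("append_key_as_imm(): key inlined in the descriptor");
	else
		puts("append_key(): key referenced by DMA address");
	return 0;
}

Inlining saves a DMA fetch at job-execution time, which is why the rewritten GCM/RFC4106/RFC4543 descriptors below recompute keys_fit_inline against their new, shorter DESC_*_LEN values.
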
@@ -77,8 +76,9 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API tristate "Register algorithm implementations with the Crypto API" depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR default y - select CRYPTO_ALGAPI + select CRYPTO_AEAD select CRYPTO_AUTHENC + select CRYPTO_BLKCIPHER help Selecting this will offload crypto for users of the scatterlist crypto API (such as the linux native IPSec @@ -115,7 +115,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API config CRYPTO_DEV_FSL_CAAM_DEBUG bool "Enable debug output in CAAM driver" depends on CRYPTO_DEV_FSL_CAAM - default n help Selecting this will enable printing of various debug information in the CAAM driver. diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 29071a156cbe..daca933a82ec 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -65,6 +65,10 @@ /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ #define CAAM_MAX_IV_LENGTH 16 +#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) +#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ + CAAM_CMD_SZ * 4) + /* length of descriptors text */ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) @@ -79,18 +83,16 @@ #define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) #define DESC_GCM_BASE (3 * CAAM_CMD_SZ) -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ) -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ) +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ) +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ) #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ) -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ) -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ) -#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ) +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ) #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ) -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ) -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ) -#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ) +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ) +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \ @@ -98,8 +100,7 @@ #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \ 15 * CAAM_CMD_SZ) -#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \ - CAAM_MAX_KEY_SIZE) +#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) #ifdef DEBUG @@ -258,7 +259,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, static int aead_null_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -273,7 +274,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* aead_encrypt shared descriptor */ + /* old_aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; init_sh_desc(desc, HDR_SHARE_SERIAL); @@ -362,7 +363,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) desc = 
ctx->sh_desc_dec; - /* aead_decrypt shared descriptor */ + /* old_aead_decrypt shared descriptor */ init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip if already shared */ @@ -383,7 +384,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize - authsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); + ctx->authsize + ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); @@ -449,7 +450,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead) static int aead_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; + unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx(aead); struct crypto_tfm *ctfm = crypto_aead_tfm(aead); const char *alg_name = crypto_tfm_alg_name(ctfm); @@ -496,7 +497,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* aead_encrypt shared descriptor */ + /* old_aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; /* Note: Context registers are saved. */ @@ -510,7 +511,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); @@ -518,7 +519,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); - aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); + aead_append_ld_iv(desc, ivsize, ctx1_iv_off); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) @@ -565,7 +566,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - /* aead_decrypt shared descriptor */ + /* old_aead_decrypt shared descriptor */ desc = ctx->sh_desc_dec; /* Note: Context registers are saved. 
*/ @@ -577,7 +578,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize - authsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); + ctx->authsize + ivsize); /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); @@ -586,7 +587,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); - aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); + aead_append_ld_iv(desc, ivsize, ctx1_iv_off); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) @@ -645,20 +646,20 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Generate IV */ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); /* Copy IV to class 1 context */ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | - (tfm->ivsize << MOVE_LEN_SHIFT)); + (ivsize << MOVE_LEN_SHIFT)); /* Return to encryption */ append_operation(desc, ctx->class2_alg_type | @@ -676,10 +677,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Copy iv from outfifo to class 2 fifo */ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | - NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); - append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); /* Load Counter into CONTEXT1 reg */ @@ -698,7 +699,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Not need to reload iv */ - append_seq_fifo_load(desc, tfm->ivsize, + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); /* Will read cryptlen */ @@ -738,7 +739,6 @@ static int aead_setauthsize(struct crypto_aead *authenc, static int gcm_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; @@ -754,7 +754,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ - if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -777,34 +777,34 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + /* if assoclen + cryptlen is ZERO, skip to ICV write */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, 
CAAM_CMD_SZ); + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); + + /* cryptlen = seqinlen - assoclen */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); /* if cryptlen is ZERO jump to zero-payload commands */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); - /* read IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); - - /* if assoclen is ZERO, skip reading the assoc data */ - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); set_jump_tgt_here(desc, zero_assoc_jump_cmd1); - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -814,31 +814,17 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); /* jump the zero-payload commands */ - append_jump(desc, JUMP_TEST_ALL | 7); + append_jump(desc, JUMP_TEST_ALL | 2); /* zero-payload commands */ set_jump_tgt_here(desc, zero_payload_jump_cmd); - /* if assoclen is ZERO, jump to IV reading - is the only input data */ - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); - /* read IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); - /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); - /* jump to ICV writing */ - append_jump(desc, JUMP_TEST_ALL | 2); - - /* read IV - is the only input data */ + /* There is no input data */ set_jump_tgt_here(desc, zero_assoc_jump_cmd2); - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | - FIFOLD_TYPE_LAST1); /* write ICV */ append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | @@ -862,7 +848,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN + + if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -886,33 +872,30 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - 
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ); + /* if assoclen is ZERO, skip reading the assoc data */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); - /* read IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* jump to zero-payload command if cryptlen is zero */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); + /* skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - /* if asoclen is ZERO, skip reading assoc data */ - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + set_jump_tgt_here(desc, zero_assoc_jump_cmd1); - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + /* cryptlen = seqoutlen - assoclen */ + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + + /* jump to zero-payload command if cryptlen is zero */ + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | + JUMP_COND_MATH_Z); + + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* store encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -921,21 +904,9 @@ static int gcm_set_sh_desc(struct crypto_aead *aead) append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); - /* jump the zero-payload commands */ - append_jump(desc, JUMP_TEST_ALL | 4); - /* zero-payload command */ set_jump_tgt_here(desc, zero_payload_jump_cmd); - /* if assoclen is ZERO, jump to ICV reading */ - append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | - JUMP_COND_MATH_Z); - /* read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - set_jump_tgt_here(desc, zero_assoc_jump_cmd2); - /* read ICV */ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); @@ -968,13 +939,11 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) static int rfc4106_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; - u32 *key_jump_cmd, *move_cmd, *write_iv_cmd; + u32 *key_jump_cmd; u32 *desc; - u32 geniv; if (!ctx->enckeylen || !ctx->authsize) return 0; @@ -984,7 +953,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ - if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -1007,29 +976,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); + 
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* assoclen + cryptlen = seqinlen - ivsize */ - append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); - - /* Read Salt */ - append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), - 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); - /* Read AES-GCM-ESP IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + /* cryptlen = seqoutlen - assoclen */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -1083,30 +1044,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize - icvsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, - ctx->authsize + tfm->ivsize); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); - - /* Will write cryptlen bytes */ - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); - /* Read Salt */ - append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), - 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); - /* Read AES-GCM-ESP IV */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); + /* Skip assoc data */ + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); + /* Will write cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); + /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); @@ -1132,107 +1084,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif - /* - * Job Descriptor and Shared Descriptors - * must all fit into the 64-word Descriptor h/w Buffer - */ - keys_fit_inline = false; - if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN + - ctx->split_key_pad_len + ctx->enckeylen <= - CAAM_DESC_BYTES_MAX) - keys_fit_inline = true; - - /* rfc4106_givencrypt shared descriptor */ - desc = ctx->sh_desc_givenc; - - init_sh_desc(desc, HDR_SHARE_SERIAL); - - /* Skip key loading if it is loaded due to sharing */ - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD); - if (keys_fit_inline) - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, - ctx->enckeylen, CLASS_1 | 
KEY_DEST_CLASS_REG); - else - append_key(desc, ctx->key_dma, ctx->enckeylen, - CLASS_1 | KEY_DEST_CLASS_REG); - set_jump_tgt_here(desc, key_jump_cmd); - - /* Generate IV */ - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); - move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); - - /* Copy generated IV to OFIFO */ - write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* Class 1 operation */ - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - - /* ivsize + cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* assoclen = seqinlen - (ivsize + cryptlen) */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* Will write ivsize + cryptlen */ - append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); - - /* Read Salt and generated IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | - FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, move_cmd); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by generated IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* No need to reload iv */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); - - /* Will read cryptlen */ - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - - /* Store generated IV and encrypted data */ - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); - - /* Read payload data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); - - /* Write ICV */ - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT); - - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, - desc_bytes(desc), - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -#ifdef DEBUG - print_hex_dump(KERN_ERR, - "rfc4106 givenc shdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, desc, - desc_bytes(desc), 1); -#endif - return 0; } @@ -1249,14 +1100,12 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc, static int rfc4543_set_sh_desc(struct crypto_aead *aead) { - struct aead_tfm *tfm = &aead->base.crt_aead; struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx->jrdev; bool keys_fit_inline = false; - u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd; + u32 *key_jump_cmd; u32 *read_move_cmd, *write_move_cmd; u32 *desc; - u32 geniv; if (!ctx->enckeylen || !ctx->authsize) return 0; @@ -1266,7 +1115,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ - if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN + + if (DESC_RFC4543_ENC_LEN + 
GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -1289,48 +1138,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - /* Load AES-GMAC ESP IV into Math1 register */ - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | - LDST_CLASS_DECO | tfm->ivsize); - - /* Wait the DMA transaction to finish */ - append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | - (1 << JUMP_OFFSET_SHIFT)); - - /* Overwrite blank immediate AES-GMAC ESP IV data */ - write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* Overwrite blank immediate AAD data */ - write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* assoclen = (seqinlen - ivsize) - cryptlen */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* Read Salt and AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD); - - /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); - - /* Will write cryptlen bytes */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + /* assoclen + cryptlen = seqinlen */ + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, @@ -1342,16 +1151,13 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | (0x8 << MOVE_LEN_SHIFT)); - /* Authenticate AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | tfm->ivsize); - set_move_tgt_here(desc, write_aad_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. 
*/ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ + /* Will read assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - /* Read and write cryptlen bytes */ + /* Will write assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read and write assoclen + cryptlen bytes */ aead_append_src_dst(desc, FIFOLD_TYPE_AAD); set_move_tgt_here(desc, read_move_cmd); @@ -1382,7 +1188,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) * must all fit into the 64-word Descriptor h/w Buffer */ keys_fit_inline = false; - if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN + + if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN + ctx->enckeylen <= CAAM_DESC_BYTES_MAX) keys_fit_inline = true; @@ -1405,28 +1211,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* Load AES-GMAC ESP IV into Math1 register */ - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | - LDST_CLASS_DECO | tfm->ivsize); - - /* Wait the DMA transaction to finish */ - append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | - (1 << JUMP_OFFSET_SHIFT)); - - /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */ - append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize); - - /* Overwrite blank immediate AES-GMAC ESP IV data */ - write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* Overwrite blank immediate AAD data */ - write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* assoclen = (assoclen + cryptlen) - cryptlen */ - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); - append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); + /* assoclen + cryptlen = seqoutlen */ + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, @@ -1438,40 +1224,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | (0x8 << MOVE_LEN_SHIFT)); - /* Read Salt and AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD); - - /* Will read cryptlen bytes */ - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); + /* Will read assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); - /* Will write cryptlen bytes */ - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); - - /* Authenticate AES-GMAC ESP IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | tfm->ivsize); - set_move_tgt_here(desc, write_aad_cmd); - /* Blank commands. Will be overwritten by AES-GMAC ESP IV. 
*/ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ + /* Will write assoclen + cryptlen bytes */ + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); - /* In-snoop cryptlen data */ + /* In-snoop assoclen + cryptlen data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); @@ -1499,135 +1261,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead) desc_bytes(desc), 1); #endif - /* - * Job Descriptor and Shared Descriptors - * must all fit into the 64-word Descriptor h/w Buffer - */ - keys_fit_inline = false; - if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN + - ctx->enckeylen <= CAAM_DESC_BYTES_MAX) - keys_fit_inline = true; - - /* rfc4543_givencrypt shared descriptor */ - desc = ctx->sh_desc_givenc; - - init_sh_desc(desc, HDR_SHARE_SERIAL); - - /* Skip key loading if it is loaded due to sharing */ - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD); - if (keys_fit_inline) - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); - else - append_key(desc, ctx->key_dma, ctx->enckeylen, - CLASS_1 | KEY_DEST_CLASS_REG); - set_jump_tgt_here(desc, key_jump_cmd); - - /* Generate IV */ - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | - NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT); - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); - /* Move generated IV to Math1 register */ - append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 | - (tfm->ivsize << MOVE_LEN_SHIFT)); - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); - - /* Overwrite blank immediate AES-GMAC IV data */ - write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* Overwrite blank immediate AAD data */ - write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* Copy generated IV to OFIFO */ - append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO | - (tfm->ivsize << MOVE_LEN_SHIFT)); - - /* Class 1 operation */ - append_operation(desc, ctx->class1_alg_type | - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); - - /* ivsize + cryptlen = seqoutlen - authsize */ - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); - - /* assoclen = seqinlen - (ivsize + cryptlen) */ - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); - - /* Will write ivsize + cryptlen */ - append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); - - /* - * MOVE_LEN opcode is not available in all SEC HW revisions, - * thus need to do some magic, i.e. self-patch the descriptor - * buffer. - */ - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | - (0x6 << MOVE_LEN_SHIFT)); - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | - (0x8 << MOVE_LEN_SHIFT)); - - /* Read Salt and AES-GMAC generated IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize)); - /* Append Salt */ - append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); - set_move_tgt_here(desc, write_iv_cmd); - /* Blank commands. Will be overwritten by AES-GMAC generated IV. 
*/ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* No need to reload iv */ - append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); - - /* Read assoc data */ - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | - FIFOLD_TYPE_AAD); - - /* Will read cryptlen */ - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); - - /* Authenticate AES-GMAC IV */ - append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | - FIFOLD_TYPE_AAD | tfm->ivsize); - set_move_tgt_here(desc, write_aad_cmd); - /* Blank commands. Will be overwritten by AES-GMAC IV. */ - append_cmd(desc, 0x00000000); - append_cmd(desc, 0x00000000); - /* End of blank commands */ - - /* Read and write cryptlen bytes */ - aead_append_src_dst(desc, FIFOLD_TYPE_AAD); - - set_move_tgt_here(desc, read_move_cmd); - set_move_tgt_here(desc, write_move_cmd); - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); - /* Move payload data to OFIFO */ - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); - - /* Write ICV */ - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | - LDST_SRCDST_BYTE_CONTEXT); - - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, - desc_bytes(desc), - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { - dev_err(jrdev, "unable to map shared descriptor\n"); - return -ENOMEM; - } -#ifdef DEBUG - print_hex_dump(KERN_ERR, - "rfc4543 givenc shdesc@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, desc, - desc_bytes(desc), 1); -#endif - return 0; } @@ -2100,7 +1733,7 @@ struct aead_edesc { int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; - u32 hw_desc[0]; + u32 hw_desc[]; }; /* @@ -2154,6 +1787,16 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc, struct aead_request *req) { + caam_unmap(dev, req->src, req->dst, + edesc->src_nents, edesc->src_chained, edesc->dst_nents, + edesc->dst_chained, 0, 0, + edesc->sec4_sg_dma, edesc->sec4_sg_bytes); +} + +static void old_aead_unmap(struct device *dev, + struct aead_edesc *edesc, + struct aead_request *req) +{ struct crypto_aead *aead = crypto_aead_reqtfm(req); int ivsize = crypto_aead_ivsize(aead); @@ -2184,6 +1827,28 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct aead_request *req = context; struct aead_edesc *edesc; + +#ifdef DEBUG + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = container_of(desc, struct aead_edesc, hw_desc[0]); + + if (err) + caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; #ifdef DEBUG struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2198,7 +1863,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); #ifdef DEBUG print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ", @@ -2223,6 +1888,34 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct aead_request *req = context; struct aead_edesc *edesc; + +#ifdef DEBUG + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); +#endif + + edesc = container_of(desc, struct aead_edesc, hw_desc[0]); + + if (err) + 
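/* decode the SEC status word into a dev_err() diagnostic */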
caam_jr_strstatus(jrdev, err); + + aead_unmap(jrdev, edesc, req); + + /* + * verify hw auth check passed else return -EBADMSG + */ + if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK) + err = -EBADMSG; + + kfree(edesc); + + aead_request_complete(req, err); +} + +static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct aead_request *req = context; + struct aead_edesc *edesc; #ifdef DEBUG struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2246,7 +1939,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); /* * verify hw auth check passed else return -EBADMSG @@ -2342,10 +2035,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, /* * Fill in aead job descriptor */ -static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, - struct aead_edesc *edesc, - struct aead_request *req, - bool all_contig, bool encrypt) +static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr, + struct aead_edesc *edesc, + struct aead_request *req, + bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2425,6 +2118,97 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr, } /* + * Fill in aead job descriptor + */ +static void init_aead_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int authsize = ctx->authsize; + u32 *desc = edesc->hw_desc; + u32 out_options, in_options; + dma_addr_t dst_dma, src_dma; + int len, sec4_sg_index = 0; + dma_addr_t ptr; + u32 *sh_desc; + + sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; + ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; + + len = desc_len(sh_desc); + init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); + + if (all_contig) { + src_dma = sg_dma_address(req->src); + in_options = 0; + } else { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index += edesc->src_nents; + in_options = LDST_SGF; + } + + append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, + in_options); + + dst_dma = src_dma; + out_options = in_options; + + if (unlikely(req->src != req->dst)) { + if (!edesc->dst_nents) { + dst_dma = sg_dma_address(req->dst); + } else { + dst_dma = edesc->sec4_sg_dma + + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; + } + } + + if (encrypt) + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen + authsize, + out_options); + else + append_seq_out_ptr(desc, dst_dma, + req->assoclen + req->cryptlen - authsize, + out_options); + + /* REG3 = assoclen */ + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); +} + +static void init_gcm_job(struct aead_request *req, + struct aead_edesc *edesc, + bool all_contig, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + u32 *desc = edesc->hw_desc; + bool generic_gcm = (ivsize == 12); + unsigned int last; + + init_aead_job(req, edesc, all_contig, encrypt); + + /* BUG This should not be specific to generic GCM. 
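 * Rationale: when assoclen + cryptlen == 0 the IV FIFO LOAD below is the last class-1 data and must carry FIFOLD_TYPE_LAST1, yet the generic_gcm check limits this fixup to 12-byte IVs, hence the BUG note.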
*/ + last = 0; + if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) + last = FIFOLD_TYPE_LAST1; + + /* Read GCM IV */ + append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); + /* Append Salt */ + if (!generic_gcm) + append_data(desc, ctx->key + ctx->enckeylen, 4); + /* Append IV */ + append_data(desc, req->iv, ivsize); + /* End of blank commands */ +} + +/* * Fill in aead givencrypt job descriptor */ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr, @@ -2608,9 +2392,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr, /* * allocate and map the aead extended descriptor */ -static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, - int desc_bytes, bool *all_contig_ptr, - bool encrypt) +static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req, + int desc_bytes, + bool *all_contig_ptr, + bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx(aead); @@ -2713,10 +2498,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_index = 0; if (!all_contig) { if (!is_gcm) { - sg_to_sec4_sg(req->assoc, - assoc_nents, - edesc->sec4_sg + - sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2725,10 +2508,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, sec4_sg_index += 1; if (is_gcm) { - sg_to_sec4_sg(req->assoc, - assoc_nents, - edesc->sec4_sg + - sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2752,7 +2533,124 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, return edesc; } -static int aead_encrypt(struct aead_request *req) +/* + * allocate and map the aead extended descriptor + */ +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, + int desc_bytes, bool *all_contig_ptr, + bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; + int src_nents, dst_nents = 0; + struct aead_edesc *edesc; + int sgc; + bool all_contig = true; + bool src_chained = false, dst_chained = false; + int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; + unsigned int authsize = ctx->authsize; + + if (unlikely(req->dst != req->src)) { + src_nents = sg_count(req->src, req->assoclen + req->cryptlen, + &src_chained); + dst_nents = sg_count(req->dst, + req->assoclen + req->cryptlen + + (encrypt ? authsize : (-authsize)), + &dst_chained); + } else { + src_nents = sg_count(req->src, + req->assoclen + req->cryptlen + + (encrypt ? authsize : 0), + &src_chained); + } + + /* Check if data are contiguous. */ + all_contig = !src_nents; + if (!all_contig) { + src_nents = src_nents ? : 1; + sec4_sg_len = src_nents; + } + + sec4_sg_len += dst_nents; + + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); + + /* allocate space for base edesc and hw desc commands, link tables */ + edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes + + sec4_sg_bytes, GFP_DMA | flags); + if (!edesc) { + dev_err(jrdev, "could not allocate extended descriptor\n"); + return ERR_PTR(-ENOMEM); + } + + if (likely(req->src == req->dst)) { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? 
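/* GNU "?:" shorthand: map src_nents entries, or a single entry when src_nents == 0 (fully contiguous source) */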
: 1, + DMA_BIDIRECTIONAL, src_chained); + if (unlikely(!sgc)) { + dev_err(jrdev, "unable to map source\n"); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + if (unlikely(!sgc)) { + dev_err(jrdev, "unable to map source\n"); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + + sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, + DMA_FROM_DEVICE, dst_chained); + if (unlikely(!sgc)) { + dev_err(jrdev, "unable to map destination\n"); + dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1, + DMA_TO_DEVICE, src_chained); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + } + + edesc->src_nents = src_nents; + edesc->src_chained = src_chained; + edesc->dst_nents = dst_nents; + edesc->dst_chained = dst_chained; + edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + + desc_bytes; + *all_contig_ptr = all_contig; + + sec4_sg_index = 0; + if (!all_contig) { + sg_to_sec4_sg_last(req->src, src_nents, + edesc->sec4_sg + sec4_sg_index, 0); + sec4_sg_index += src_nents; + } + if (dst_nents) { + sg_to_sec4_sg_last(req->dst, dst_nents, + edesc->sec4_sg + sec4_sg_index, 0); + } + + if (!sec4_sg_bytes) + return edesc; + + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + aead_unmap(jrdev, edesc, req); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } + + edesc->sec4_sg_bytes = sec4_sg_bytes; + + return edesc; +} + +static int gcm_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -2763,14 +2661,12 @@ static int aead_encrypt(struct aead_request *req) int ret = 0; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig, true); + edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor */ - init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, - all_contig, true); + init_gcm_job(req, edesc, all_contig, true); #ifdef DEBUG print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -2789,7 +2685,7 @@ static int aead_encrypt(struct aead_request *req) return ret; } -static int aead_decrypt(struct aead_request *req) +static int old_aead_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); @@ -2800,8 +2696,80 @@ static int aead_decrypt(struct aead_request *req) int ret = 0; /* allocate extended descriptor */ - edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN * - CAAM_CMD_SZ, &all_contig, false); + edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig, true); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor */ + old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req, + all_contig, true); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + old_aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int gcm_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct 
crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false); + if (IS_ERR(edesc)) + return PTR_ERR(edesc); + + /* Create and submit job descriptor*/ + init_gcm_job(req, edesc, all_contig, false); +#ifdef DEBUG + print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", + DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, + desc_bytes(edesc->hw_desc), 1); +#endif + + desc = edesc->hw_desc; + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + if (!ret) { + ret = -EINPROGRESS; + } else { + aead_unmap(jrdev, edesc, req); + kfree(edesc); + } + + return ret; +} + +static int old_aead_decrypt(struct aead_request *req) +{ + struct aead_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx->jrdev; + bool all_contig; + u32 *desc; + int ret = 0; + + /* allocate extended descriptor */ + edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN * + CAAM_CMD_SZ, &all_contig, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); @@ -2812,8 +2780,8 @@ static int aead_decrypt(struct aead_request *req) #endif /* Create and submit job descriptor*/ - init_aead_job(ctx->sh_desc_dec, - ctx->sh_desc_dec_dma, edesc, req, all_contig, false); + old_init_aead_job(ctx->sh_desc_dec, + ctx->sh_desc_dec_dma, edesc, req, all_contig, false); #ifdef DEBUG print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, @@ -2821,11 +2789,11 @@ static int aead_decrypt(struct aead_request *req) #endif desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req); if (!ret) { ret = -EINPROGRESS; } else { - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); kfree(edesc); } @@ -2953,8 +2921,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request sec4_sg_index = 0; if (!(contig & GIV_SRC_CONTIG)) { if (!is_gcm) { - sg_to_sec4_sg(req->assoc, assoc_nents, - edesc->sec4_sg + sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2963,8 +2931,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request sec4_sg_index += 1; if (is_gcm) { - sg_to_sec4_sg(req->assoc, assoc_nents, - edesc->sec4_sg + sec4_sg_index, 0); + sg_to_sec4_sg_len(req->assoc, req->assoclen, + edesc->sec4_sg + sec4_sg_index); sec4_sg_index += assoc_nents; } @@ -2999,7 +2967,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request return edesc; } -static int aead_givencrypt(struct aead_givcrypt_request *areq) +static int old_aead_givencrypt(struct aead_givcrypt_request *areq) { struct aead_request *req = &areq->areq; struct aead_edesc *edesc; @@ -3033,11 +3001,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) #endif desc = edesc->hw_desc; - ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); + ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req); if (!ret) { ret = -EINPROGRESS; } else { - aead_unmap(jrdev, edesc, req); + old_aead_unmap(jrdev, edesc, req); kfree(edesc); } @@ -3046,7 +3014,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq) static int aead_null_givencrypt(struct aead_givcrypt_request 
*areq) { - return aead_encrypt(&areq->areq); + return old_aead_encrypt(&areq->areq); } /* @@ -3379,11 +3347,7 @@ struct caam_alg_template { u32 type; union { struct ablkcipher_alg ablkcipher; - struct aead_alg aead; - struct blkcipher_alg blkcipher; - struct cipher_alg cipher; - struct compress_alg compress; - struct rng_alg rng; + struct old_aead_alg aead; } template_u; u32 class1_alg_type; u32 class2_alg_type; @@ -3400,8 +3364,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "<built-in>", .ivsize = NULL_IV_SIZE, @@ -3419,8 +3383,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "<built-in>", .ivsize = NULL_IV_SIZE, @@ -3438,8 +3402,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "<built-in>", .ivsize = NULL_IV_SIZE, @@ -3458,8 +3422,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "<built-in>", .ivsize = NULL_IV_SIZE, @@ -3478,8 +3442,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "<built-in>", .ivsize = NULL_IV_SIZE, @@ -3498,8 +3462,8 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, .givencrypt = aead_null_givencrypt, .geniv = "<built-in>", .ivsize = NULL_IV_SIZE, @@ -3518,9 +3482,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3537,9 +3501,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3556,9 +3520,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, 
+ .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3576,9 +3540,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3596,9 +3560,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3617,9 +3581,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3637,9 +3601,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3656,9 +3620,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3675,9 +3639,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3695,9 +3659,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3715,9 +3679,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3735,9 
+3699,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3755,9 +3719,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3774,9 +3738,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3793,9 +3757,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3813,9 +3777,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3833,9 +3797,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3853,9 +3817,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3873,9 +3837,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, @@ -3892,9 +3856,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = 
aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, @@ -3911,9 +3875,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, @@ -3931,9 +3895,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, @@ -3951,9 +3915,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, @@ -3971,9 +3935,9 @@ static struct caam_alg_template driver_algs[] = { .template_aead = { .setkey = aead_setkey, .setauthsize = aead_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, + .encrypt = old_aead_encrypt, + .decrypt = old_aead_decrypt, + .givencrypt = old_aead_givencrypt, .geniv = "<built-in>", .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, @@ -3983,58 +3947,6 @@ static struct caam_alg_template driver_algs[] = { OP_ALG_AAI_HMAC_PRECOMP, .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC, }, - { - .name = "rfc4106(gcm(aes))", - .driver_name = "rfc4106-gcm-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = rfc4106_setkey, - .setauthsize = rfc4106_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, - }, - { - .name = "rfc4543(gcm(aes))", - .driver_name = "rfc4543-gcm-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = rfc4543_setkey, - .setauthsize = rfc4543_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = aead_givencrypt, - .geniv = "<built-in>", - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, - }, - /* Galois Counter Mode */ - { - .name = "gcm(aes)", - .driver_name = "gcm-aes-caam", - .blocksize = 1, - .type = CRYPTO_ALG_TYPE_AEAD, - .template_aead = { - .setkey = gcm_setkey, - .setauthsize = gcm_setauthsize, - .encrypt = aead_encrypt, - .decrypt = aead_decrypt, - .givencrypt = NULL, - .geniv = "<built-in>", - .ivsize = 12, - .maxauthsize = AES_BLOCK_SIZE, - }, - .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, - }, /* ablkcipher descriptor */ { .name = "cbc(aes)", @@ -4124,21 +4036,84 @@ static struct caam_alg_template driver_algs[] = { } }; -struct caam_crypto_alg { - struct list_head entry; +struct 
caam_alg_entry { int class1_alg_type; int class2_alg_type; int alg_op; +}; + +struct caam_aead_alg { + struct aead_alg aead; + struct caam_alg_entry caam; + bool registered; +}; + +static struct caam_aead_alg driver_aeads[] = { + { + .aead = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc4106_setkey, + .setauthsize = rfc4106_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, + { + .aead = { + .base = { + .cra_name = "rfc4543(gcm(aes))", + .cra_driver_name = "rfc4543-gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = rfc4543_setkey, + .setauthsize = rfc4543_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, + /* Galois Counter Mode */ + { + .aead = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-caam", + .cra_blocksize = 1, + }, + .setkey = gcm_setkey, + .setauthsize = gcm_setauthsize, + .encrypt = gcm_encrypt, + .decrypt = gcm_decrypt, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + }, + .caam = { + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, + }, + }, +}; + +struct caam_crypto_alg { struct crypto_alg crypto_alg; + struct list_head entry; + struct caam_alg_entry caam; }; -static int caam_cra_init(struct crypto_tfm *tfm) +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) { - struct crypto_alg *alg = tfm->__crt_alg; - struct caam_crypto_alg *caam_alg = - container_of(alg, struct caam_crypto_alg, crypto_alg); - struct caam_ctx *ctx = crypto_tfm_ctx(tfm); - ctx->jrdev = caam_jr_alloc(); if (IS_ERR(ctx->jrdev)) { pr_err("Job Ring Device allocation for transform failed\n"); @@ -4146,17 +4121,35 @@ static int caam_cra_init(struct crypto_tfm *tfm) } /* copy descriptor header template value */ - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type; - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op; + ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; + ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; + ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op; return 0; } -static void caam_cra_exit(struct crypto_tfm *tfm) +static int caam_cra_init(struct crypto_tfm *tfm) { + struct crypto_alg *alg = tfm->__crt_alg; + struct caam_crypto_alg *caam_alg = + container_of(alg, struct caam_crypto_alg, crypto_alg); struct caam_ctx *ctx = crypto_tfm_ctx(tfm); + return caam_init_common(ctx, &caam_alg->caam); +} + +static int caam_aead_init(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct caam_aead_alg *caam_alg = + container_of(alg, struct caam_aead_alg, aead); + struct caam_ctx *ctx = crypto_aead_ctx(tfm); + + return caam_init_common(ctx, &caam_alg->caam); +} + +static void caam_exit_common(struct caam_ctx *ctx) +{ if (ctx->sh_desc_enc_dma && !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma)) dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma, @@ -4179,10 +4172,28 @@ static void caam_cra_exit(struct crypto_tfm *tfm) caam_jr_free(ctx->jrdev); } +static void caam_cra_exit(struct crypto_tfm *tfm) +{ + caam_exit_common(crypto_tfm_ctx(tfm)); +} + +static void caam_aead_exit(struct crypto_aead *tfm) +{ + 
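/* same teardown as caam_cra_exit(): caam_exit_common() unmaps any mapped shared descriptors and releases the job ring */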
caam_exit_common(crypto_aead_ctx(tfm)); +} + static void __exit caam_algapi_exit(void) { struct caam_crypto_alg *t_alg, *n; + int i; + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + if (t_alg->registered) + crypto_unregister_aead(&t_alg->aead); + } if (!alg_list.next) return; @@ -4235,13 +4246,26 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template break; } - t_alg->class1_alg_type = template->class1_alg_type; - t_alg->class2_alg_type = template->class2_alg_type; - t_alg->alg_op = template->alg_op; + t_alg->caam.class1_alg_type = template->class1_alg_type; + t_alg->caam.class2_alg_type = template->class2_alg_type; + t_alg->caam.alg_op = template->alg_op; return t_alg; } +static void caam_aead_alg_init(struct caam_aead_alg *t_alg) +{ + struct aead_alg *alg = &t_alg->aead; + + alg->base.cra_module = THIS_MODULE; + alg->base.cra_priority = CAAM_CRA_PRIORITY; + alg->base.cra_ctxsize = sizeof(struct caam_ctx); + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; + + alg->init = caam_aead_init; + alg->exit = caam_aead_exit; +} + static int __init caam_algapi_init(void) { struct device_node *dev_node; @@ -4249,6 +4273,7 @@ static int __init caam_algapi_init(void) struct device *ctrldev; void *priv; int i = 0, err = 0; + bool registered = false; dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); if (!dev_node) { @@ -4295,10 +4320,30 @@ static int __init caam_algapi_init(void) pr_warn("%s alg registration failed\n", t_alg->crypto_alg.cra_driver_name); kfree(t_alg); - } else - list_add_tail(&t_alg->entry, &alg_list); + continue; + } + + list_add_tail(&t_alg->entry, &alg_list); + registered = true; } - if (!list_empty(&alg_list)) + + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { + struct caam_aead_alg *t_alg = driver_aeads + i; + + caam_aead_alg_init(t_alg); + + err = crypto_register_aead(&t_alg->aead); + if (err) { + pr_warn("%s alg registration failed\n", + t_alg->aead.base.cra_driver_name); + continue; + } + + t_alg->registered = true; + registered = true; + } + + if (registered) pr_info("caam algorithms registered in /proc/crypto\n"); return err; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 332c8ef8dae2..dae1e8099969 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req) src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + sec4_sg_src_index, chained); - if (*next_buflen) { + if (*next_buflen) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, *next_buflen, 0); - state->current_buf = !state->current_buf; - } } else { (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN; } + state->current_buf = !state->current_buf; + sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | @@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req) scatterwalk_map_and_copy(next_buf, req->src, to_hash - *buflen, *next_buflen, 0); - state->current_buf = !state->current_buf; } + state->current_buf = !state->current_buf; + sh_len = desc_len(sh_desc); desc = edesc->hw_desc; init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index acd7743e2603..f57f395db33f 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -32,7 +32,7 @@ #include <crypto/des.h> #include 
<crypto/sha.h> #include <crypto/md5.h> -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> #include <crypto/internal/skcipher.h> diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index efba4ccd4fac..efacab7539ef 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -301,7 +301,7 @@ static int caam_remove(struct platform_device *pdev) #endif /* Unmap controller region */ - iounmap(&ctrl); + iounmap(ctrl); return ret; } @@ -496,7 +496,7 @@ static int caam_probe(struct platform_device *pdev) sizeof(struct platform_device *) * rspec, GFP_KERNEL); if (ctrlpriv->jrpdev == NULL) { - iounmap(&ctrl); + iounmap(ctrl); return -ENOMEM; } diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h index 378ddc17f60e..672c97489505 100644 --- a/drivers/crypto/caam/regs.h +++ b/drivers/crypto/caam/regs.h @@ -83,35 +83,35 @@ #endif #endif +/* + * The only user of these wr/rd_reg64 functions is the Job Ring (JR). + * The DMA address registers in the JR are a pair of 32-bit registers. + * The layout is: + * + * base + 0x0000 : most-significant 32 bits + * base + 0x0004 : least-significant 32 bits + * + * The 32-bit version of this core therefore has to write to base + 0x0004 + * to set the 32-bit wide DMA address. This seems to be independent of the + * endianness of the written/read data. + */ + #ifndef CONFIG_64BIT -#ifdef __BIG_ENDIAN -static inline void wr_reg64(u64 __iomem *reg, u64 data) -{ - wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32); - wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull); -} +#define REG64_MS32(reg) ((u32 __iomem *)(reg)) +#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1) -static inline u64 rd_reg64(u64 __iomem *reg) -{ - return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) | - ((u64)rd_reg32((u32 __iomem *)reg + 1)); -} -#else -#ifdef __LITTLE_ENDIAN static inline void wr_reg64(u64 __iomem *reg, u64 data) { - wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32); - wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull); + wr_reg32(REG64_MS32(reg), data >> 32); + wr_reg32(REG64_LS32(reg), data); } static inline u64 rd_reg64(u64 __iomem *reg) { - return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) | - ((u64)rd_reg32((u32 __iomem *)reg)); + return ((u64)rd_reg32(REG64_MS32(reg)) << 32 | + (u64)rd_reg32(REG64_LS32(reg))); } #endif -#endif -#endif /* * jr_outentry diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h index 3b918218aa4c..b68b74cc7b77 100644 --- a/drivers/crypto/caam/sg_sw_sec4.h +++ b/drivers/crypto/caam/sg_sw_sec4.h @@ -55,6 +55,21 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, sec4_sg_ptr->len |= SEC4_SG_LEN_FIN; } +static inline struct sec4_sg_entry *sg_to_sec4_sg_len( + struct scatterlist *sg, unsigned int total, + struct sec4_sg_entry *sec4_sg_ptr) +{ + do { + unsigned int len = min(sg_dma_len(sg), total); + + dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0); + sec4_sg_ptr++; + sg = sg_next(sg); + total -= len; + } while (total); + return sec4_sg_ptr - 1; +} + /* count number of elements in scatterlist */ static inline int __sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) @@ -85,34 +100,41 @@ static inline int sg_count(struct scatterlist *sg_list, int nbytes, return sg_nents; } -static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, - unsigned int nents, enum dma_data_direction dir, - bool 
chained) +static inline void dma_unmap_sg_chained( + struct device *dev, struct scatterlist *sg, unsigned int nents, + enum dma_data_direction dir, bool chained) { if (unlikely(chained)) { int i; for (i = 0; i < nents; i++) { - dma_map_sg(dev, sg, 1, dir); + dma_unmap_sg(dev, sg, 1, dir); sg = sg_next(sg); } - } else { - dma_map_sg(dev, sg, nents, dir); + } else if (nents) { + dma_unmap_sg(dev, sg, nents, dir); } - return nents; } -static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, - unsigned int nents, enum dma_data_direction dir, - bool chained) +static inline int dma_map_sg_chained( + struct device *dev, struct scatterlist *sg, unsigned int nents, + enum dma_data_direction dir, bool chained) { + struct scatterlist *first = sg; + if (unlikely(chained)) { int i; for (i = 0; i < nents; i++) { - dma_unmap_sg(dev, sg, 1, dir); + if (!dma_map_sg(dev, sg, 1, dir)) { + dma_unmap_sg_chained(dev, first, i, dir, + chained); + nents = 0; + break; + } + sg = sg_next(sg); } - } else { - dma_unmap_sg(dev, sg, nents, dir); - } + } else + nents = dma_map_sg(dev, sg, nents, dir); + return nents; } diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 7639ffc36c68..ae38f6b6cc10 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -13,7 +13,6 @@ config CRYPTO_DEV_CCP_CRYPTO tristate "Encryption and hashing acceleration support" depends on CRYPTO_DEV_CCP_DD default m - select CRYPTO_ALGAPI select CRYPTO_HASH select CRYPTO_BLKCIPHER select CRYPTO_AUTHENC diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 71f2e3c89424..d09c6c4af4aa 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -52,8 +52,7 @@ struct ccp_dm_workarea { struct ccp_sg_workarea { struct scatterlist *sg; - unsigned int nents; - unsigned int length; + int nents; struct scatterlist *dma_sg; struct device *dma_dev; @@ -496,8 +495,10 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, if (!sg) return 0; - wa->nents = sg_nents(sg); - wa->length = sg->length; + wa->nents = sg_nents_for_len(sg, len); + if (wa->nents < 0) + return wa->nents; + wa->bytes_left = len; wa->sg_used = 0; diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c index b1c20b2b5647..f2e6de361fd1 100644 --- a/drivers/crypto/ccp/ccp-platform.c +++ b/drivers/crypto/ccp/ccp-platform.c @@ -90,58 +90,6 @@ static struct resource *ccp_find_mmio_area(struct ccp_device *ccp) return NULL; } -#ifdef CONFIG_ACPI -static int ccp_acpi_support(struct ccp_device *ccp) -{ - struct ccp_platform *ccp_platform = ccp->dev_specific; - struct acpi_device *adev = ACPI_COMPANION(ccp->dev); - acpi_handle handle; - acpi_status status; - unsigned long long data; - int cca; - - /* Retrieve the device cache coherency value */ - handle = adev->handle; - do { - status = acpi_evaluate_integer(handle, "_CCA", NULL, &data); - if (!ACPI_FAILURE(status)) { - cca = data; - break; - } - } while (!ACPI_FAILURE(status)); - - if (ACPI_FAILURE(status)) { - dev_err(ccp->dev, "error obtaining acpi coherency value\n"); - return -EINVAL; - } - - ccp_platform->coherent = !!cca; - - return 0; -} -#else /* CONFIG_ACPI */ -static int ccp_acpi_support(struct ccp_device *ccp) -{ - return -EINVAL; -} -#endif - -#ifdef CONFIG_OF -static int ccp_of_support(struct ccp_device *ccp) -{ - struct ccp_platform *ccp_platform = ccp->dev_specific; - - ccp_platform->coherent = of_dma_is_coherent(ccp->dev->of_node); - - return 0; -} -#else -static int 
ccp_of_support(struct ccp_device *ccp) -{ - return -EINVAL; -} -#endif - static int ccp_platform_probe(struct platform_device *pdev) { struct ccp_device *ccp; @@ -174,21 +122,13 @@ static int ccp_platform_probe(struct platform_device *pdev) } ccp->io_regs = ccp->io_map; - if (!dev->dma_mask) - dev->dma_mask = &dev->coherent_dma_mask; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); goto e_err; } - if (ccp_platform->use_acpi) - ret = ccp_acpi_support(ccp); - else - ret = ccp_of_support(ccp); - if (ret) - goto e_err; - + ccp_platform->coherent = device_dma_is_coherent(ccp->dev); if (ccp_platform->coherent) ccp->axcache = CACHE_WB_NO_ALLOC; else diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 48f453555f1f..7ba495f75370 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -25,7 +25,7 @@ #include <crypto/aes.h> #include <crypto/sha.h> #include <crypto/algapi.h> -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> @@ -575,7 +575,8 @@ static int init_tfm_ablk(struct crypto_tfm *tfm) static int init_tfm_aead(struct crypto_tfm *tfm) { - tfm->crt_aead.reqsize = sizeof(struct aead_ctx); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct aead_ctx)); return init_tfm(tfm); } @@ -1096,7 +1097,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) { struct ixp_ctx *ctx = crypto_aead_ctx(tfm); u32 *flags = &tfm->base.crt_flags; - unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize; + unsigned digest_len = crypto_aead_maxauthsize(tfm); int ret; if (!ctx->enckey_len && !ctx->authkey_len) @@ -1138,7 +1139,7 @@ out: static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - int max = crypto_aead_alg(tfm)->maxauthsize >> 2; + int max = crypto_aead_maxauthsize(tfm) >> 2; if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3)) return -EINVAL; diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile new file mode 100644 index 000000000000..0c12b13574dc --- /dev/null +++ b/drivers/crypto/marvell/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o +marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c new file mode 100644 index 000000000000..a432633bced4 --- /dev/null +++ b/drivers/crypto/marvell/cesa.c @@ -0,0 +1,548 @@ +/* + * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA) + * that can be found on the following platform: Orion, Kirkwood, Armada. This + * driver supports the TDMA engine on platforms on which it is available. + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * Author: Arnaud Ebalard <arno@natisbad.org> + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/genalloc.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kthread.h> +#include <linux/mbus.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> + +#include "cesa.h" + +static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); +module_param_named(allhwsupport, allhwsupport, int, 0444); +MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)"); + +struct mv_cesa_dev *cesa_dev; + +static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine) +{ + struct crypto_async_request *req, *backlog; + struct mv_cesa_ctx *ctx; + + spin_lock_bh(&cesa_dev->lock); + backlog = crypto_get_backlog(&cesa_dev->queue); + req = crypto_dequeue_request(&cesa_dev->queue); + engine->req = req; + spin_unlock_bh(&cesa_dev->lock); + + if (!req) + return; + + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + + ctx = crypto_tfm_ctx(req->tfm); + ctx->ops->prepare(req, engine); + ctx->ops->step(req); +} + +static irqreturn_t mv_cesa_int(int irq, void *priv) +{ + struct mv_cesa_engine *engine = priv; + struct crypto_async_request *req; + struct mv_cesa_ctx *ctx; + u32 status, mask; + irqreturn_t ret = IRQ_NONE; + + while (true) { + int res; + + mask = mv_cesa_get_int_mask(engine); + status = readl(engine->regs + CESA_SA_INT_STATUS); + + if (!(status & mask)) + break; + + /* + * TODO: avoid clearing the FPGA_INT_STATUS if this is not + * relevant on some platforms. + */ + writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS); + writel(~status, engine->regs + CESA_SA_INT_STATUS); + + ret = IRQ_HANDLED; + spin_lock_bh(&engine->lock); + req = engine->req; + spin_unlock_bh(&engine->lock); + if (req) { + ctx = crypto_tfm_ctx(req->tfm); + res = ctx->ops->process(req, status & mask); + if (res != -EINPROGRESS) { + spin_lock_bh(&engine->lock); + engine->req = NULL; + mv_cesa_dequeue_req_unlocked(engine); + spin_unlock_bh(&engine->lock); + ctx->ops->cleanup(req); + local_bh_disable(); + req->complete(req, res); + local_bh_enable(); + } else { + ctx->ops->step(req); + } + } + } + + return ret; +} + +int mv_cesa_queue_req(struct crypto_async_request *req) +{ + int ret; + int i; + + spin_lock_bh(&cesa_dev->lock); + ret = crypto_enqueue_request(&cesa_dev->queue, req); + spin_unlock_bh(&cesa_dev->lock); + + if (ret != -EINPROGRESS) + return ret; + + for (i = 0; i < cesa_dev->caps->nengines; i++) { + spin_lock_bh(&cesa_dev->engines[i].lock); + if (!cesa_dev->engines[i].req) + mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]); + spin_unlock_bh(&cesa_dev->engines[i].lock); + } + + return -EINPROGRESS; +} + +static int mv_cesa_add_algs(struct mv_cesa_dev *cesa) +{ + int ret; + int i, j; + + for (i = 0; i < cesa->caps->ncipher_algs; i++) { + ret = crypto_register_alg(cesa->caps->cipher_algs[i]); + if (ret) + goto err_unregister_crypto; + } + + for (i = 0; i < cesa->caps->nahash_algs; i++) { + ret = crypto_register_ahash(cesa->caps->ahash_algs[i]); + if (ret) + goto err_unregister_ahash; + } + + return 0; + +err_unregister_ahash: + for (j = 0; j < i; j++) + crypto_unregister_ahash(cesa->caps->ahash_algs[j]); + i = cesa->caps->ncipher_algs; + +err_unregister_crypto: + for (j = 0; j < i; j++) + crypto_unregister_alg(cesa->caps->cipher_algs[j]); + + return ret; +} + +static void mv_cesa_remove_algs(struct mv_cesa_dev 
*cesa) +{ + int i; + + for (i = 0; i < cesa->caps->nahash_algs; i++) + crypto_unregister_ahash(cesa->caps->ahash_algs[i]); + + for (i = 0; i < cesa->caps->ncipher_algs; i++) + crypto_unregister_alg(cesa->caps->cipher_algs[i]); +} + +static struct crypto_alg *orion_cipher_algs[] = { + &mv_cesa_ecb_des_alg, + &mv_cesa_cbc_des_alg, + &mv_cesa_ecb_des3_ede_alg, + &mv_cesa_cbc_des3_ede_alg, + &mv_cesa_ecb_aes_alg, + &mv_cesa_cbc_aes_alg, +}; + +static struct ahash_alg *orion_ahash_algs[] = { + &mv_md5_alg, + &mv_sha1_alg, + &mv_ahmac_md5_alg, + &mv_ahmac_sha1_alg, +}; + +static struct crypto_alg *armada_370_cipher_algs[] = { + &mv_cesa_ecb_des_alg, + &mv_cesa_cbc_des_alg, + &mv_cesa_ecb_des3_ede_alg, + &mv_cesa_cbc_des3_ede_alg, + &mv_cesa_ecb_aes_alg, + &mv_cesa_cbc_aes_alg, +}; + +static struct ahash_alg *armada_370_ahash_algs[] = { + &mv_md5_alg, + &mv_sha1_alg, + &mv_sha256_alg, + &mv_ahmac_md5_alg, + &mv_ahmac_sha1_alg, + &mv_ahmac_sha256_alg, +}; + +static const struct mv_cesa_caps orion_caps = { + .nengines = 1, + .cipher_algs = orion_cipher_algs, + .ncipher_algs = ARRAY_SIZE(orion_cipher_algs), + .ahash_algs = orion_ahash_algs, + .nahash_algs = ARRAY_SIZE(orion_ahash_algs), + .has_tdma = false, +}; + +static const struct mv_cesa_caps kirkwood_caps = { + .nengines = 1, + .cipher_algs = orion_cipher_algs, + .ncipher_algs = ARRAY_SIZE(orion_cipher_algs), + .ahash_algs = orion_ahash_algs, + .nahash_algs = ARRAY_SIZE(orion_ahash_algs), + .has_tdma = true, +}; + +static const struct mv_cesa_caps armada_370_caps = { + .nengines = 1, + .cipher_algs = armada_370_cipher_algs, + .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), + .ahash_algs = armada_370_ahash_algs, + .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), + .has_tdma = true, +}; + +static const struct mv_cesa_caps armada_xp_caps = { + .nengines = 2, + .cipher_algs = armada_370_cipher_algs, + .ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs), + .ahash_algs = armada_370_ahash_algs, + .nahash_algs = ARRAY_SIZE(armada_370_ahash_algs), + .has_tdma = true, +}; + +static const struct of_device_id mv_cesa_of_match_table[] = { + { .compatible = "marvell,orion-crypto", .data = &orion_caps }, + { .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps }, + { .compatible = "marvell,dove-crypto", .data = &kirkwood_caps }, + { .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps }, + { .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps }, + { .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps }, + { .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps }, + {} +}; +MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); + +static void +mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine, + const struct mbus_dram_target_info *dram) +{ + void __iomem *iobase = engine->regs; + int i; + + for (i = 0; i < 4; i++) { + writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i)); + writel(0, iobase + CESA_TDMA_WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + iobase + CESA_TDMA_WINDOW_CTRL(i)); + writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i)); + } +} + +static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) +{ + struct device *dev = cesa->dev; + struct mv_cesa_dev_dma *dma; + + if (!cesa->caps->has_tdma) + return 0; + + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); + if (!dma) + return -ENOMEM; + + dma->tdma_desc_pool = 
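/* dmam_pool_create() is the device-managed form of dma_pool_create(): pools allocated here are destroyed automatically on driver detach, so no explicit pool teardown is needed */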
dmam_pool_create("tdma_desc", dev, + sizeof(struct mv_cesa_tdma_desc), + 16, 0); + if (!dma->tdma_desc_pool) + return -ENOMEM; + + dma->op_pool = dmam_pool_create("cesa_op", dev, + sizeof(struct mv_cesa_op_ctx), 16, 0); + if (!dma->op_pool) + return -ENOMEM; + + dma->cache_pool = dmam_pool_create("cesa_cache", dev, + CESA_MAX_HASH_BLOCK_SIZE, 1, 0); + if (!dma->cache_pool) + return -ENOMEM; + + dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); + if (!dma->padding_pool) + return -ENOMEM; + + cesa->dma = dma; + + return 0; +} + +static int mv_cesa_get_sram(struct platform_device *pdev, int idx) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + struct mv_cesa_engine *engine = &cesa->engines[idx]; + const char *res_name = "sram"; + struct resource *res; + + engine->pool = of_get_named_gen_pool(cesa->dev->of_node, + "marvell,crypto-srams", + idx); + if (engine->pool) { + engine->sram = gen_pool_dma_alloc(engine->pool, + cesa->sram_size, + &engine->sram_dma); + if (engine->sram) + return 0; + + engine->pool = NULL; + return -ENOMEM; + } + + if (cesa->caps->nengines > 1) { + if (!idx) + res_name = "sram0"; + else + res_name = "sram1"; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + if (!res || resource_size(res) < cesa->sram_size) + return -EINVAL; + + engine->sram = devm_ioremap_resource(cesa->dev, res); + if (IS_ERR(engine->sram)) + return PTR_ERR(engine->sram); + + engine->sram_dma = phys_to_dma(cesa->dev, + (phys_addr_t)res->start); + + return 0; +} + +static void mv_cesa_put_sram(struct platform_device *pdev, int idx) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + struct mv_cesa_engine *engine = &cesa->engines[idx]; + + if (!engine->pool) + return; + + gen_pool_free(engine->pool, (unsigned long)engine->sram, + cesa->sram_size); +} + +static int mv_cesa_probe(struct platform_device *pdev) +{ + const struct mv_cesa_caps *caps = &orion_caps; + const struct mbus_dram_target_info *dram; + const struct of_device_id *match; + struct device *dev = &pdev->dev; + struct mv_cesa_dev *cesa; + struct mv_cesa_engine *engines; + struct resource *res; + int irq, ret, i; + u32 sram_size; + + if (cesa_dev) { + dev_err(&pdev->dev, "Only one CESA device authorized\n"); + return -EEXIST; + } + + if (dev->of_node) { + match = of_match_node(mv_cesa_of_match_table, dev->of_node); + if (!match || !match->data) + return -ENOTSUPP; + + caps = match->data; + } + + if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport) + return -ENOTSUPP; + + cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); + if (!cesa) + return -ENOMEM; + + cesa->caps = caps; + cesa->dev = dev; + + sram_size = CESA_SA_DEFAULT_SRAM_SIZE; + of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size", + &sram_size); + if (sram_size < CESA_SA_MIN_SRAM_SIZE) + sram_size = CESA_SA_MIN_SRAM_SIZE; + + cesa->sram_size = sram_size; + cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines), + GFP_KERNEL); + if (!cesa->engines) + return -ENOMEM; + + spin_lock_init(&cesa->lock); + crypto_init_queue(&cesa->queue, 50); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + cesa->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(cesa->regs)) + return PTR_ERR(cesa->regs); + + ret = mv_cesa_dev_dma_init(cesa); + if (ret) + return ret; + + dram = mv_mbus_dram_info_nooverlap(); + + platform_set_drvdata(pdev, cesa); + + for (i = 0; i < caps->nengines; i++) { + struct mv_cesa_engine *engine = &cesa->engines[i]; + char res_name[7]; + + engine->id = i; 
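/*
 * Illustrative sketch (commentary, not part of the patch): the per-engine
 * lock initialized just below serializes ownership of engine->req between
 * the submission path and the IRQ handler, so a request is only dispatched
 * to an idle engine, exactly as mv_cesa_queue_req() does above:
 *
 *	spin_lock_bh(&engine->lock);
 *	if (!engine->req)
 *		mv_cesa_dequeue_req_unlocked(engine);
 *	spin_unlock_bh(&engine->lock);
 */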
spin_lock_init(&engine->lock); + + ret = mv_cesa_get_sram(pdev, i); + if (ret) + goto err_cleanup; + + irq = platform_get_irq(pdev, i); + if (irq < 0) { + ret = irq; + goto err_cleanup; + } + + /* + * Not all platforms can gate the CESA clocks: do not complain + * if the clock does not exist. + */ + snprintf(res_name, sizeof(res_name), "cesa%d", i); + engine->clk = devm_clk_get(dev, res_name); + if (IS_ERR(engine->clk)) { + engine->clk = devm_clk_get(dev, NULL); + if (IS_ERR(engine->clk)) + engine->clk = NULL; + } + + snprintf(res_name, sizeof(res_name), "cesaz%d", i); + engine->zclk = devm_clk_get(dev, res_name); + if (IS_ERR(engine->zclk)) + engine->zclk = NULL; + + ret = clk_prepare_enable(engine->clk); + if (ret) + goto err_cleanup; + + ret = clk_prepare_enable(engine->zclk); + if (ret) + goto err_cleanup; + + engine->regs = cesa->regs + CESA_ENGINE_OFF(i); + + if (dram && cesa->caps->has_tdma) + mv_cesa_conf_mbus_windows(&cesa->engines[i], dram); + + writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); + writel(CESA_SA_CFG_STOP_DIG_ERR, + cesa->engines[i].regs + CESA_SA_CFG); + writel(engine->sram_dma & CESA_SA_SRAM_MSK, + cesa->engines[i].regs + CESA_SA_DESC_P0); + + ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int, + IRQF_ONESHOT, + dev_name(&pdev->dev), + &cesa->engines[i]); + if (ret) + goto err_cleanup; + } + + cesa_dev = cesa; + + ret = mv_cesa_add_algs(cesa); + if (ret) { + cesa_dev = NULL; + goto err_cleanup; + } + + dev_info(dev, "CESA device successfully registered\n"); + + return 0; + +err_cleanup: + for (i = 0; i < caps->nengines; i++) { + clk_disable_unprepare(cesa->engines[i].zclk); + clk_disable_unprepare(cesa->engines[i].clk); + mv_cesa_put_sram(pdev, i); + } + + return ret; +} + +static int mv_cesa_remove(struct platform_device *pdev) +{ + struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); + int i; + + mv_cesa_remove_algs(cesa); + + for (i = 0; i < cesa->caps->nengines; i++) { + clk_disable_unprepare(cesa->engines[i].zclk); + clk_disable_unprepare(cesa->engines[i].clk); + mv_cesa_put_sram(pdev, i); + } + + return 0; +} + +static struct platform_driver marvell_cesa = { + .probe = mv_cesa_probe, + .remove = mv_cesa_remove, + .driver = { + .owner = THIS_MODULE, + .name = "marvell-cesa", + .of_match_table = mv_cesa_of_match_table, + }, +}; +module_platform_driver(marvell_cesa); + +MODULE_ALIAS("platform:mv_crypto"); +MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>"); +MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>"); +MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h new file mode 100644 index 000000000000..b60698b30d30 --- /dev/null +++ b/drivers/crypto/marvell/cesa.h @@ -0,0 +1,791 @@ +#ifndef __MARVELL_CESA_H__ +#define __MARVELL_CESA_H__ + +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/internal/hash.h> + +#include <linux/crypto.h> +#include <linux/dmapool.h> + +#define CESA_ENGINE_OFF(i) (((i) * 0x2000)) + +#define CESA_TDMA_BYTE_CNT 0x800 +#define CESA_TDMA_SRC_ADDR 0x810 +#define CESA_TDMA_DST_ADDR 0x820 +#define CESA_TDMA_NEXT_ADDR 0x830 + +#define CESA_TDMA_CONTROL 0x840 +#define CESA_TDMA_DST_BURST GENMASK(2, 0) +#define CESA_TDMA_DST_BURST_32B 3 +#define CESA_TDMA_DST_BURST_128B 4 +#define CESA_TDMA_OUT_RD_EN BIT(4) +#define CESA_TDMA_SRC_BURST GENMASK(8, 6) +#define CESA_TDMA_SRC_BURST_32B (3 << 6) +#define CESA_TDMA_SRC_BURST_128B (4 << 6) +#define CESA_TDMA_CHAIN BIT(9) +#define 
CESA_TDMA_BYTE_SWAP BIT(11) +#define CESA_TDMA_NO_BYTE_SWAP BIT(11) +#define CESA_TDMA_EN BIT(12) +#define CESA_TDMA_FETCH_ND BIT(13) +#define CESA_TDMA_ACT BIT(14) + +#define CESA_TDMA_CUR 0x870 +#define CESA_TDMA_ERROR_CAUSE 0x8c8 +#define CESA_TDMA_ERROR_MSK 0x8cc + +#define CESA_TDMA_WINDOW_BASE(x) (((x) * 0x8) + 0xa00) +#define CESA_TDMA_WINDOW_CTRL(x) (((x) * 0x8) + 0xa04) + +#define CESA_IVDIG(x) (0xdd00 + ((x) * 4) + \ + (((x) < 5) ? 0 : 0x14)) + +#define CESA_SA_CMD 0xde00 +#define CESA_SA_CMD_EN_CESA_SA_ACCL0 BIT(0) +#define CESA_SA_CMD_EN_CESA_SA_ACCL1 BIT(1) +#define CESA_SA_CMD_DISABLE_SEC BIT(2) + +#define CESA_SA_DESC_P0 0xde04 + +#define CESA_SA_DESC_P1 0xde14 + +#define CESA_SA_CFG 0xde08 +#define CESA_SA_CFG_STOP_DIG_ERR GENMASK(1, 0) +#define CESA_SA_CFG_DIG_ERR_CONT 0 +#define CESA_SA_CFG_DIG_ERR_SKIP 1 +#define CESA_SA_CFG_DIG_ERR_STOP 3 +#define CESA_SA_CFG_CH0_W_IDMA BIT(7) +#define CESA_SA_CFG_CH1_W_IDMA BIT(8) +#define CESA_SA_CFG_ACT_CH0_IDMA BIT(9) +#define CESA_SA_CFG_ACT_CH1_IDMA BIT(10) +#define CESA_SA_CFG_MULTI_PKT BIT(11) +#define CESA_SA_CFG_PARA_DIS BIT(13) + +#define CESA_SA_ACCEL_STATUS 0xde0c +#define CESA_SA_ST_ACT_0 BIT(0) +#define CESA_SA_ST_ACT_1 BIT(1) + +/* + * CESA_SA_FPGA_INT_STATUS looks like a FPGA leftover and is documented only + * in Errata 4.12. It looks like that it was part of an IRQ-controller in FPGA + * and someone forgot to remove it while switching to the core and moving to + * CESA_SA_INT_STATUS. + */ +#define CESA_SA_FPGA_INT_STATUS 0xdd68 +#define CESA_SA_INT_STATUS 0xde20 +#define CESA_SA_INT_AUTH_DONE BIT(0) +#define CESA_SA_INT_DES_E_DONE BIT(1) +#define CESA_SA_INT_AES_E_DONE BIT(2) +#define CESA_SA_INT_AES_D_DONE BIT(3) +#define CESA_SA_INT_ENC_DONE BIT(4) +#define CESA_SA_INT_ACCEL0_DONE BIT(5) +#define CESA_SA_INT_ACCEL1_DONE BIT(6) +#define CESA_SA_INT_ACC0_IDMA_DONE BIT(7) +#define CESA_SA_INT_ACC1_IDMA_DONE BIT(8) +#define CESA_SA_INT_IDMA_DONE BIT(9) +#define CESA_SA_INT_IDMA_OWN_ERR BIT(10) + +#define CESA_SA_INT_MSK 0xde24 + +#define CESA_SA_DESC_CFG_OP_MAC_ONLY 0 +#define CESA_SA_DESC_CFG_OP_CRYPT_ONLY 1 +#define CESA_SA_DESC_CFG_OP_MAC_CRYPT 2 +#define CESA_SA_DESC_CFG_OP_CRYPT_MAC 3 +#define CESA_SA_DESC_CFG_OP_MSK GENMASK(1, 0) +#define CESA_SA_DESC_CFG_MACM_SHA256 (1 << 4) +#define CESA_SA_DESC_CFG_MACM_HMAC_SHA256 (3 << 4) +#define CESA_SA_DESC_CFG_MACM_MD5 (4 << 4) +#define CESA_SA_DESC_CFG_MACM_SHA1 (5 << 4) +#define CESA_SA_DESC_CFG_MACM_HMAC_MD5 (6 << 4) +#define CESA_SA_DESC_CFG_MACM_HMAC_SHA1 (7 << 4) +#define CESA_SA_DESC_CFG_MACM_MSK GENMASK(6, 4) +#define CESA_SA_DESC_CFG_CRYPTM_DES (1 << 8) +#define CESA_SA_DESC_CFG_CRYPTM_3DES (2 << 8) +#define CESA_SA_DESC_CFG_CRYPTM_AES (3 << 8) +#define CESA_SA_DESC_CFG_CRYPTM_MSK GENMASK(9, 8) +#define CESA_SA_DESC_CFG_DIR_ENC (0 << 12) +#define CESA_SA_DESC_CFG_DIR_DEC (1 << 12) +#define CESA_SA_DESC_CFG_CRYPTCM_ECB (0 << 16) +#define CESA_SA_DESC_CFG_CRYPTCM_CBC (1 << 16) +#define CESA_SA_DESC_CFG_CRYPTCM_MSK BIT(16) +#define CESA_SA_DESC_CFG_3DES_EEE (0 << 20) +#define CESA_SA_DESC_CFG_3DES_EDE (1 << 20) +#define CESA_SA_DESC_CFG_AES_LEN_128 (0 << 24) +#define CESA_SA_DESC_CFG_AES_LEN_192 (1 << 24) +#define CESA_SA_DESC_CFG_AES_LEN_256 (2 << 24) +#define CESA_SA_DESC_CFG_AES_LEN_MSK GENMASK(25, 24) +#define CESA_SA_DESC_CFG_NOT_FRAG (0 << 30) +#define CESA_SA_DESC_CFG_FIRST_FRAG (1 << 30) +#define CESA_SA_DESC_CFG_LAST_FRAG (2 << 30) +#define CESA_SA_DESC_CFG_MID_FRAG (3 << 30) +#define CESA_SA_DESC_CFG_FRAG_MSK GENMASK(31, 30) + +/* + * /-----------\ 0 + * | 
ACCEL CFG |	4 * 8
+ * |-----------| 0x20
+ * | CRYPT KEY |	8 * 4
+ * |-----------| 0x40
+ * | IV IN	|	4 * 4
+ * |-----------| 0x40 (inplace)
+ * | IV BUF	|	4 * 4
+ * |-----------| 0x80
+ * | DATA IN	|	16 * x (max ->max_req_size)
+ * |-----------| 0x80 (inplace operation)
+ * | DATA OUT	|	16 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+
+/*
+ * Hashing memory map:
+ * /-----------\ 0
+ * | ACCEL CFG |	4 * 8
+ * |-----------| 0x20
+ * | Inner IV	|	8 * 4
+ * |-----------| 0x40
+ * | Outer IV	|	8 * 4
+ * |-----------| 0x60
+ * | Output BUF|	8 * 4
+ * |-----------| 0x80
+ * | DATA IN	|	64 * x (max ->max_req_size)
+ * \-----------/ SRAM size
+ */
+
+#define CESA_SA_CFG_SRAM_OFFSET			0x00
+#define CESA_SA_DATA_SRAM_OFFSET		0x80
+
+#define CESA_SA_CRYPT_KEY_SRAM_OFFSET		0x20
+#define CESA_SA_CRYPT_IV_SRAM_OFFSET		0x40
+
+#define CESA_SA_MAC_IIV_SRAM_OFFSET		0x20
+#define CESA_SA_MAC_OIV_SRAM_OFFSET		0x40
+#define CESA_SA_MAC_DIG_SRAM_OFFSET		0x60
+
+#define CESA_SA_DESC_CRYPT_DATA(offset)					\
+	cpu_to_le32((CESA_SA_DATA_SRAM_OFFSET + (offset)) |		\
+		    ((CESA_SA_DATA_SRAM_OFFSET + (offset)) << 16))
+
+#define CESA_SA_DESC_CRYPT_IV(offset)					\
+	cpu_to_le32((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) |		\
+		    ((CESA_SA_CRYPT_IV_SRAM_OFFSET + (offset)) << 16))
+
+#define CESA_SA_DESC_CRYPT_KEY(offset)					\
+	cpu_to_le32(CESA_SA_CRYPT_KEY_SRAM_OFFSET + (offset))
+
+#define CESA_SA_DESC_MAC_DATA(offset)					\
+	cpu_to_le32(CESA_SA_DATA_SRAM_OFFSET + (offset))
+#define CESA_SA_DESC_MAC_DATA_MSK		GENMASK(15, 0)
+
+#define CESA_SA_DESC_MAC_TOTAL_LEN(total_len)	cpu_to_le32((total_len) << 16)
+#define CESA_SA_DESC_MAC_TOTAL_LEN_MSK		GENMASK(31, 16)
+
+#define CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX	0xffff
+
+#define CESA_SA_DESC_MAC_DIGEST(offset)					\
+	cpu_to_le32(CESA_SA_MAC_DIG_SRAM_OFFSET + (offset))
+#define CESA_SA_DESC_MAC_DIGEST_MSK		GENMASK(15, 0)
+
+#define CESA_SA_DESC_MAC_FRAG_LEN(frag_len)	cpu_to_le32((frag_len) << 16)
+#define CESA_SA_DESC_MAC_FRAG_LEN_MSK		GENMASK(31, 16)
+
+#define CESA_SA_DESC_MAC_IV(offset)					\
+	cpu_to_le32((CESA_SA_MAC_IIV_SRAM_OFFSET + (offset)) |		\
+		    ((CESA_SA_MAC_OIV_SRAM_OFFSET + (offset)) << 16))
+
+#define CESA_SA_SRAM_SIZE			2048
+#define CESA_SA_SRAM_PAYLOAD_SIZE		(cesa_dev->sram_size - \
+						 CESA_SA_DATA_SRAM_OFFSET)
+
+#define CESA_SA_DEFAULT_SRAM_SIZE		2048
+#define CESA_SA_MIN_SRAM_SIZE			1024
+
+#define CESA_SA_SRAM_MSK			(2048 - 1)
+
+#define CESA_MAX_HASH_BLOCK_SIZE		64
+#define CESA_HASH_BLOCK_SIZE_MSK		(CESA_MAX_HASH_BLOCK_SIZE - 1)
+
+/**
+ * struct mv_cesa_sec_accel_desc - security accelerator descriptor
+ * @config: engine config
+ * @enc_p: input and output data pointers for a cipher operation
+ * @enc_len: cipher operation length
+ * @enc_key_p: cipher key pointer
+ * @enc_iv: cipher IV pointers
+ * @mac_src_p: input pointer and total hash length
+ * @mac_digest: digest pointer and hash operation length
+ * @mac_iv: hmac IV pointers
+ *
+ * Structure passed to the CESA engine to describe the crypto operation
+ * to be executed.
+ */
+struct mv_cesa_sec_accel_desc {
+	u32 config;
+	u32 enc_p;
+	u32 enc_len;
+	u32 enc_key_p;
+	u32 enc_iv;
+	u32 mac_src_p;
+	u32 mac_digest;
+	u32 mac_iv;
+};
+
+/**
+ * struct mv_cesa_blkcipher_op_ctx - cipher operation context
+ * @key: cipher key
+ * @iv: cipher IV
+ *
+ * Context associated to a cipher operation.
+ */
+struct mv_cesa_blkcipher_op_ctx {
+	u32 key[8];
+	u32 iv[4];
+};
+
+/**
+ * struct mv_cesa_hash_op_ctx - hash or hmac operation context
+ * @iv: hash initialization vectors (initial hash state)
+ * @hash: intermediate hash state
+ *
+ * Context associated to a hash or hmac operation.
+ */
+struct mv_cesa_hash_op_ctx {
+	u32 iv[16];
+	u32 hash[8];
+};
+
+/**
+ * struct mv_cesa_op_ctx - crypto operation context
+ * @desc: CESA descriptor
+ * @ctx: context associated to the crypto operation
+ *
+ * Context associated to a crypto operation.
+ */
+struct mv_cesa_op_ctx {
+	struct mv_cesa_sec_accel_desc desc;
+	union {
+		struct mv_cesa_blkcipher_op_ctx blkcipher;
+		struct mv_cesa_hash_op_ctx hash;
+	} ctx;
+};
+
+/* TDMA descriptor flags */
+#define CESA_TDMA_DST_IN_SRAM			BIT(31)
+#define CESA_TDMA_SRC_IN_SRAM			BIT(30)
+#define CESA_TDMA_TYPE_MSK			GENMASK(29, 0)
+#define CESA_TDMA_DUMMY				0
+#define CESA_TDMA_DATA				1
+#define CESA_TDMA_OP				2
+
+/**
+ * struct mv_cesa_tdma_desc - TDMA descriptor
+ * @byte_cnt: number of bytes to transfer
+ * @src: DMA address of the source
+ * @dst: DMA address of the destination
+ * @next_dma: DMA address of the next TDMA descriptor
+ * @cur_dma: DMA address of this TDMA descriptor
+ * @next: pointer to the next TDMA descriptor
+ * @op: CESA operation attached to this TDMA descriptor
+ * @data: raw data attached to this TDMA descriptor
+ * @flags: flags describing the TDMA transfer. See the
+ *	   "TDMA descriptor flags" section above
+ *
+ * TDMA descriptor used to create a transfer chain describing a crypto
+ * operation.
+ */
+struct mv_cesa_tdma_desc {
+	u32 byte_cnt;
+	u32 src;
+	u32 dst;
+	u32 next_dma;
+	u32 cur_dma;
+	struct mv_cesa_tdma_desc *next;
+	union {
+		struct mv_cesa_op_ctx *op;
+		void *data;
+	};
+	u32 flags;
+};
+
+/**
+ * struct mv_cesa_sg_dma_iter - scatter-gather iterator
+ * @dir: transfer direction
+ * @sg: scatter list
+ * @offset: current position in the scatter list
+ * @op_offset: current position in the crypto operation
+ *
+ * Iterator used to iterate over a scatterlist while creating a TDMA chain for
+ * a crypto operation.
+ */
+struct mv_cesa_sg_dma_iter {
+	enum dma_data_direction dir;
+	struct scatterlist *sg;
+	unsigned int offset;
+	unsigned int op_offset;
+};
+
+/**
+ * struct mv_cesa_dma_iter - crypto operation iterator
+ * @len: the crypto operation length
+ * @offset: current position in the crypto operation
+ * @op_len: sub-operation length (the crypto engine can only act on 2 KB
+ *	    chunks)
+ *
+ * Iterator used to create a TDMA chain for a given crypto operation.
+ */
+struct mv_cesa_dma_iter {
+	unsigned int len;
+	unsigned int offset;
+	unsigned int op_len;
+};
+
+/**
+ * struct mv_cesa_tdma_chain - TDMA chain
+ * @first: first entry in the TDMA chain
+ * @last: last entry in the TDMA chain
+ *
+ * Stores a TDMA chain for a specific crypto operation.
+ */
+struct mv_cesa_tdma_chain {
+	struct mv_cesa_tdma_desc *first;
+	struct mv_cesa_tdma_desc *last;
+};
+
+struct mv_cesa_engine;
+
+/**
+ * struct mv_cesa_caps - CESA device capabilities
+ * @nengines: number of engines
+ * @has_tdma: whether this device has a TDMA block
+ * @cipher_algs: supported cipher algorithms
+ * @ncipher_algs: number of supported cipher algorithms
+ * @ahash_algs: supported hash algorithms
+ * @nahash_algs: number of supported hash algorithms
+ *
+ * Structure used to describe CESA device capabilities.
+ */
+struct mv_cesa_caps {
+	int nengines;
+	bool has_tdma;
+	struct crypto_alg **cipher_algs;
+	int ncipher_algs;
+	struct ahash_alg **ahash_algs;
+	int nahash_algs;
+};
+
+/**
+ * struct mv_cesa_dev_dma - DMA pools
+ * @tdma_desc_pool: TDMA desc pool
+ * @op_pool: crypto operation pool
+ * @cache_pool: data cache pool (used by hash implementation when the
+ *		hash request is smaller than the hash block size)
+ * @padding_pool: padding pool (used by hash implementation when hardware
+ *		  padding cannot be used)
+ *
+ * Structure containing the different DMA pools used by this driver.
+ */
+struct mv_cesa_dev_dma {
+	struct dma_pool *tdma_desc_pool;
+	struct dma_pool *op_pool;
+	struct dma_pool *cache_pool;
+	struct dma_pool *padding_pool;
+};
+
+/**
+ * struct mv_cesa_dev - CESA device
+ * @caps: device capabilities
+ * @regs: device registers
+ * @dev: pointer to the device backing this CESA instance
+ * @sram_size: usable SRAM size
+ * @lock: device lock
+ * @queue: crypto request queue
+ * @engines: array of engines
+ * @dma: dma pools
+ *
+ * Structure storing CESA device information.
+ */
+struct mv_cesa_dev {
+	const struct mv_cesa_caps *caps;
+	void __iomem *regs;
+	struct device *dev;
+	unsigned int sram_size;
+	spinlock_t lock;
+	struct crypto_queue queue;
+	struct mv_cesa_engine *engines;
+	struct mv_cesa_dev_dma *dma;
+};
+
+/**
+ * struct mv_cesa_engine - CESA engine
+ * @id: engine id
+ * @regs: engine registers
+ * @sram: SRAM memory region
+ * @sram_dma: DMA address of the SRAM memory region
+ * @lock: engine lock
+ * @req: current crypto request
+ * @clk: engine clk
+ * @zclk: engine zclk
+ * @max_req_len: maximum chunk length (useful to create the TDMA chain)
+ * @int_mask: interrupt mask cache
+ * @pool: memory pool pointing to the memory region reserved in
+ *	  SRAM
+ *
+ * Structure storing CESA engine information.
+ */
+struct mv_cesa_engine {
+	int id;
+	void __iomem *regs;
+	void __iomem *sram;
+	dma_addr_t sram_dma;
+	spinlock_t lock;
+	struct crypto_async_request *req;
+	struct clk *clk;
+	struct clk *zclk;
+	size_t max_req_len;
+	u32 int_mask;
+	struct gen_pool *pool;
+};
+
+/**
+ * struct mv_cesa_req_ops - CESA request operations
+ * @prepare: prepare a request to be executed on the specified engine
+ * @process: process a request chunk result (should return 0 if the
+ *	     operation is complete, -EINPROGRESS if it needs more steps
+ *	     or an error code)
+ * @step: launch the crypto operation on the next chunk
+ * @cleanup: cleanup the crypto request (release associated data)
+ */
+struct mv_cesa_req_ops {
+	void (*prepare)(struct crypto_async_request *req,
+			struct mv_cesa_engine *engine);
+	int (*process)(struct crypto_async_request *req, u32 status);
+	void (*step)(struct crypto_async_request *req);
+	void (*cleanup)(struct crypto_async_request *req);
+};
+
+/**
+ * struct mv_cesa_ctx - CESA operation context
+ * @ops: crypto operations
+ *
+ * Base context structure inherited by operation specific ones.
+ */
+struct mv_cesa_ctx {
+	const struct mv_cesa_req_ops *ops;
+};
+
+/**
+ * struct mv_cesa_hash_ctx - CESA hash operation context
+ * @base: base context structure
+ *
+ * Hash context structure.
+ */
+struct mv_cesa_hash_ctx {
+	struct mv_cesa_ctx base;
+};
+
+/**
+ * struct mv_cesa_hmac_ctx - CESA hmac operation context
+ * @base: base context structure
+ * @iv: initialization vectors
+ *
+ * HMAC context structure.
+ */ +struct mv_cesa_hmac_ctx { + struct mv_cesa_ctx base; + u32 iv[16]; +}; + +/** + * enum mv_cesa_req_type - request type definitions + * @CESA_STD_REQ: standard request + * @CESA_DMA_REQ: DMA request + */ +enum mv_cesa_req_type { + CESA_STD_REQ, + CESA_DMA_REQ, +}; + +/** + * struct mv_cesa_req - CESA request + * @type: request type + * @engine: engine associated with this request + */ +struct mv_cesa_req { + enum mv_cesa_req_type type; + struct mv_cesa_engine *engine; +}; + +/** + * struct mv_cesa_tdma_req - CESA TDMA request + * @base: base information + * @chain: TDMA chain + */ +struct mv_cesa_tdma_req { + struct mv_cesa_req base; + struct mv_cesa_tdma_chain chain; +}; + +/** + * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard + * requests + * @iter: sg mapping iterator + * @offset: current offset in the SG entry mapped in memory + */ +struct mv_cesa_sg_std_iter { + struct sg_mapping_iter iter; + unsigned int offset; +}; + +/** + * struct mv_cesa_ablkcipher_std_req - cipher standard request + * @base: base information + * @op: operation context + * @offset: current operation offset + * @size: size of the crypto operation + */ +struct mv_cesa_ablkcipher_std_req { + struct mv_cesa_req base; + struct mv_cesa_op_ctx op; + unsigned int offset; + unsigned int size; + bool skip_ctx; +}; + +/** + * struct mv_cesa_ablkcipher_req - cipher request + * @req: type specific request information + * @src_nents: number of entries in the src sg list + * @dst_nents: number of entries in the dest sg list + */ +struct mv_cesa_ablkcipher_req { + union { + struct mv_cesa_req base; + struct mv_cesa_tdma_req dma; + struct mv_cesa_ablkcipher_std_req std; + } req; + int src_nents; + int dst_nents; +}; + +/** + * struct mv_cesa_ahash_std_req - standard hash request + * @base: base information + * @offset: current operation offset + */ +struct mv_cesa_ahash_std_req { + struct mv_cesa_req base; + unsigned int offset; +}; + +/** + * struct mv_cesa_ahash_dma_req - DMA hash request + * @base: base information + * @padding: padding buffer + * @padding_dma: DMA address of the padding buffer + * @cache_dma: DMA address of the cache buffer + */ +struct mv_cesa_ahash_dma_req { + struct mv_cesa_tdma_req base; + u8 *padding; + dma_addr_t padding_dma; + dma_addr_t cache_dma; +}; + +/** + * struct mv_cesa_ahash_req - hash request + * @req: type specific request information + * @cache: cache buffer + * @cache_ptr: write pointer in the cache buffer + * @len: hash total length + * @src_nents: number of entries in the scatterlist + * @last_req: define whether the current operation is the last one + * or not + * @state: hash state + */ +struct mv_cesa_ahash_req { + union { + struct mv_cesa_req base; + struct mv_cesa_ahash_dma_req dma; + struct mv_cesa_ahash_std_req std; + } req; + struct mv_cesa_op_ctx op_tmpl; + u8 *cache; + unsigned int cache_ptr; + u64 len; + int src_nents; + bool last_req; + __be32 state[8]; +}; + +/* CESA functions */ + +extern struct mv_cesa_dev *cesa_dev; + +static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op, + u32 cfg, u32 mask) +{ + op->desc.config &= cpu_to_le32(~mask); + op->desc.config |= cpu_to_le32(cfg); +} + +static inline u32 mv_cesa_get_op_cfg(struct mv_cesa_op_ctx *op) +{ + return le32_to_cpu(op->desc.config); +} + +static inline void mv_cesa_set_op_cfg(struct mv_cesa_op_ctx *op, u32 cfg) +{ + op->desc.config = cpu_to_le32(cfg); +} + +static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine, + struct mv_cesa_op_ctx *op) +{ + u32 offset = 
engine->sram_dma & CESA_SA_SRAM_MSK; + + op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset); + op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset); + op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset); + op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK; + op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset); + op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK; + op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset); + op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset); +} + +static inline void mv_cesa_set_crypt_op_len(struct mv_cesa_op_ctx *op, int len) +{ + op->desc.enc_len = cpu_to_le32(len); +} + +static inline void mv_cesa_set_mac_op_total_len(struct mv_cesa_op_ctx *op, + int len) +{ + op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK; + op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len); +} + +static inline void mv_cesa_set_mac_op_frag_len(struct mv_cesa_op_ctx *op, + int len) +{ + op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK; + op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len); +} + +static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine, + u32 int_mask) +{ + if (int_mask == engine->int_mask) + return; + + writel(int_mask, engine->regs + CESA_SA_INT_MSK); + engine->int_mask = int_mask; +} + +static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine) +{ + return engine->int_mask; +} + +int mv_cesa_queue_req(struct crypto_async_request *req); + +/* TDMA functions */ + +static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, + unsigned int len) +{ + iter->len = len; + iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE); + iter->offset = 0; +} + +static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter, + struct scatterlist *sg, + enum dma_data_direction dir) +{ + iter->op_offset = 0; + iter->offset = 0; + iter->sg = sg; + iter->dir = dir; +} + +static inline unsigned int +mv_cesa_req_dma_iter_transfer_len(struct mv_cesa_dma_iter *iter, + struct mv_cesa_sg_dma_iter *sgiter) +{ + return min(iter->op_len - sgiter->op_offset, + sg_dma_len(sgiter->sg) - sgiter->offset); +} + +bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *chain, + struct mv_cesa_sg_dma_iter *sgiter, + unsigned int len); + +static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter) +{ + iter->offset += iter->op_len; + iter->op_len = min(iter->len - iter->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + return iter->op_len; +} + +void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq); + +static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, + u32 status) +{ + if (!(status & CESA_SA_INT_ACC0_IDMA_DONE)) + return -EINPROGRESS; + + if (status & CESA_SA_INT_IDMA_OWN_ERR) + return -EINVAL; + + return 0; +} + +void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, + struct mv_cesa_engine *engine); + +void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq); + +static inline void +mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain) +{ + memset(chain, 0, sizeof(*chain)); +} + +struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, + const struct mv_cesa_op_ctx *op_templ, + bool skip_ctx, + gfp_t flags); + +int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, + dma_addr_t dst, dma_addr_t src, u32 size, + u32 flags, gfp_t gfp_flags); + +int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, + u32 flags); + +int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags); + +int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, + struct 
mv_cesa_dma_iter *dma_iter, + struct mv_cesa_sg_dma_iter *sgiter, + gfp_t gfp_flags); + +/* Algorithm definitions */ + +extern struct ahash_alg mv_md5_alg; +extern struct ahash_alg mv_sha1_alg; +extern struct ahash_alg mv_sha256_alg; +extern struct ahash_alg mv_ahmac_md5_alg; +extern struct ahash_alg mv_ahmac_sha1_alg; +extern struct ahash_alg mv_ahmac_sha256_alg; + +extern struct crypto_alg mv_cesa_ecb_des_alg; +extern struct crypto_alg mv_cesa_cbc_des_alg; +extern struct crypto_alg mv_cesa_ecb_des3_ede_alg; +extern struct crypto_alg mv_cesa_cbc_des3_ede_alg; +extern struct crypto_alg mv_cesa_ecb_aes_alg; +extern struct crypto_alg mv_cesa_cbc_aes_alg; + +#endif /* __MARVELL_CESA_H__ */ diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c new file mode 100644 index 000000000000..0745cf3b9c0e --- /dev/null +++ b/drivers/crypto/marvell/cipher.c @@ -0,0 +1,797 @@ +/* + * Cipher algorithms supported by the CESA: DES, 3DES and AES. + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * Author: Arnaud Ebalard <arno@natisbad.org> + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <crypto/aes.h> +#include <crypto/des.h> + +#include "cesa.h" + +struct mv_cesa_des_ctx { + struct mv_cesa_ctx base; + u8 key[DES_KEY_SIZE]; +}; + +struct mv_cesa_des3_ctx { + struct mv_cesa_ctx base; + u8 key[DES3_EDE_KEY_SIZE]; +}; + +struct mv_cesa_aes_ctx { + struct mv_cesa_ctx base; + struct crypto_aes_ctx aes; +}; + +struct mv_cesa_ablkcipher_dma_iter { + struct mv_cesa_dma_iter base; + struct mv_cesa_sg_dma_iter src; + struct mv_cesa_sg_dma_iter dst; +}; + +static inline void +mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, + struct ablkcipher_request *req) +{ + mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); + mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); + mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); +} + +static inline bool +mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) +{ + iter->src.op_offset = 0; + iter->dst.op_offset = 0; + + return mv_cesa_req_dma_iter_next_op(&iter->base); +} + +static inline void +mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + + if (req->dst != req->src) { + dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + } else { + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_BIDIRECTIONAL); + } + mv_cesa_dma_cleanup(&creq->req.dma); +} + +static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ablkcipher_dma_cleanup(req); +} + +static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + size_t len = min_t(size_t, req->nbytes - sreq->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + len = sg_pcopy_to_buffer(req->src, creq->src_nents, + 
engine->sram + CESA_SA_DATA_SRAM_OFFSET, + len, sreq->offset); + + sreq->size = len; + mv_cesa_set_crypt_op_len(&sreq->op, len); + + /* FIXME: only update enc_len field */ + if (!sreq->skip_ctx) { + memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); + sreq->skip_ctx = true; + } else { + memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc)); + } + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); + writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, + u32 status) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + size_t len; + + len = sg_pcopy_from_buffer(req->dst, creq->dst_nents, + engine->sram + CESA_SA_DATA_SRAM_OFFSET, + sreq->size, sreq->offset); + + sreq->offset += len; + if (sreq->offset < req->nbytes) + return -EINPROGRESS; + + return 0; +} + +static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, + u32 status) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + int ret; + + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_dma_process(&creq->req.dma, status); + else + ret = mv_cesa_ablkcipher_std_process(ablkreq, status); + + if (ret) + return ret; + + memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, + crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq))); + + return 0; +} + +static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->req.dma); + else + mv_cesa_ablkcipher_std_step(ablkreq); +} + +static inline void +mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_tdma_req *dreq = &creq->req.dma; + + mv_cesa_dma_prepare(dreq, dreq->base.engine); +} + +static inline void +mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + + sreq->size = 0; + sreq->offset = 0; + mv_cesa_adjust_op(engine, &sreq->op); + memcpy(engine->sram, &sreq->op, sizeof(sreq->op)); +} + +static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, + struct mv_cesa_engine *engine) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); + + creq->req.base.engine = engine; + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ablkcipher_dma_prepare(ablkreq); + else + mv_cesa_ablkcipher_std_prepare(ablkreq); +} + +static inline void +mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) +{ + struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); + + mv_cesa_ablkcipher_cleanup(ablkreq); +} + +static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { + .step = mv_cesa_ablkcipher_step, + .process = 
mv_cesa_ablkcipher_process, + .prepare = mv_cesa_ablkcipher_prepare, + .cleanup = mv_cesa_ablkcipher_req_cleanup, +}; + +static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) +{ + struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->base.ops = &mv_cesa_ablkcipher_req_ops; + + tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req); + + return 0; +} + +static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); + int remaining; + int offset; + int ret; + int i; + + ret = crypto_aes_expand_key(&ctx->aes, key, len); + if (ret) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return ret; + } + + remaining = (ctx->aes.key_length - 16) / 4; + offset = ctx->aes.key_length + 24 - remaining; + for (i = 0; i < remaining; i++) + ctx->aes.key_dec[4 + i] = + cpu_to_le32(ctx->aes.key_enc[offset + i]); + + return 0; +} + +static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int ret; + + if (len != DES_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + ret = des_ekey(tmp, key); + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + memcpy(ctx->key, key, DES_KEY_SIZE); + + return 0; +} + +static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher, + const u8 *key, unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); + + if (len != DES3_EDE_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + memcpy(ctx->key, key, DES3_EDE_KEY_SIZE); + + return 0; +} + +static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, + const struct mv_cesa_op_ctx *op_templ) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + struct mv_cesa_tdma_req *dreq = &creq->req.dma; + struct mv_cesa_ablkcipher_dma_iter iter; + struct mv_cesa_tdma_chain chain; + bool skip_ctx = false; + int ret; + + dreq->base.type = CESA_DMA_REQ; + dreq->chain.first = NULL; + dreq->chain.last = NULL; + + if (req->src != req->dst) { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + if (!ret) + return -ENOMEM; + + ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + if (!ret) { + ret = -ENOMEM; + goto err_unmap_src; + } + } else { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_BIDIRECTIONAL); + if (!ret) + return -ENOMEM; + } + + mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_ablkcipher_req_iter_init(&iter, req); + + do { + struct mv_cesa_op_ctx *op; + + op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + skip_ctx = true; + + mv_cesa_set_crypt_op_len(op, iter.base.op_len); + + /* Add input transfers */ + ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + &iter.src, flags); + if (ret) + goto err_free_tdma; + + /* Add dummy desc to launch the crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(&chain, flags); + if (ret) + goto err_free_tdma; + + /* Add output transfers */ + ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base, + &iter.dst, flags); + if (ret) + goto err_free_tdma; + + } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); + + dreq->chain = chain; + + return 0; + +err_free_tdma: + mv_cesa_dma_cleanup(dreq); + if (req->dst != req->src) + dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, + DMA_FROM_DEVICE); + +err_unmap_src: + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, + req->dst != req->src ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); + + return ret; +} + +static inline int +mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, + const struct mv_cesa_op_ctx *op_templ) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std; + + sreq->base.type = CESA_STD_REQ; + sreq->op = *op_templ; + sreq->skip_ctx = false; + + return 0; +} + +static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + unsigned int blksize = crypto_ablkcipher_blocksize(tfm); + int ret; + + if (!IS_ALIGNED(req->nbytes, blksize)) + return -EINVAL; + + creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY, + CESA_SA_DESC_CFG_OP_MSK); + + /* TODO: add a threshold for DMA usage */ + if (cesa_dev->caps->has_tdma) + ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); + else + ret = mv_cesa_ablkcipher_std_req_init(req, tmpl); + + return ret; +} + +static int mv_cesa_des_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret; + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES, + CESA_SA_DESC_CFG_CRYPTM_MSK); + + memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; +} + +static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_des_op(req, &tmpl); +} + +static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_des_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_ecb_des_alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "mv-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = mv_cesa_des_setkey, + .encrypt = mv_cesa_ecb_des_encrypt, + .decrypt = mv_cesa_ecb_des_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_des_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, + CESA_SA_DESC_CFG_CRYPTCM_MSK); + + memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE); + + return mv_cesa_des_op(req, tmpl); +} + +static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_des_op(req, &tmpl); +} + +static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); + + return 
mv_cesa_cbc_des_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_des_alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "mv-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = mv_cesa_des_setkey, + .encrypt = mv_cesa_cbc_des_encrypt, + .decrypt = mv_cesa_cbc_des_decrypt, + }, + }, +}; + +static int mv_cesa_des3_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret; + + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES, + CESA_SA_DESC_CFG_CRYPTM_MSK); + + memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; +} + +static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_des3_op(req, &tmpl); +} + +static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_des3_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_ecb_des3_ede_alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "mv-ecb-des3-ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = mv_cesa_des3_ede_setkey, + .encrypt = mv_cesa_ecb_des3_ede_encrypt, + .decrypt = mv_cesa_ecb_des3_ede_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE); + + return mv_cesa_des3_op(req, tmpl); +} + +static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_CBC | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_des3_op(req, &tmpl); +} + +static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_CBC | + CESA_SA_DESC_CFG_3DES_EDE | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_cbc_des3_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_des3_ede_alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "mv-cbc-des3-ede", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = 
DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setkey = mv_cesa_des3_ede_setkey, + .encrypt = mv_cesa_cbc_des3_ede_encrypt, + .decrypt = mv_cesa_cbc_des3_ede_decrypt, + }, + }, +}; + +static int mv_cesa_aes_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + int ret, i; + u32 *key; + u32 cfg; + + cfg = CESA_SA_DESC_CFG_CRYPTM_AES; + + if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC) + key = ctx->aes.key_dec; + else + key = ctx->aes.key_enc; + + for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++) + tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]); + + if (ctx->aes.key_length == 24) + cfg |= CESA_SA_DESC_CFG_AES_LEN_192; + else if (ctx->aes.key_length == 32) + cfg |= CESA_SA_DESC_CFG_AES_LEN_256; + + mv_cesa_update_op_cfg(tmpl, cfg, + CESA_SA_DESC_CFG_CRYPTM_MSK | + CESA_SA_DESC_CFG_AES_LEN_MSK); + + ret = mv_cesa_ablkcipher_req_init(req, tmpl); + if (ret) + return ret; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ablkcipher_cleanup(req); + + return ret; +} + +static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_aes_op(req, &tmpl); +} + +static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, + CESA_SA_DESC_CFG_CRYPTCM_ECB | + CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_aes_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "mv-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = mv_cesa_aes_setkey, + .encrypt = mv_cesa_ecb_aes_encrypt, + .decrypt = mv_cesa_ecb_aes_decrypt, + }, + }, +}; + +static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, + CESA_SA_DESC_CFG_CRYPTCM_MSK); + memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE); + + return mv_cesa_aes_op(req, tmpl); +} + +static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC); + + return mv_cesa_cbc_aes_op(req, &tmpl); +} + +static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC); + + return mv_cesa_cbc_aes_op(req, &tmpl); +} + +struct crypto_alg mv_cesa_cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "mv-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), + 
.cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = mv_cesa_ablkcipher_cra_init, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = mv_cesa_aes_setkey, + .encrypt = mv_cesa_cbc_aes_encrypt, + .decrypt = mv_cesa_cbc_aes_decrypt, + }, + }, +}; diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c new file mode 100644 index 000000000000..ae9272eb9c1a --- /dev/null +++ b/drivers/crypto/marvell/hash.c @@ -0,0 +1,1441 @@ +/* + * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256. + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * Author: Arnaud Ebalard <arno@natisbad.org> + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <crypto/md5.h> +#include <crypto/sha.h> + +#include "cesa.h" + +struct mv_cesa_ahash_dma_iter { + struct mv_cesa_dma_iter base; + struct mv_cesa_sg_dma_iter src; +}; + +static inline void +mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter, + struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int len = req->nbytes; + + if (!creq->last_req) + len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK; + + mv_cesa_req_dma_iter_init(&iter->base, len); + mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); + iter->src.op_offset = creq->cache_ptr; +} + +static inline bool +mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter) +{ + iter->src.op_offset = 0; + + return mv_cesa_req_dma_iter_next_op(&iter->base); +} + +static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma; + + creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags, + &dreq->cache_dma); + if (!creq->cache) + return -ENOMEM; + + return 0; +} + +static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags); + if (!creq->cache) + return -ENOMEM; + + return 0; +} + +static int mv_cesa_ahash_alloc_cache(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + int ret; + + if (creq->cache) + return 0; + + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_ahash_dma_alloc_cache(creq, flags); + else + ret = mv_cesa_ahash_std_alloc_cache(creq, flags); + + return ret; +} + +static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq) +{ + dma_pool_free(cesa_dev->dma->cache_pool, creq->cache, + creq->req.dma.cache_dma); +} + +static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq) +{ + kfree(creq->cache); +} + +static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq) +{ + if (!creq->cache) + return; + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_free_cache(creq); + else + mv_cesa_ahash_std_free_cache(creq); + + creq->cache = NULL; +} + +static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req, + gfp_t flags) +{ + if (req->padding) + return 0; + + req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags, + &req->padding_dma); + if (!req->padding) + return -ENOMEM; + + return 0; +} + +static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req) +{ + if (!req->padding) + return; + + dma_pool_free(cesa_dev->dma->padding_pool, req->padding, + req->padding_dma); + req->padding = NULL; +} + +static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + mv_cesa_ahash_dma_free_padding(&creq->req.dma); +} + +static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); + mv_cesa_dma_cleanup(&creq->req.dma.base); +} + +static inline void mv_cesa_ahash_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_cleanup(req); +} + +static void mv_cesa_ahash_last_cleanup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + mv_cesa_ahash_free_cache(creq); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_last_cleanup(req); +} + +static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq) +{ + unsigned int index, padlen; + + index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; + padlen = (index < 56) ? 
(56 - index) : (64 + 56 - index); + + return padlen; +} + +static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf) +{ + __be64 bits = cpu_to_be64(creq->len << 3); + unsigned int index, padlen; + + buf[0] = 0x80; + /* Pad out to 56 mod 64 */ + index = creq->len & CESA_HASH_BLOCK_SIZE_MSK; + padlen = mv_cesa_ahash_pad_len(creq); + memset(buf + 1, 0, padlen - 1); + memcpy(buf + padlen, &bits, sizeof(bits)); + + return padlen + 8; +} + +static void mv_cesa_ahash_std_step(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_ahash_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + struct mv_cesa_op_ctx *op; + unsigned int new_cache_ptr = 0; + u32 frag_mode; + size_t len; + + if (creq->cache_ptr) + memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache, + creq->cache_ptr); + + len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset, + CESA_SA_SRAM_PAYLOAD_SIZE); + + if (!creq->last_req) { + new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK; + len &= ~CESA_HASH_BLOCK_SIZE_MSK; + } + + if (len - creq->cache_ptr) + sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents, + engine->sram + + CESA_SA_DATA_SRAM_OFFSET + + creq->cache_ptr, + len - creq->cache_ptr, + sreq->offset); + + op = &creq->op_tmpl; + + frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK; + + if (creq->last_req && sreq->offset == req->nbytes && + creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) + frag_mode = CESA_SA_DESC_CFG_NOT_FRAG; + else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG) + frag_mode = CESA_SA_DESC_CFG_LAST_FRAG; + } + + if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG || + frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) { + if (len && + creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + mv_cesa_set_mac_op_total_len(op, creq->len); + } else { + int trailerlen = mv_cesa_ahash_pad_len(creq) + 8; + + if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) { + len &= CESA_HASH_BLOCK_SIZE_MSK; + new_cache_ptr = 64 - trailerlen; + memcpy(creq->cache, + engine->sram + + CESA_SA_DATA_SRAM_OFFSET + len, + new_cache_ptr); + } else { + len += mv_cesa_ahash_pad_req(creq, + engine->sram + len + + CESA_SA_DATA_SRAM_OFFSET); + } + + if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) + frag_mode = CESA_SA_DESC_CFG_MID_FRAG; + else + frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG; + } + } + + mv_cesa_set_mac_op_frag_len(op, len); + mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK); + + /* FIXME: only update enc_len field */ + memcpy(engine->sram, op, sizeof(*op)); + + if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + creq->cache_ptr = new_cache_ptr; + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE); + writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_ahash_std_req *sreq = &creq->req.std; + + if (sreq->offset < (req->nbytes - creq->cache_ptr)) + return -EINPROGRESS; + + return 0; +} + +static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; + + mv_cesa_dma_prepare(dreq, dreq->base.engine); +} + +static void 
mv_cesa_ahash_std_prepare(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_ahash_std_req *sreq = &creq->req.std; + struct mv_cesa_engine *engine = sreq->base.engine; + + sreq->offset = 0; + mv_cesa_adjust_op(engine, &creq->op_tmpl); + memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl)); +} + +static void mv_cesa_ahash_step(struct crypto_async_request *req) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_dma_step(&creq->req.dma.base); + else + mv_cesa_ahash_std_step(ahashreq); +} + +static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + struct mv_cesa_engine *engine = creq->req.base.engine; + unsigned int digsize; + int ret, i; + + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_dma_process(&creq->req.dma.base, status); + else + ret = mv_cesa_ahash_std_process(ahashreq, status); + + if (ret == -EINPROGRESS) + return ret; + + digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); + for (i = 0; i < digsize / 4; i++) + creq->state[i] = readl(engine->regs + CESA_IVDIG(i)); + + if (creq->cache_ptr) + sg_pcopy_to_buffer(ahashreq->src, creq->src_nents, + creq->cache, + creq->cache_ptr, + ahashreq->nbytes - creq->cache_ptr); + + if (creq->last_req) { + for (i = 0; i < digsize / 4; i++) { + /* + * Hardware provides MD5 digest in a different + * endianness than SHA-1 and SHA-256 ones. + */ + if (digsize == MD5_DIGEST_SIZE) + creq->state[i] = cpu_to_le32(creq->state[i]); + else + creq->state[i] = cpu_to_be32(creq->state[i]); + } + + memcpy(ahashreq->result, creq->state, digsize); + } + + return ret; +} + +static void mv_cesa_ahash_prepare(struct crypto_async_request *req, + struct mv_cesa_engine *engine) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + unsigned int digsize; + int i; + + creq->req.base.engine = engine; + + if (creq->req.base.type == CESA_DMA_REQ) + mv_cesa_ahash_dma_prepare(ahashreq); + else + mv_cesa_ahash_std_prepare(ahashreq); + + digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq)); + for (i = 0; i < digsize / 4; i++) + writel(creq->state[i], + engine->regs + CESA_IVDIG(i)); +} + +static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req) +{ + struct ahash_request *ahashreq = ahash_request_cast(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); + + if (creq->last_req) + mv_cesa_ahash_last_cleanup(ahashreq); + + mv_cesa_ahash_cleanup(ahashreq); +} + +static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = { + .step = mv_cesa_ahash_step, + .process = mv_cesa_ahash_process, + .prepare = mv_cesa_ahash_prepare, + .cleanup = mv_cesa_ahash_req_cleanup, +}; + +static int mv_cesa_ahash_init(struct ahash_request *req, + struct mv_cesa_op_ctx *tmpl) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + + memset(creq, 0, sizeof(*creq)); + mv_cesa_update_op_cfg(tmpl, + CESA_SA_DESC_CFG_OP_MAC_ONLY | + CESA_SA_DESC_CFG_FIRST_FRAG, + CESA_SA_DESC_CFG_OP_MSK | + CESA_SA_DESC_CFG_FRAG_MSK); + mv_cesa_set_mac_op_total_len(tmpl, 0); + mv_cesa_set_mac_op_frag_len(tmpl, 0); + creq->op_tmpl = *tmpl; + creq->len = 0; + + return 0; +} + +static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm) +{ 
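+	/*
+	 * Common ahash tfm setup: plug in the request ops shared by all
+	 * hash algorithms and size the per-request context accordingly.
+	 */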
+ struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->base.ops = &mv_cesa_ahash_req_ops; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct mv_cesa_ahash_req)); + return 0; +} + +static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + int ret; + + if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) && + !creq->last_req) { + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + } + + if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) { + *cached = true; + + if (!req->nbytes) + return 0; + + sg_pcopy_to_buffer(req->src, creq->src_nents, + creq->cache + creq->cache_ptr, + req->nbytes, 0); + + creq->cache_ptr += req->nbytes; + } + + return 0; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + struct mv_cesa_op_ctx *op = NULL; + int ret; + + if (!creq->cache_ptr) + return NULL; + + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET, + ahashdreq->cache_dma, + creq->cache_ptr, + CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + if (!dma_iter->base.op_len) { + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + } + + return op; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + gfp_t flags) +{ + struct mv_cesa_op_ctx *op; + int ret; + + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len); + + if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) == + CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(&creq->op_tmpl, + CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + /* Add input transfers */ + ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base, + &dma_iter->src, flags); + if (ret) + return ERR_PTR(ret); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + + return op; +} + +static struct mv_cesa_op_ctx * +mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_ahash_dma_iter *dma_iter, + struct mv_cesa_ahash_req *creq, + struct mv_cesa_op_ctx *op, + gfp_t flags) +{ + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + unsigned int len, trailerlen, padoff = 0; + int ret; + + if (!creq->last_req) + return op; + + if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) { + u32 frag = CESA_SA_DESC_CFG_NOT_FRAG; + + if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) != + CESA_SA_DESC_CFG_FIRST_FRAG) + frag = CESA_SA_DESC_CFG_LAST_FRAG; + + mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK); + + return op; + } + + ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags); + if (ret) + return ERR_PTR(ret); + + trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding); + + if (op) { + len = min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len, + trailerlen); + if (len) { + ret = 
mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET + + dma_iter->base.op_len, + ahashdreq->padding_dma, + len, CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + mv_cesa_set_mac_op_frag_len(op, + dma_iter->base.op_len + len); + padoff += len; + } + } + + if (padoff >= trailerlen) + return op; + + if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) != + CESA_SA_DESC_CFG_FIRST_FRAG) + mv_cesa_update_op_cfg(&creq->op_tmpl, + CESA_SA_DESC_CFG_MID_FRAG, + CESA_SA_DESC_CFG_FRAG_MSK); + + op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags); + if (IS_ERR(op)) + return op; + + mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff); + + ret = mv_cesa_dma_add_data_transfer(chain, + CESA_SA_DATA_SRAM_OFFSET, + ahashdreq->padding_dma + + padoff, + trailerlen - padoff, + CESA_TDMA_DST_IN_SRAM, + flags); + if (ret) + return ERR_PTR(ret); + + /* Add dummy desc to launch crypto operation */ + ret = mv_cesa_dma_add_dummy_launch(chain, flags); + if (ret) + return ERR_PTR(ret); + + return op; +} + +static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? + GFP_KERNEL : GFP_ATOMIC; + struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma; + struct mv_cesa_tdma_req *dreq = &ahashdreq->base; + struct mv_cesa_tdma_chain chain; + struct mv_cesa_ahash_dma_iter iter; + struct mv_cesa_op_ctx *op = NULL; + int ret; + + dreq->chain.first = NULL; + dreq->chain.last = NULL; + + if (creq->src_nents) { + ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, + DMA_TO_DEVICE); + if (!ret) { + ret = -ENOMEM; + goto err; + } + } + + mv_cesa_tdma_desc_iter_init(&chain); + mv_cesa_ahash_req_iter_init(&iter, req); + + op = mv_cesa_ahash_dma_add_cache(&chain, &iter, + creq, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + + do { + if (!iter.base.op_len) + break; + + op = mv_cesa_ahash_dma_add_data(&chain, &iter, + creq, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + } while (mv_cesa_ahash_req_iter_next_op(&iter)); + + op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags); + if (IS_ERR(op)) { + ret = PTR_ERR(op); + goto err_free_tdma; + } + + if (op) { + /* Add dummy desc to wait for crypto operation end */ + ret = mv_cesa_dma_add_dummy_end(&chain, flags); + if (ret) + goto err_free_tdma; + } + + if (!creq->last_req) + creq->cache_ptr = req->nbytes + creq->cache_ptr - + iter.base.len; + else + creq->cache_ptr = 0; + + dreq->chain = chain; + + return 0; + +err_free_tdma: + mv_cesa_dma_cleanup(dreq); + dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); + +err: + mv_cesa_ahash_last_cleanup(req); + + return ret; +} + +static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + int ret; + + if (cesa_dev->caps->has_tdma) + creq->req.base.type = CESA_DMA_REQ; + else + creq->req.base.type = CESA_STD_REQ; + + creq->src_nents = sg_nents_for_len(req->src, req->nbytes); + + ret = mv_cesa_ahash_cache_req(req, cached); + if (ret) + return ret; + + if (*cached) + return 0; + + if (creq->req.base.type == CESA_DMA_REQ) + ret = mv_cesa_ahash_dma_req_init(req); + + return ret; +} + +static int mv_cesa_ahash_update(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + bool 
cached = false; + int ret; + + creq->len += req->nbytes; + ret = mv_cesa_ahash_req_init(req, &cached); + if (ret) + return ret; + + if (cached) + return 0; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) { + mv_cesa_ahash_cleanup(req); + return ret; + } + + return ret; +} + +static int mv_cesa_ahash_final(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; + bool cached = false; + int ret; + + mv_cesa_set_mac_op_total_len(tmpl, creq->len); + creq->last_req = true; + req->nbytes = 0; + + ret = mv_cesa_ahash_req_init(req, &cached); + if (ret) + return ret; + + if (cached) + return 0; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ahash_cleanup(req); + + return ret; +} + +static int mv_cesa_ahash_finup(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; + bool cached = false; + int ret; + + creq->len += req->nbytes; + mv_cesa_set_mac_op_total_len(tmpl, creq->len); + creq->last_req = true; + + ret = mv_cesa_ahash_req_init(req, &cached); + if (ret) + return ret; + + if (cached) + return 0; + + ret = mv_cesa_queue_req(&req->base); + if (ret && ret != -EINPROGRESS) + mv_cesa_ahash_cleanup(req); + + return ret; +} + +static int mv_cesa_md5_init(struct ahash_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_md5_export(struct ahash_request *req, void *out) +{ + struct md5_state *out_state = out; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + + out_state->byte_count = creq->len; + memcpy(out_state->hash, creq->state, digsize); + memset(out_state->block, 0, sizeof(out_state->block)); + if (creq->cache) + memcpy(out_state->block, creq->cache, creq->cache_ptr); + + return 0; +} + +static int mv_cesa_md5_import(struct ahash_request *req, const void *in) +{ + const struct md5_state *in_state = in; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + unsigned int cache_ptr; + int ret; + + creq->len = in_state->byte_count; + memcpy(creq->state, in_state->hash, digsize); + creq->cache_ptr = 0; + + cache_ptr = creq->len % sizeof(in_state->block); + if (!cache_ptr) + return 0; + + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + + memcpy(creq->cache, in_state->block, cache_ptr); + creq->cache_ptr = cache_ptr; + + return 0; +} + +static int mv_cesa_md5_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_md5_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_md5_alg = { + .init = mv_cesa_md5_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_md5_digest, + .export = mv_cesa_md5_export, + .import = mv_cesa_md5_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "md5", + .cra_driver_name = "mv-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), + .cra_init = mv_cesa_ahash_cra_init, + .cra_module = 
THIS_MODULE, + } + } +}; + +static int mv_cesa_sha1_init(struct ahash_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_sha1_export(struct ahash_request *req, void *out) +{ + struct sha1_state *out_state = out; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + + out_state->count = creq->len; + memcpy(out_state->state, creq->state, digsize); + memset(out_state->buffer, 0, sizeof(out_state->buffer)); + if (creq->cache) + memcpy(out_state->buffer, creq->cache, creq->cache_ptr); + + return 0; +} + +static int mv_cesa_sha1_import(struct ahash_request *req, const void *in) +{ + const struct sha1_state *in_state = in; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + unsigned int cache_ptr; + int ret; + + creq->len = in_state->count; + memcpy(creq->state, in_state->state, digsize); + creq->cache_ptr = 0; + + cache_ptr = creq->len % SHA1_BLOCK_SIZE; + if (!cache_ptr) + return 0; + + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + + memcpy(creq->cache, in_state->buffer, cache_ptr); + creq->cache_ptr = cache_ptr; + + return 0; +} + +static int mv_cesa_sha1_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_sha1_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_sha1_alg = { + .init = mv_cesa_sha1_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_sha1_digest, + .export = mv_cesa_sha1_export, + .import = mv_cesa_sha1_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "sha1", + .cra_driver_name = "mv-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), + .cra_init = mv_cesa_ahash_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + +static int mv_cesa_sha256_init(struct ahash_request *req) +{ + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_sha256_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_sha256_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +static int mv_cesa_sha256_export(struct ahash_request *req, void *out) +{ + struct sha256_state *out_state = out; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int ds = crypto_ahash_digestsize(ahash); + + out_state->count = creq->len; + memcpy(out_state->state, creq->state, ds); + memset(out_state->buf, 0, sizeof(out_state->buf)); + if (creq->cache) + memcpy(out_state->buf, creq->cache, creq->cache_ptr); + + return 0; +} + +static int mv_cesa_sha256_import(struct ahash_request *req, const void *in) +{ + const struct sha256_state *in_state = in; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + unsigned int digsize = crypto_ahash_digestsize(ahash); + unsigned int cache_ptr; + int ret; + + creq->len = in_state->count; + memcpy(creq->state, in_state->state, digsize); + 
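	/*
+	 * Rebuild the partial-block cache below; this mirrors the md5/sha1
+	 * import helpers above.
+	 */
+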
creq->cache_ptr = 0; + + cache_ptr = creq->len % SHA256_BLOCK_SIZE; + if (!cache_ptr) + return 0; + + ret = mv_cesa_ahash_alloc_cache(req); + if (ret) + return ret; + + memcpy(creq->cache, in_state->buf, cache_ptr); + creq->cache_ptr = cache_ptr; + + return 0; +} + +struct ahash_alg mv_sha256_alg = { + .init = mv_cesa_sha256_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_sha256_digest, + .export = mv_cesa_sha256_export, + .import = mv_cesa_sha256_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "sha256", + .cra_driver_name = "mv-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx), + .cra_init = mv_cesa_ahash_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + +struct mv_cesa_ahash_result { + struct completion completion; + int error; +}; + +static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req, + int error) +{ + struct mv_cesa_ahash_result *result = req->data; + + if (error == -EINPROGRESS) + return; + + result->error = error; + complete(&result->completion); +} + +static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad, + void *state, unsigned int blocksize) +{ + struct mv_cesa_ahash_result result; + struct scatterlist sg; + int ret; + + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + mv_cesa_hmac_ahash_complete, &result); + sg_init_one(&sg, pad, blocksize); + ahash_request_set_crypt(req, &sg, pad, blocksize); + init_completion(&result.completion); + + ret = crypto_ahash_init(req); + if (ret) + return ret; + + ret = crypto_ahash_update(req); + if (ret && ret != -EINPROGRESS) + return ret; + + wait_for_completion_interruptible(&result.completion); + if (result.error) + return result.error; + + ret = crypto_ahash_export(req, state); + if (ret) + return ret; + + return 0; +} + +static int mv_cesa_ahmac_pad_init(struct ahash_request *req, + const u8 *key, unsigned int keylen, + u8 *ipad, u8 *opad, + unsigned int blocksize) +{ + struct mv_cesa_ahash_result result; + struct scatterlist sg; + int ret; + int i; + + if (keylen <= blocksize) { + memcpy(ipad, key, keylen); + } else { + u8 *keydup = kmemdup(key, keylen, GFP_KERNEL); + + if (!keydup) + return -ENOMEM; + + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + mv_cesa_hmac_ahash_complete, + &result); + sg_init_one(&sg, keydup, keylen); + ahash_request_set_crypt(req, &sg, ipad, keylen); + init_completion(&result.completion); + + ret = crypto_ahash_digest(req); + if (ret == -EINPROGRESS) { + wait_for_completion_interruptible(&result.completion); + ret = result.error; + } + + /* Set the memory region to 0 to avoid any leak. 
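+		 * keydup is a kmemdup() copy of the caller's HMAC key, so
+		 * wipe it before freeing.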
*/ + memset(keydup, 0, keylen); + kfree(keydup); + + if (ret) + return ret; + + keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); + } + + memset(ipad + keylen, 0, blocksize - keylen); + memcpy(opad, ipad, blocksize); + + for (i = 0; i < blocksize; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + return 0; +} + +static int mv_cesa_ahmac_setkey(const char *hash_alg_name, + const u8 *key, unsigned int keylen, + void *istate, void *ostate) +{ + struct ahash_request *req; + struct crypto_ahash *tfm; + unsigned int blocksize; + u8 *ipad = NULL; + u8 *opad; + int ret; + + tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH, + CRYPTO_ALG_TYPE_AHASH_MASK); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto free_ahash; + } + + crypto_ahash_clear_flags(tfm, ~0); + + blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + + ipad = kzalloc(2 * blocksize, GFP_KERNEL); + if (!ipad) { + ret = -ENOMEM; + goto free_req; + } + + opad = ipad + blocksize; + + ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize); + if (ret) + goto free_ipad; + + ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize); + if (ret) + goto free_ipad; + + ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize); + +free_ipad: + kfree(ipad); +free_req: + ahash_request_free(req); +free_ahash: + crypto_free_ahash(tfm); + + return ret; +} + +static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->base.ops = &mv_cesa_ahash_req_ops; + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct mv_cesa_ahash_req)); + return 0; +} + +static int mv_cesa_ahmac_md5_init(struct ahash_request *req) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5); + memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct md5_state istate, ostate; + int ret, i; + + ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(istate.hash); i++) + ctx->iv[i] = be32_to_cpu(istate.hash[i]); + + for (i = 0; i < ARRAY_SIZE(ostate.hash); i++) + ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]); + + return 0; +} + +static int mv_cesa_ahmac_md5_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_ahmac_md5_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_ahmac_md5_alg = { + .init = mv_cesa_ahmac_md5_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_ahmac_md5_digest, + .setkey = mv_cesa_ahmac_md5_setkey, + .export = mv_cesa_md5_export, + .import = mv_cesa_md5_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct md5_state), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "mv-hmac-md5", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), + .cra_init = mv_cesa_ahmac_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + +static int mv_cesa_ahmac_sha1_init(struct 
ahash_request *req) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1); + memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct sha1_state istate, ostate; + int ret, i; + + ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(istate.state); i++) + ctx->iv[i] = be32_to_cpu(istate.state[i]); + + for (i = 0; i < ARRAY_SIZE(ostate.state); i++) + ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]); + + return 0; +} + +static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_ahmac_sha1_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_ahmac_sha1_alg = { + .init = mv_cesa_ahmac_sha1_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_ahmac_sha1_digest, + .setkey = mv_cesa_ahmac_sha1_setkey, + .export = mv_cesa_sha1_export, + .import = mv_cesa_sha1_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "mv-hmac-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), + .cra_init = mv_cesa_ahmac_cra_init, + .cra_module = THIS_MODULE, + } + } +}; + +static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct sha256_state istate, ostate; + int ret, i; + + ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(istate.state); i++) + ctx->iv[i] = be32_to_cpu(istate.state[i]); + + for (i = 0; i < ARRAY_SIZE(ostate.state); i++) + ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]); + + return 0; +} + +static int mv_cesa_ahmac_sha256_init(struct ahash_request *req) +{ + struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct mv_cesa_op_ctx tmpl; + + mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256); + memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv)); + + mv_cesa_ahash_init(req, &tmpl); + + return 0; +} + +static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req) +{ + int ret; + + ret = mv_cesa_ahmac_sha256_init(req); + if (ret) + return ret; + + return mv_cesa_ahash_finup(req); +} + +struct ahash_alg mv_ahmac_sha256_alg = { + .init = mv_cesa_ahmac_sha256_init, + .update = mv_cesa_ahash_update, + .final = mv_cesa_ahash_final, + .finup = mv_cesa_ahash_finup, + .digest = mv_cesa_ahmac_sha256_digest, + .setkey = mv_cesa_ahmac_sha256_setkey, + .export = mv_cesa_sha256_export, + .import = mv_cesa_sha256_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "mv-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx), + .cra_init = mv_cesa_ahmac_cra_init, + 
.cra_module = THIS_MODULE, + } + } +}; diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c new file mode 100644 index 000000000000..64a366c50174 --- /dev/null +++ b/drivers/crypto/marvell/tdma.c @@ -0,0 +1,224 @@ +/* + * Provide TDMA helper functions used by cipher and hash algorithm + * implementations. + * + * Author: Boris Brezillon <boris.brezillon@free-electrons.com> + * Author: Arnaud Ebalard <arno@natisbad.org> + * + * This work is based on an initial version written by + * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include "cesa.h" + +bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter, + struct mv_cesa_sg_dma_iter *sgiter, + unsigned int len) +{ + if (!sgiter->sg) + return false; + + sgiter->op_offset += len; + sgiter->offset += len; + if (sgiter->offset == sg_dma_len(sgiter->sg)) { + if (sg_is_last(sgiter->sg)) + return false; + sgiter->offset = 0; + sgiter->sg = sg_next(sgiter->sg); + } + + if (sgiter->op_offset == iter->op_len) + return false; + + return true; +} + +void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) +{ + struct mv_cesa_engine *engine = dreq->base.engine; + + writel(0, engine->regs + CESA_SA_CFG); + + mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE); + writel(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B | + CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN, + engine->regs + CESA_TDMA_CONTROL); + + writel(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT | + CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS, + engine->regs + CESA_SA_CFG); + writel(dreq->chain.first->cur_dma, + engine->regs + CESA_TDMA_NEXT_ADDR); + writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); +} + +void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) +{ + struct mv_cesa_tdma_desc *tdma; + + for (tdma = dreq->chain.first; tdma;) { + struct mv_cesa_tdma_desc *old_tdma = tdma; + + if (tdma->flags & CESA_TDMA_OP) + dma_pool_free(cesa_dev->dma->op_pool, tdma->op, + le32_to_cpu(tdma->src)); + + tdma = tdma->next; + dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, + le32_to_cpu(old_tdma->cur_dma)); + } + + dreq->chain.first = NULL; + dreq->chain.last = NULL; +} + +void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, + struct mv_cesa_engine *engine) +{ + struct mv_cesa_tdma_desc *tdma; + + for (tdma = dreq->chain.first; tdma; tdma = tdma->next) { + if (tdma->flags & CESA_TDMA_DST_IN_SRAM) + tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma); + + if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) + tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); + + if (tdma->flags & CESA_TDMA_OP) + mv_cesa_adjust_op(engine, tdma->op); + } +} + +static struct mv_cesa_tdma_desc * +mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) +{ + struct mv_cesa_tdma_desc *new_tdma = NULL; + dma_addr_t dma_handle; + + new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags, + &dma_handle); + if (!new_tdma) + return ERR_PTR(-ENOMEM); + + memset(new_tdma, 0, sizeof(*new_tdma)); + new_tdma->cur_dma = cpu_to_le32(dma_handle); + if (chain->last) { + chain->last->next_dma = new_tdma->cur_dma; + chain->last->next = new_tdma; + } else { + chain->first = new_tdma; + } + + chain->last = new_tdma; + + return new_tdma; +} + +struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, + const struct mv_cesa_op_ctx 
*op_templ, + bool skip_ctx, + gfp_t flags) +{ + struct mv_cesa_tdma_desc *tdma; + struct mv_cesa_op_ctx *op; + dma_addr_t dma_handle; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return ERR_CAST(tdma); + + op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle); + if (!op) + return ERR_PTR(-ENOMEM); + + *op = *op_templ; + + tdma = chain->last; + tdma->op = op; + tdma->byte_cnt = (skip_ctx ? sizeof(op->desc) : sizeof(*op)) | BIT(31); + tdma->src = dma_handle; + tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; + + return op; +} + +int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, + dma_addr_t dst, dma_addr_t src, u32 size, + u32 flags, gfp_t gfp_flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, gfp_flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + tdma->byte_cnt = size | BIT(31); + tdma->src = src; + tdma->dst = dst; + + flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); + tdma->flags = flags | CESA_TDMA_DATA; + + return 0; +} + +int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, + u32 flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + return 0; +} + +int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, u32 flags) +{ + struct mv_cesa_tdma_desc *tdma; + + tdma = mv_cesa_dma_add_desc(chain, flags); + if (IS_ERR(tdma)) + return PTR_ERR(tdma); + + tdma->byte_cnt = BIT(31); + + return 0; +} + +int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, + struct mv_cesa_dma_iter *dma_iter, + struct mv_cesa_sg_dma_iter *sgiter, + gfp_t gfp_flags) +{ + u32 flags = sgiter->dir == DMA_TO_DEVICE ? + CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM; + unsigned int len; + + do { + dma_addr_t dst, src; + int ret; + + len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter); + if (sgiter->dir == DMA_TO_DEVICE) { + dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; + src = sg_dma_address(sgiter->sg) + sgiter->offset; + } else { + dst = sg_dma_address(sgiter->sg) + sgiter->offset; + src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; + } + + ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len, + flags, gfp_flags); + if (ret) + return ret; + + } while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len)); + + return 0; +} diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index f91f15ddee92..5bcd575fa96f 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -9,6 +9,7 @@ #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/crypto.h> +#include <linux/genalloc.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kthread.h> @@ -29,6 +30,8 @@ #define MAX_HW_HASH_SIZE 0xFFFF #define MV_CESA_EXPIRE 500 /* msec */ +#define MV_CESA_DEFAULT_SRAM_SIZE 2048 + /* * STM: * /---------------------------------------\ @@ -83,6 +86,8 @@ struct req_progress { struct crypto_priv { void __iomem *reg; void __iomem *sram; + struct gen_pool *sram_pool; + dma_addr_t sram_dma; int irq; struct clk *clk; struct task_struct *queue_th; @@ -595,7 +600,7 @@ static int queue_manag(void *data) cpg->eng_st = ENGINE_IDLE; do { struct crypto_async_request *async_req = NULL; - struct crypto_async_request *backlog; + struct crypto_async_request *backlog = NULL; __set_current_state(TASK_INTERRUPTIBLE); @@ -1019,6 +1024,39 @@ static struct ahash_alg mv_hmac_sha1_alg = { } }; +static int mv_cesa_get_sram(struct platform_device *pdev, + struct crypto_priv *cp) +{ 
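+	/*
+	 * Prefer an SRAM pool referenced by the "marvell,crypto-srams"
+	 * phandle; otherwise fall back to mapping the "sram" MMIO
+	 * resource directly.
+	 */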
+ struct resource *res; + u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE; + + of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size", + &sram_size); + + cp->sram_size = sram_size; + cp->sram_pool = of_get_named_gen_pool(pdev->dev.of_node, + "marvell,crypto-srams", 0); + if (cp->sram_pool) { + cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size, + &cp->sram_dma); + if (cp->sram) + return 0; + + return -ENOMEM; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "sram"); + if (!res || resource_size(res) < cp->sram_size) + return -EINVAL; + + cp->sram = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cp->sram)) + return PTR_ERR(cp->sram); + + return 0; +} + static int mv_probe(struct platform_device *pdev) { struct crypto_priv *cp; @@ -1041,24 +1079,17 @@ static int mv_probe(struct platform_device *pdev) spin_lock_init(&cp->lock); crypto_init_queue(&cp->queue, 50); - cp->reg = ioremap(res->start, resource_size(res)); - if (!cp->reg) { - ret = -ENOMEM; + cp->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cp->reg)) { + ret = PTR_ERR(cp->reg); goto err; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); - if (!res) { - ret = -ENXIO; - goto err_unmap_reg; - } - cp->sram_size = resource_size(res); + ret = mv_cesa_get_sram(pdev, cp); + if (ret) + goto err; + cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; - cp->sram = ioremap(res->start, cp->sram_size); - if (!cp->sram) { - ret = -ENOMEM; - goto err_unmap_reg; - } if (pdev->dev.of_node) irq = irq_of_parse_and_map(pdev->dev.of_node, 0); @@ -1066,7 +1097,7 @@ static int mv_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0 || irq == NO_IRQ) { ret = irq; - goto err_unmap_sram; + goto err; } cp->irq = irq; @@ -1076,7 +1107,7 @@ static int mv_probe(struct platform_device *pdev) cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); if (IS_ERR(cp->queue_th)) { ret = PTR_ERR(cp->queue_th); - goto err_unmap_sram; + goto err; } ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), @@ -1134,10 +1165,6 @@ err_irq: } err_thread: kthread_stop(cp->queue_th); -err_unmap_sram: - iounmap(cp->sram); -err_unmap_reg: - iounmap(cp->reg); err: kfree(cp); cpg = NULL; @@ -1157,8 +1184,6 @@ static int mv_remove(struct platform_device *pdev) kthread_stop(cp->queue_th); free_irq(cp->irq, cp); memset(cp->sram, 0, cp->sram_size); - iounmap(cp->sram); - iounmap(cp->reg); if (!IS_ERR(cp->clk)) { clk_disable_unprepare(cp->clk); @@ -1172,6 +1197,8 @@ static int mv_remove(struct platform_device *pdev) static const struct of_device_id mv_cesa_of_match_table[] = { { .compatible = "marvell,orion-crypto", }, + { .compatible = "marvell,kirkwood-crypto", }, + { .compatible = "marvell,dove-crypto", }, {} }; MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 10a9aeff1666..2e8dab9d4263 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1281,10 +1281,10 @@ static const char md5_zero[MD5_DIGEST_SIZE] = { 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, }; static const u32 md5_init[MD5_HASH_WORDS] = { - cpu_to_le32(0x67452301), - cpu_to_le32(0xefcdab89), - cpu_to_le32(0x98badcfe), - cpu_to_le32(0x10325476), + cpu_to_le32(MD5_H0), + cpu_to_le32(MD5_H1), + cpu_to_le32(MD5_H2), + cpu_to_le32(MD5_H3), }; static const char sha1_zero[SHA1_DIGEST_SIZE] = { 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig index f82616621ae1..e421c96c763a 
100644 --- a/drivers/crypto/nx/Kconfig +++ b/drivers/crypto/nx/Kconfig @@ -1,26 +1,55 @@ + config CRYPTO_DEV_NX_ENCRYPT - tristate "Encryption acceleration support" - depends on PPC64 && IBMVIO + tristate "Encryption acceleration support on pSeries platform" + depends on PPC_PSERIES && IBMVIO && !CPU_LITTLE_ENDIAN default y select CRYPTO_AES - select CRYPTO_CBC - select CRYPTO_ECB select CRYPTO_CCM - select CRYPTO_GCM - select CRYPTO_AUTHENC - select CRYPTO_XCBC - select CRYPTO_SHA256 - select CRYPTO_SHA512 help - Support for Power7+ in-Nest encryption acceleration. This - module supports acceleration for AES and SHA2 algorithms. If you - choose 'M' here, this module will be called nx_crypto. + Support for PowerPC Nest (NX) encryption acceleration. This + module supports acceleration for AES and SHA2 algorithms on + the pSeries platform. If you choose 'M' here, this module + will be called nx_crypto. config CRYPTO_DEV_NX_COMPRESS tristate "Compression acceleration support" - depends on PPC64 && IBMVIO default y help - Support for Power7+ in-Nest compression acceleration. This - module supports acceleration for AES and SHA2 algorithms. If you - choose 'M' here, this module will be called nx_compress. + Support for PowerPC Nest (NX) compression acceleration. This + module supports acceleration for compressing memory with the 842 + algorithm. One of the platform drivers must be selected also. + If you choose 'M' here, this module will be called nx_compress. + +if CRYPTO_DEV_NX_COMPRESS + +config CRYPTO_DEV_NX_COMPRESS_PSERIES + tristate "Compression acceleration support on pSeries platform" + depends on PPC_PSERIES && IBMVIO + default y + help + Support for PowerPC Nest (NX) compression acceleration. This + module supports acceleration for compressing memory with the 842 + algorithm. This supports NX hardware on the pSeries platform. + If you choose 'M' here, this module will be called nx_compress_pseries. + +config CRYPTO_DEV_NX_COMPRESS_POWERNV + tristate "Compression acceleration support on PowerNV platform" + depends on PPC_POWERNV + default y + help + Support for PowerPC Nest (NX) compression acceleration. This + module supports acceleration for compressing memory with the 842 + algorithm. This supports NX hardware on the PowerNV platform. + If you choose 'M' here, this module will be called nx_compress_powernv. + +config CRYPTO_DEV_NX_COMPRESS_CRYPTO + tristate "Compression acceleration cryptographic interface" + select CRYPTO_ALGAPI + select 842_DECOMPRESS + default y + help + Support for PowerPC Nest (NX) accelerators using the cryptographic + API. If you choose 'M' here, this module will be called + nx_compress_crypto. 
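+
+# Example (illustrative): a pSeries kernel can set CRYPTO_DEV_NX_COMPRESS=y
+# and CRYPTO_DEV_NX_COMPRESS_PSERIES=m to build the common core in and the
+# nx_compress_pseries module on top of it.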
+
+endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index bb770ea45ce9..e1684f5adb11 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -10,5 +10,12 @@ nx-crypto-objs := nx.o \
 		  nx-sha256.o \
 		  nx-sha512.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
 nx-compress-objs := nx-842.o
+nx-compress-platform-objs := nx-842-platform.o
+nx-compress-pseries-objs := nx-842-pseries.o
+nx-compress-powernv-objs := nx-842-powernv.o
+nx-compress-crypto-objs := nx-842-crypto.o
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
new file mode 100644
index 000000000000..d53a1dcd7b4e
--- /dev/null
+++ b/drivers/crypto/nx/nx-842-crypto.c
@@ -0,0 +1,580 @@
+/*
+ * Cryptographic API for the NX-842 hardware compression.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors. Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths. Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid. Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library. Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
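+ *
+ * For illustration (layout implied by the header structs below), a
+ * compressed buffer created by this driver looks roughly like:
+ *
+ *   [nx842_crypto_header + group table][pad][group 0 data][group 1 data]...
+ *
+ * where each group entry records its own padding and its compressed and
+ * uncompressed lengths.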
+ * + * This does not fall back to software compression, however, since the caller + * of this function is specifically requesting hardware compression; if the + * hardware compression fails, the caller can fall back to software + * compression, and the raw 842 compressed buffer that the software compressor + * creates can be passed to this driver for hardware decompression; any + * buffer without our specific header magic is assumed to be a raw 842 buffer + * and passed directly to the hardware. Note that the software compression + * library will produce a compressed buffer that is incompatible with the + * hardware decompressor if the original input buffer length is not a multiple + * of 8; if such a compressed buffer is passed to this driver for + * decompression, the hardware will reject it and this driver will then pass + * it over to the software library for decompression. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/vmalloc.h> +#include <linux/sw842.h> +#include <linux/ratelimit.h> + +#include "nx-842.h" + +/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit + * template (see lib/842/842.h), so this magic number will never appear at + * the start of a raw 842 compressed buffer. That is important, as any buffer + * passed to us without this magic is assumed to be a raw 842 compressed + * buffer, and passed directly to the hardware to decompress. + */ +#define NX842_CRYPTO_MAGIC (0xf842) +#define NX842_CRYPTO_GROUP_MAX (0x20) +#define NX842_CRYPTO_HEADER_SIZE(g) \ + (sizeof(struct nx842_crypto_header) + \ + sizeof(struct nx842_crypto_header_group) * (g)) +#define NX842_CRYPTO_HEADER_MAX_SIZE \ + NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX) + +/* bounce buffer size */ +#define BOUNCE_BUFFER_ORDER (2) +#define BOUNCE_BUFFER_SIZE \ + ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER)) + +/* try longer on comp because we can fallback to sw decomp if hw is busy */ +#define COMP_BUSY_TIMEOUT (250) /* ms */ +#define DECOMP_BUSY_TIMEOUT (50) /* ms */ + +struct nx842_crypto_header_group { + __be16 padding; /* unused bytes at start of group */ + __be32 compressed_length; /* compressed bytes in group */ + __be32 uncompressed_length; /* bytes after decompression */ +} __packed; + +struct nx842_crypto_header { + __be16 magic; /* NX842_CRYPTO_MAGIC */ + __be16 ignore; /* decompressed end bytes to ignore */ + u8 groups; /* total groups in this header */ + struct nx842_crypto_header_group group[]; +} __packed; + +struct nx842_crypto_param { + u8 *in; + unsigned int iremain; + u8 *out; + unsigned int oremain; + unsigned int ototal; +}; + +static int update_param(struct nx842_crypto_param *p, + unsigned int slen, unsigned int dlen) +{ + if (p->iremain < slen) + return -EOVERFLOW; + if (p->oremain < dlen) + return -ENOSPC; + + p->in += slen; + p->iremain -= slen; + p->out += dlen; + p->oremain -= dlen; + p->ototal += dlen; + + return 0; +} + +struct nx842_crypto_ctx { + u8 *wmem; + u8 *sbounce, *dbounce; + + struct nx842_crypto_header header; + struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX]; +}; + +static int nx842_crypto_init(struct crypto_tfm *tfm) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL); + ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); + if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) 
{ + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); + return -ENOMEM; + } + + return 0; +} + +static void nx842_crypto_exit(struct crypto_tfm *tfm) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + kfree(ctx->wmem); + free_page((unsigned long)ctx->sbounce); + free_page((unsigned long)ctx->dbounce); +} + +static int read_constraints(struct nx842_constraints *c) +{ + int ret; + + ret = nx842_constraints(c); + if (ret) { + pr_err_ratelimited("could not get nx842 constraints : %d\n", + ret); + return ret; + } + + /* limit maximum, to always have enough bounce buffer to decompress */ + if (c->maximum > BOUNCE_BUFFER_SIZE) { + c->maximum = BOUNCE_BUFFER_SIZE; + pr_info_once("limiting nx842 maximum to %x\n", c->maximum); + } + + return 0; +} + +static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) +{ + int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); + + /* compress should have added space for header */ + if (s > be16_to_cpu(hdr->group[0].padding)) { + pr_err("Internal error: no space for header\n"); + return -EINVAL; + } + + memcpy(buf, hdr, s); + + print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); + + return 0; +} + +static int compress(struct nx842_crypto_ctx *ctx, + struct nx842_crypto_param *p, + struct nx842_crypto_header_group *g, + struct nx842_constraints *c, + u16 *ignore, + unsigned int hdrsize) +{ + unsigned int slen = p->iremain, dlen = p->oremain, tmplen; + unsigned int adj_slen = slen; + u8 *src = p->in, *dst = p->out; + int ret, dskip = 0; + ktime_t timeout; + + if (p->iremain == 0) + return -EOVERFLOW; + + if (p->oremain == 0 || hdrsize + c->minimum > dlen) + return -ENOSPC; + + if (slen % c->multiple) + adj_slen = round_up(slen, c->multiple); + if (slen < c->minimum) + adj_slen = c->minimum; + if (slen > c->maximum) + adj_slen = slen = c->maximum; + if (adj_slen > slen || (u64)src % c->alignment) { + adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); + slen = min(slen, BOUNCE_BUFFER_SIZE); + if (adj_slen > slen) + memset(ctx->sbounce + slen, 0, adj_slen - slen); + memcpy(ctx->sbounce, src, slen); + src = ctx->sbounce; + slen = adj_slen; + pr_debug("using comp sbounce buffer, len %x\n", slen); + } + + dst += hdrsize; + dlen -= hdrsize; + + if ((u64)dst % c->alignment) { + dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); + dst += dskip; + dlen -= dskip; + } + if (dlen % c->multiple) + dlen = round_down(dlen, c->multiple); + if (dlen < c->minimum) { +nospc: + dst = ctx->dbounce; + dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); + dlen = round_down(dlen, c->multiple); + dskip = 0; + pr_debug("using comp dbounce buffer, len %x\n", dlen); + } + if (dlen > c->maximum) + dlen = c->maximum; + + tmplen = dlen; + timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); + do { + dlen = tmplen; /* reset dlen, if we're retrying */ + ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem); + /* possibly we should reduce the slen here, instead of + * retrying with the dbounce buffer? 
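+	 * Reducing slen would avoid copying back out of the dbounce
+	 * buffer, at the cost of splitting the input into more groups.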
+ */ + if (ret == -ENOSPC && dst != ctx->dbounce) + goto nospc; + } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); + if (ret) + return ret; + + dskip += hdrsize; + + if (dst == ctx->dbounce) + memcpy(p->out + dskip, dst, dlen); + + g->padding = cpu_to_be16(dskip); + g->compressed_length = cpu_to_be32(dlen); + g->uncompressed_length = cpu_to_be32(slen); + + if (p->iremain < slen) { + *ignore = slen - p->iremain; + slen = p->iremain; + } + + pr_debug("compress slen %x ignore %x dlen %x padding %x\n", + slen, *ignore, dlen, dskip); + + return update_param(p, slen, dskip + dlen); +} + +static int nx842_crypto_compress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_header *hdr = &ctx->header; + struct nx842_crypto_param p; + struct nx842_constraints c; + unsigned int groups, hdrsize, h; + int ret, n; + bool add_header; + u16 ignore = 0; + + p.in = (u8 *)src; + p.iremain = slen; + p.out = dst; + p.oremain = *dlen; + p.ototal = 0; + + *dlen = 0; + + ret = read_constraints(&c); + if (ret) + return ret; + + groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, + DIV_ROUND_UP(p.iremain, c.maximum)); + hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); + + /* skip adding header if the buffers meet all constraints */ + add_header = (p.iremain % c.multiple || + p.iremain < c.minimum || + p.iremain > c.maximum || + (u64)p.in % c.alignment || + p.oremain % c.multiple || + p.oremain < c.minimum || + p.oremain > c.maximum || + (u64)p.out % c.alignment); + + hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); + hdr->groups = 0; + hdr->ignore = 0; + + while (p.iremain > 0) { + n = hdr->groups++; + if (hdr->groups > NX842_CRYPTO_GROUP_MAX) + return -ENOSPC; + + /* header goes before first group */ + h = !n && add_header ? 
hdrsize : 0;
+
+		if (ignore)
+			pr_warn("internal error, ignore is set %x\n", ignore);
+
+		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+		if (ret)
+			return ret;
+	}
+
+	if (!add_header && hdr->groups > 1) {
+		pr_err("Internal error: No header but multiple groups\n");
+		return -EINVAL;
+	}
+
+	/* ignore indicates the input stream needed to be padded */
+	hdr->ignore = cpu_to_be16(ignore);
+	if (ignore)
+		pr_debug("marked %d bytes as ignore\n", ignore);
+
+	if (add_header)
+		ret = nx842_crypto_add_header(hdr, dst);
+	if (ret)
+		return ret;
+
+	*dlen = p.ototal;
+
+	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+	return 0;
+}
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+		      struct nx842_crypto_param *p,
+		      struct nx842_crypto_header_group *g,
+		      struct nx842_constraints *c,
+		      u16 ignore,
+		      bool usehw)
+{
+	unsigned int slen = be32_to_cpu(g->compressed_length);
+	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+	unsigned int dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	u16 padding = be16_to_cpu(g->padding);
+	int ret, spadding = 0, dpadding = 0;
+	ktime_t timeout;
+
+	if (!slen || !required_len)
+		return -EINVAL;
+
+	if (p->iremain <= 0 || padding + slen > p->iremain)
+		return -EOVERFLOW;
+
+	if (p->oremain <= 0 || required_len - ignore > p->oremain)
+		return -ENOSPC;
+
+	src += padding;
+
+	if (!usehw)
+		goto usesw;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		goto usesw;
+	if (slen < adj_slen || (u64)src % c->alignment) {
+		/* we can append padding bytes because the 842 format defines
+		 * an "end" template (see lib/842/842_decompress.c) and will
+		 * ignore any bytes following it.
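+		 * The extra byte count is kept in spadding and subtracted
+		 * from slen again once decompression succeeds.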
+ */ + if (slen < adj_slen) + memset(ctx->sbounce + slen, 0, adj_slen - slen); + memcpy(ctx->sbounce, src, slen); + src = ctx->sbounce; + spadding = adj_slen - slen; + slen = adj_slen; + pr_debug("using decomp sbounce buffer, len %x\n", slen); + } + + if (dlen % c->multiple) + dlen = round_down(dlen, c->multiple); + if (dlen < required_len || (u64)dst % c->alignment) { + dst = ctx->dbounce; + dlen = min(required_len, BOUNCE_BUFFER_SIZE); + pr_debug("using decomp dbounce buffer, len %x\n", dlen); + } + if (dlen < c->minimum) + goto usesw; + if (dlen > c->maximum) + dlen = c->maximum; + + tmplen = dlen; + timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); + do { + dlen = tmplen; /* reset dlen, if we're retrying */ + ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem); + } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); + if (ret) { +usesw: + /* reset everything, sw doesn't have constraints */ + src = p->in + padding; + slen = be32_to_cpu(g->compressed_length); + spadding = 0; + dst = p->out; + dlen = p->oremain; + dpadding = 0; + if (dlen < required_len) { /* have ignore bytes */ + dst = ctx->dbounce; + dlen = BOUNCE_BUFFER_SIZE; + } + pr_info_ratelimited("using software 842 decompression\n"); + ret = sw842_decompress(src, slen, dst, &dlen); + } + if (ret) + return ret; + + slen -= spadding; + + dlen -= ignore; + if (ignore) + pr_debug("ignoring last %x bytes\n", ignore); + + if (dst == ctx->dbounce) + memcpy(p->out, dst, dlen); + + pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", + slen, padding, dlen, ignore); + + return update_param(p, slen + padding, dlen); +} + +static int nx842_crypto_decompress(struct crypto_tfm *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_header *hdr; + struct nx842_crypto_param p; + struct nx842_constraints c; + int n, ret, hdr_len; + u16 ignore = 0; + bool usehw = true; + + p.in = (u8 *)src; + p.iremain = slen; + p.out = dst; + p.oremain = *dlen; + p.ototal = 0; + + *dlen = 0; + + if (read_constraints(&c)) + usehw = false; + + hdr = (struct nx842_crypto_header *)src; + + /* If it doesn't start with our header magic number, assume it's a raw + * 842 compressed buffer and pass it directly to the hardware driver + */ + if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { + struct nx842_crypto_header_group g = { + .padding = 0, + .compressed_length = cpu_to_be32(p.iremain), + .uncompressed_length = cpu_to_be32(p.oremain), + }; + + ret = decompress(ctx, &p, &g, &c, 0, usehw); + if (ret) + return ret; + + *dlen = p.ototal; + + return 0; + } + + if (!hdr->groups) { + pr_err("header has no groups\n"); + return -EINVAL; + } + if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { + pr_err("header has too many groups %x, max %x\n", + hdr->groups, NX842_CRYPTO_GROUP_MAX); + return -EINVAL; + } + + hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); + if (hdr_len > slen) + return -EOVERFLOW; + + memcpy(&ctx->header, src, hdr_len); + hdr = &ctx->header; + + for (n = 0; n < hdr->groups; n++) { + /* ignore applies to last group */ + if (n + 1 == hdr->groups) + ignore = be16_to_cpu(hdr->ignore); + + ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw); + if (ret) + return ret; + } + + *dlen = p.ototal; + + pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); + + return 0; +} + +static struct crypto_alg alg = { + .cra_name = "842", + .cra_driver_name = "842-nx", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, + 
.cra_ctxsize = sizeof(struct nx842_crypto_ctx), + .cra_module = THIS_MODULE, + .cra_init = nx842_crypto_init, + .cra_exit = nx842_crypto_exit, + .cra_u = { .compress = { + .coa_compress = nx842_crypto_compress, + .coa_decompress = nx842_crypto_decompress } } +}; + +static int __init nx842_crypto_mod_init(void) +{ + return crypto_register_alg(&alg); +} +module_init(nx842_crypto_mod_init); + +static void __exit nx842_crypto_mod_exit(void) +{ + crypto_unregister_alg(&alg); +} +module_exit(nx842_crypto_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface"); +MODULE_ALIAS_CRYPTO("842"); +MODULE_ALIAS_CRYPTO("842-nx"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c new file mode 100644 index 000000000000..664f13dd06ed --- /dev/null +++ b/drivers/crypto/nx/nx-842-platform.c @@ -0,0 +1,84 @@ + +#include "nx-842.h" + +/* this is needed, separate from the main nx-842.c driver, because that main + * driver loads the platform drivers during its init(), and it expects one + * (or none) of the platform drivers to set this pointer to its driver. + * That means this pointer can't be in the main nx-842 driver, because it + * wouldn't be accessible until after the main driver loaded, which wouldn't + * be possible as it's waiting for the platform driver to load. So place it + * here. + */ +static struct nx842_driver *driver; +static DEFINE_SPINLOCK(driver_lock); + +struct nx842_driver *nx842_platform_driver(void) +{ + return driver; +} +EXPORT_SYMBOL_GPL(nx842_platform_driver); + +bool nx842_platform_driver_set(struct nx842_driver *_driver) +{ + bool ret = false; + + spin_lock(&driver_lock); + + if (!driver) { + driver = _driver; + ret = true; + } else + WARN(1, "can't set platform driver, already set to %s\n", + driver->name); + + spin_unlock(&driver_lock); + return ret; +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_set); + +/* only call this from the platform driver exit function */ +void nx842_platform_driver_unset(struct nx842_driver *_driver) +{ + spin_lock(&driver_lock); + + if (driver == _driver) + driver = NULL; + else if (driver) + WARN(1, "can't unset platform driver %s, currently set to %s\n", + _driver->name, driver->name); + else + WARN(1, "can't unset platform driver, already unset\n"); + + spin_unlock(&driver_lock); +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_unset); + +bool nx842_platform_driver_get(void) +{ + bool ret = false; + + spin_lock(&driver_lock); + + if (driver) + ret = try_module_get(driver->owner); + + spin_unlock(&driver_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_get); + +void nx842_platform_driver_put(void) +{ + spin_lock(&driver_lock); + + if (driver) + module_put(driver->owner); + + spin_unlock(&driver_lock); +} +EXPORT_SYMBOL_GPL(nx842_platform_driver_put); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); +MODULE_DESCRIPTION("842 H/W Compression platform driver"); diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c new file mode 100644 index 000000000000..33b3b0abf4ae --- /dev/null +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -0,0 +1,637 @@ +/* + * Driver for IBM PowerNV 842 compression accelerator + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either 
version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "nx-842.h"
+
+#include <linux/timer.h>
+
+#include <asm/prom.h>
+#include <asm/icswx.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
+MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+
+#define WORKMEM_ALIGN	(CRB_ALIGN)
+#define CSB_WAIT_MAX	(5000) /* ms */
+
+struct nx842_workmem {
+	/* Below fields must be properly aligned */
+	struct coprocessor_request_block crb; /* CRB_ALIGN align */
+	struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */
+	struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */
+	/* Above fields must be properly aligned */
+
+	ktime_t start;
+
+	char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
+} __packed __aligned(WORKMEM_ALIGN);
+
+struct nx842_coproc {
+	unsigned int chip_id;
+	unsigned int ct;
+	unsigned int ci;
+	struct list_head list;
+};
+
+/* no cpu hotplug on powernv, so this list never changes after init */
+static LIST_HEAD(nx842_coprocs);
+static unsigned int nx842_ct;
+
+/**
+ * setup_indirect_dde - Setup an indirect DDE
+ *
+ * The DDE is set up with the DDE count, byte count, and address of
+ * the first direct DDE in the list.
+ */
+static void setup_indirect_dde(struct data_descriptor_entry *dde,
+			       struct data_descriptor_entry *ddl,
+			       unsigned int dde_count, unsigned int byte_count)
+{
+	dde->flags = 0;
+	dde->count = dde_count;
+	dde->index = 0;
+	dde->length = cpu_to_be32(byte_count);
+	dde->address = cpu_to_be64(nx842_get_pa(ddl));
+}
+
+/**
+ * setup_direct_dde - Setup single DDE from buffer
+ *
+ * The DDE is set up with the buffer and length.  The buffer must be properly
+ * aligned.  The used length is returned.
+ * Returns:
+ * N	Successfully set up DDE with N bytes
+ */
+static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
+				     unsigned long pa, unsigned int len)
+{
+	unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa));
+
+	dde->flags = 0;
+	dde->count = 0;
+	dde->index = 0;
+	dde->length = cpu_to_be32(l);
+	dde->address = cpu_to_be64(pa);
+
+	return l;
+}
+
+/**
+ * setup_ddl - Setup DDL from buffer
+ *
+ * Returns:
+ * 0	Successfully set up DDL
+ */
+static int setup_ddl(struct data_descriptor_entry *dde,
+		     struct data_descriptor_entry *ddl,
+		     unsigned char *buf, unsigned int len,
+		     bool in)
+{
+	unsigned long pa = nx842_get_pa(buf);
+	int i, ret, total_len = len;
+
+	if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) {
+		pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n",
+			 in ? "input" : "output", pa, DDE_BUFFER_ALIGN);
+		return -EINVAL;
+	}
+
+	/* only need to check last mult; since buffer must be
+	 * DDE_BUFFER_ALIGN aligned, and that is a multiple of
+	 * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers
+	 * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT.
+	 */
+	if (len % DDE_BUFFER_LAST_MULT) {
+		pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n",
+			 in ?
"input" : "output", len, DDE_BUFFER_LAST_MULT); + if (in) + return -EINVAL; + len = round_down(len, DDE_BUFFER_LAST_MULT); + } + + /* use a single direct DDE */ + if (len <= LEN_ON_PAGE(pa)) { + ret = setup_direct_dde(dde, pa, len); + WARN_ON(ret < len); + return 0; + } + + /* use the DDL */ + for (i = 0; i < DDL_LEN_MAX && len > 0; i++) { + ret = setup_direct_dde(&ddl[i], pa, len); + buf += ret; + len -= ret; + pa = nx842_get_pa(buf); + } + + if (len > 0) { + pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n", + total_len, in ? "input" : "output", len); + if (in) + return -EMSGSIZE; + total_len -= len; + } + setup_indirect_dde(dde, ddl, i, total_len); + + return 0; +} + +#define CSB_ERR(csb, msg, ...) \ + pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \ + ##__VA_ARGS__, (csb)->flags, \ + (csb)->cs, (csb)->cc, (csb)->ce, \ + be32_to_cpu((csb)->count)) + +#define CSB_ERR_ADDR(csb, msg, ...) \ + CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ + (unsigned long)be64_to_cpu((csb)->address)) + +/** + * wait_for_csb + */ +static int wait_for_csb(struct nx842_workmem *wmem, + struct coprocessor_status_block *csb) +{ + ktime_t start = wmem->start, now = ktime_get(); + ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); + + while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { + cpu_relax(); + now = ktime_get(); + if (ktime_after(now, timeout)) + break; + } + + /* hw has updated csb and output buffer */ + barrier(); + + /* check CSB flags */ + if (!(csb->flags & CSB_V)) { + CSB_ERR(csb, "CSB still not valid after %ld us, giving up", + (long)ktime_us_delta(now, start)); + return -ETIMEDOUT; + } + if (csb->flags & CSB_F) { + CSB_ERR(csb, "Invalid CSB format"); + return -EPROTO; + } + if (csb->flags & CSB_CH) { + CSB_ERR(csb, "Invalid CSB chaining state"); + return -EPROTO; + } + + /* verify CSB completion sequence is 0 */ + if (csb->cs) { + CSB_ERR(csb, "Invalid CSB completion sequence"); + return -EPROTO; + } + + /* check CSB Completion Code */ + switch (csb->cc) { + /* no error */ + case CSB_CC_SUCCESS: + break; + case CSB_CC_TPBC_GT_SPBC: + /* not an error, but the compressed data is + * larger than the uncompressed data :( + */ + break; + + /* input data errors */ + case CSB_CC_OPERAND_OVERLAP: + /* input and output buffers overlap */ + CSB_ERR(csb, "Operand Overlap error"); + return -EINVAL; + case CSB_CC_INVALID_OPERAND: + CSB_ERR(csb, "Invalid operand"); + return -EINVAL; + case CSB_CC_NOSPC: + /* output buffer too small */ + return -ENOSPC; + case CSB_CC_ABORT: + CSB_ERR(csb, "Function aborted"); + return -EINTR; + case CSB_CC_CRC_MISMATCH: + CSB_ERR(csb, "CRC mismatch"); + return -EINVAL; + case CSB_CC_TEMPL_INVALID: + CSB_ERR(csb, "Compressed data template invalid"); + return -EINVAL; + case CSB_CC_TEMPL_OVERFLOW: + CSB_ERR(csb, "Compressed data template shows data past end"); + return -EINVAL; + + /* these should not happen */ + case CSB_CC_INVALID_ALIGN: + /* setup_ddl should have detected this */ + CSB_ERR_ADDR(csb, "Invalid alignment"); + return -EINVAL; + case CSB_CC_DATA_LENGTH: + /* setup_ddl should have detected this */ + CSB_ERR(csb, "Invalid data length"); + return -EINVAL; + case CSB_CC_WR_TRANSLATION: + case CSB_CC_TRANSLATION: + case CSB_CC_TRANSLATION_DUP1: + case CSB_CC_TRANSLATION_DUP2: + case CSB_CC_TRANSLATION_DUP3: + case CSB_CC_TRANSLATION_DUP4: + case CSB_CC_TRANSLATION_DUP5: + case CSB_CC_TRANSLATION_DUP6: + /* should not happen, we use physical addrs */ + CSB_ERR_ADDR(csb, "Translation error"); + return -EPROTO; + case CSB_CC_WR_PROTECTION: + case CSB_CC_PROTECTION: + 
case CSB_CC_PROTECTION_DUP1:
+	case CSB_CC_PROTECTION_DUP2:
+	case CSB_CC_PROTECTION_DUP3:
+	case CSB_CC_PROTECTION_DUP4:
+	case CSB_CC_PROTECTION_DUP5:
+	case CSB_CC_PROTECTION_DUP6:
+		/* should not happen, we use physical addrs */
+		CSB_ERR_ADDR(csb, "Protection error");
+		return -EPROTO;
+	case CSB_CC_PRIVILEGE:
+		/* shouldn't happen, we're in HYP mode */
+		CSB_ERR(csb, "Insufficient Privilege error");
+		return -EPROTO;
+	case CSB_CC_EXCESSIVE_DDE:
+		/* shouldn't happen, setup_ddl doesn't use many DDEs */
+		CSB_ERR(csb, "Too many DDEs in DDL");
+		return -EINVAL;
+	case CSB_CC_TRANSPORT:
+		/* shouldn't happen, we setup CRB correctly */
+		CSB_ERR(csb, "Invalid CRB");
+		return -EINVAL;
+	case CSB_CC_SEGMENTED_DDL:
+		/* shouldn't happen, setup_ddl creates DDL right */
+		CSB_ERR(csb, "Segmented DDL error");
+		return -EINVAL;
+	case CSB_CC_DDE_OVERFLOW:
+		/* shouldn't happen, setup_ddl creates DDL right */
+		CSB_ERR(csb, "DDE overflow error");
+		return -EINVAL;
+	case CSB_CC_SESSION:
+		/* should not happen with ICSWX */
+		CSB_ERR(csb, "Session violation error");
+		return -EPROTO;
+	case CSB_CC_CHAIN:
+		/* should not happen, we don't use chained CRBs */
+		CSB_ERR(csb, "Chained CRB error");
+		return -EPROTO;
+	case CSB_CC_SEQUENCE:
+		/* should not happen, we don't use chained CRBs */
+		CSB_ERR(csb, "CRB sequence number error");
+		return -EPROTO;
+	case CSB_CC_UNKNOWN_CODE:
+		CSB_ERR(csb, "Unknown subfunction code");
+		return -EPROTO;
+
+	/* hardware errors */
+	case CSB_CC_RD_EXTERNAL:
+	case CSB_CC_RD_EXTERNAL_DUP1:
+	case CSB_CC_RD_EXTERNAL_DUP2:
+	case CSB_CC_RD_EXTERNAL_DUP3:
+		CSB_ERR_ADDR(csb, "Read error outside coprocessor");
+		return -EPROTO;
+	case CSB_CC_WR_EXTERNAL:
+		CSB_ERR_ADDR(csb, "Write error outside coprocessor");
+		return -EPROTO;
+	case CSB_CC_INTERNAL:
+		CSB_ERR(csb, "Internal error in coprocessor");
+		return -EPROTO;
+	case CSB_CC_PROVISION:
+		CSB_ERR(csb, "Storage provision error");
+		return -EPROTO;
+	case CSB_CC_HW:
+		CSB_ERR(csb, "Correctable hardware error");
+		return -EPROTO;
+
+	default:
+		CSB_ERR(csb, "Invalid CC %d", csb->cc);
+		return -EPROTO;
+	}
+
+	/* check Completion Extension state */
+	if (csb->ce & CSB_CE_TERMINATION) {
+		CSB_ERR(csb, "CSB request was terminated");
+		return -EPROTO;
+	}
+	if (csb->ce & CSB_CE_INCOMPLETE) {
+		CSB_ERR(csb, "CSB request not complete");
+		return -EPROTO;
+	}
+	if (!(csb->ce & CSB_CE_TPBC)) {
+		CSB_ERR(csb, "TPBC not provided, unknown target length");
+		return -EPROTO;
+	}
+
+	/* successful completion; count is big-endian, as in CSB_ERR above */
+	pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+			     be32_to_cpu(csb->count),
+			     (unsigned long)ktime_us_delta(now, start));
+
+	return 0;
+}
+
+/**
+ * nx842_powernv_function - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * output data.  If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
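+ *
+ * (The CRB and CSB for the request live inside @workmem, and
+ * wait_for_csb() polls that CSB for completion, so concurrent calls
+ * sharing one buffer would corrupt each other's request state.)
+ *
+ * A minimal calling sketch (illustrative only; assumes the buffers
+ * already satisfy nx842_powernv_constraints):
+ *
+ *	void *wmem = kmalloc(nx842_powernv_driver.workmem_size, GFP_KERNEL);
+ *	unsigned int outlen = PAGE_SIZE;
+ *	int ret = nx842_powernv_function(in, PAGE_SIZE, out, &outlen,
+ *					 wmem, CCW_FC_842_COMP_NOCRC);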
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0		Success, output of length @outlenp stored in the buffer at @out
+ * -ENODEV	Hardware unavailable
+ * -ENOSPC	Output buffer is too small
+ * -EMSGSIZE	Input buffer too large
+ * -EINVAL	buffer constraints do not fit nx842_constraints
+ * -EPROTO	hardware error during operation
+ * -ETIMEDOUT	hardware did not complete operation in reasonable time
+ * -EINTR	operation was aborted
+ */
+static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlenp,
+				  void *workmem, int fc)
+{
+	struct coprocessor_request_block *crb;
+	struct coprocessor_status_block *csb;
+	struct nx842_workmem *wmem;
+	int ret;
+	u64 csb_addr;
+	u32 ccw;
+	unsigned int outlen = *outlenp;
+
+	wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+	*outlenp = 0;
+
+	/* shouldn't happen, we don't load without a coproc */
+	if (!nx842_ct) {
+		pr_err_ratelimited("coprocessor CT is 0");
+		return -ENODEV;
+	}
+
+	crb = &wmem->crb;
+	csb = &crb->csb;
+
+	/* Clear any previous values */
+	memset(crb, 0, sizeof(*crb));
+
+	/* set up DDLs */
+	ret = setup_ddl(&crb->source, wmem->ddl_in,
+			(unsigned char *)in, inlen, true);
+	if (ret)
+		return ret;
+	ret = setup_ddl(&crb->target, wmem->ddl_out,
+			out, outlen, false);
+	if (ret)
+		return ret;
+
+	/* set up CCW */
+	ccw = 0;
+	ccw = SET_FIELD(ccw, CCW_CT, nx842_ct);
+	ccw = SET_FIELD(ccw, CCW_CI_842, 0); /* use 0 for hw auto-selection */
+	ccw = SET_FIELD(ccw, CCW_FC_842, fc);
+
+	/* set up CRB's CSB addr */
+	csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
+	csb_addr |= CRB_CSB_AT; /* Addrs are phys */
+	crb->csb_addr = cpu_to_be64(csb_addr);
+
+	wmem->start = ktime_get();
+
+	/* do ICSWX */
+	ret = icswx(cpu_to_be32(ccw), crb);
+
+	pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret,
+			     (unsigned int)ccw,
+			     (unsigned int)be32_to_cpu(crb->ccw));
+
+	switch (ret) {
+	case ICSWX_INITIATED:
+		ret = wait_for_csb(wmem, csb);
+		break;
+	case ICSWX_BUSY:
+		pr_debug_ratelimited("842 Coprocessor busy\n");
+		ret = -EBUSY;
+		break;
+	case ICSWX_REJECTED:
+		pr_err_ratelimited("ICSWX rejected\n");
+		ret = -EPROTO;
+		break;
+	default:
+		pr_err_ratelimited("Invalid ICSWX return code %x\n", ret);
+		ret = -EPROTO;
+		break;
+	}
+
+	if (!ret)
+		*outlenp = be32_to_cpu(csb->count);
+
+	return ret;
+}
+
+/**
+ * nx842_powernv_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * compressed data.  If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
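+ *
+ * This is a thin wrapper that passes function code CCW_FC_842_COMP_NOCRC
+ * to nx842_powernv_function(); all buffer, length, and @workmem rules
+ * described there apply here unchanged.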
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ *
+ * Returns: see nx842_powernv_function()
+ */
+static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlenp,
+				  void *wmem)
+{
+	return nx842_powernv_function(in, inlen, out, outlenp,
+				      wmem, CCW_FC_842_COMP_NOCRC);
+}
+
+/**
+ * nx842_powernv_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function *@outlenp contains the length of the
+ * decompressed data.  If there is an error then *@outlenp will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ *           nx842_powernv_driver.workmem_size
+ *
+ * Returns: see nx842_powernv_function()
+ */
+static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
+				    unsigned char *out, unsigned int *outlenp,
+				    void *wmem)
+{
+	return nx842_powernv_function(in, inlen, out, outlenp,
+				      wmem, CCW_FC_842_DECOMP_NOCRC);
+}
+
+static int __init nx842_powernv_probe(struct device_node *dn)
+{
+	struct nx842_coproc *coproc;
+	struct property *ct_prop, *ci_prop;
+	unsigned int ct, ci;
+	int chip_id;
+
+	chip_id = of_get_ibm_chip_id(dn);
+	if (chip_id < 0) {
+		pr_err("ibm,chip-id missing\n");
+		return -EINVAL;
+	}
+	ct_prop = of_find_property(dn, "ibm,842-coprocessor-type", NULL);
+	if (!ct_prop) {
+		pr_err("ibm,842-coprocessor-type missing\n");
+		return -EINVAL;
+	}
+	ct = be32_to_cpu(*(unsigned int *)ct_prop->value);
+	ci_prop = of_find_property(dn, "ibm,842-coprocessor-instance", NULL);
+	if (!ci_prop) {
+		pr_err("ibm,842-coprocessor-instance missing\n");
+		return -EINVAL;
+	}
+	ci = be32_to_cpu(*(unsigned int *)ci_prop->value);
+
+	coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
+	if (!coproc)
+		return -ENOMEM;
+
+	coproc->chip_id = chip_id;
+	coproc->ct = ct;
+	coproc->ci = ci;
+	INIT_LIST_HEAD(&coproc->list);
+	list_add(&coproc->list, &nx842_coprocs);
+
+	pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
+
+	if (!nx842_ct)
+		nx842_ct = ct;
+	else if (nx842_ct != ct)
+		pr_err("NX842 chip %d, CT %d != first found CT %d\n",
+		       chip_id, ct, nx842_ct);
+
+	return 0;
+}
+
+static struct nx842_constraints nx842_powernv_constraints = {
+	.alignment =	DDE_BUFFER_ALIGN,
+	.multiple =	DDE_BUFFER_LAST_MULT,
+	.minimum =	DDE_BUFFER_LAST_MULT,
+	.maximum =	(DDL_LEN_MAX - 1) * PAGE_SIZE,
+};
+
+static struct nx842_driver nx842_powernv_driver = {
+	.name =		KBUILD_MODNAME,
+	.owner =	THIS_MODULE,
+	.workmem_size =	sizeof(struct nx842_workmem),
+	.constraints =	&nx842_powernv_constraints,
+	.compress =	nx842_powernv_compress,
+	.decompress =	nx842_powernv_decompress,
+};
+
+static __init int nx842_powernv_init(void)
+{
+	struct device_node *dn;
+
+	/* verify workmem size/align restrictions */
+	BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
+	BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN);
+	BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN);
+	/* verify buffer size/align restrictions */
+	BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
+
BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); + BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); + + pr_info("loading\n"); + + for_each_compatible_node(dn, NULL, "ibm,power-nx") + nx842_powernv_probe(dn); + + if (!nx842_ct) { + pr_err("no coprocessors found\n"); + return -ENODEV; + } + + if (!nx842_platform_driver_set(&nx842_powernv_driver)) { + struct nx842_coproc *coproc, *n; + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + list_del(&coproc->list); + kfree(coproc); + } + + return -EEXIST; + } + + pr_info("loaded\n"); + + return 0; +} +module_init(nx842_powernv_init); + +static void __exit nx842_powernv_exit(void) +{ + struct nx842_coproc *coproc, *n; + + nx842_platform_driver_unset(&nx842_powernv_driver); + + list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) { + list_del(&coproc->list); + kfree(coproc); + } + + pr_info("unloaded\n"); +} +module_exit(nx842_powernv_exit); diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c new file mode 100644 index 000000000000..3040a6091bf2 --- /dev/null +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -0,0 +1,1140 @@ +/* + * Driver for IBM Power 842 compression accelerator + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright (C) IBM Corporation, 2012 + * + * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> + * Seth Jennings <sjenning@linux.vnet.ibm.com> + */ + +#include <asm/vio.h> + +#include "nx-842.h" +#include "nx_csbcpb.h" /* struct nx_csbcpb */ + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); +MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); + +static struct nx842_constraints nx842_pseries_constraints = { + .alignment = DDE_BUFFER_ALIGN, + .multiple = DDE_BUFFER_LAST_MULT, + .minimum = DDE_BUFFER_LAST_MULT, + .maximum = PAGE_SIZE, /* dynamic, max_sync_size */ +}; + +static int check_constraints(unsigned long buf, unsigned int *len, bool in) +{ + if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) { + pr_debug("%s buffer 0x%lx not aligned to 0x%x\n", + in ? "input" : "output", buf, + nx842_pseries_constraints.alignment); + return -EINVAL; + } + if (*len % nx842_pseries_constraints.multiple) { + pr_debug("%s buffer len 0x%x not multiple of 0x%x\n", + in ? "input" : "output", *len, + nx842_pseries_constraints.multiple); + if (in) + return -EINVAL; + *len = round_down(*len, nx842_pseries_constraints.multiple); + } + if (*len < nx842_pseries_constraints.minimum) { + pr_debug("%s buffer len 0x%x under minimum 0x%x\n", + in ? "input" : "output", *len, + nx842_pseries_constraints.minimum); + return -EINVAL; + } + if (*len > nx842_pseries_constraints.maximum) { + pr_debug("%s buffer len 0x%x over maximum 0x%x\n", + in ? 
"input" : "output", *len, + nx842_pseries_constraints.maximum); + if (in) + return -EINVAL; + *len = nx842_pseries_constraints.maximum; + } + return 0; +} + +/* I assume we need to align the CSB? */ +#define WORKMEM_ALIGN (256) + +struct nx842_workmem { + /* scatterlist */ + char slin[4096]; + char slout[4096]; + /* coprocessor status/parameter block */ + struct nx_csbcpb csbcpb; + + char padding[WORKMEM_ALIGN]; +} __aligned(WORKMEM_ALIGN); + +/* Macros for fields within nx_csbcpb */ +/* Check the valid bit within the csbcpb valid field */ +#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) + +/* CE macros operate on the completion_extension field bits in the csbcpb. + * CE0 0=full completion, 1=partial completion + * CE1 0=CE0 indicates completion, 1=termination (output may be modified) + * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ +#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) +#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) +#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) + +/* The NX unit accepts data only on 4K page boundaries */ +#define NX842_HW_PAGE_SIZE (4096) +#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) + +enum nx842_status { + UNAVAILABLE, + AVAILABLE +}; + +struct ibm_nx842_counters { + atomic64_t comp_complete; + atomic64_t comp_failed; + atomic64_t decomp_complete; + atomic64_t decomp_failed; + atomic64_t swdecomp; + atomic64_t comp_times[32]; + atomic64_t decomp_times[32]; +}; + +static struct nx842_devdata { + struct vio_dev *vdev; + struct device *dev; + struct ibm_nx842_counters *counters; + unsigned int max_sg_len; + unsigned int max_sync_size; + unsigned int max_sync_sg; + enum nx842_status status; +} __rcu *devdata; +static DEFINE_SPINLOCK(devdata_mutex); + +#define NX842_COUNTER_INC(_x) \ +static inline void nx842_inc_##_x( \ + const struct nx842_devdata *dev) { \ + if (dev) \ + atomic64_inc(&dev->counters->_x); \ +} +NX842_COUNTER_INC(comp_complete); +NX842_COUNTER_INC(comp_failed); +NX842_COUNTER_INC(decomp_complete); +NX842_COUNTER_INC(decomp_failed); +NX842_COUNTER_INC(swdecomp); + +#define NX842_HIST_SLOTS 16 + +static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) +{ + int bucket = fls(time); + + if (bucket) + bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); + + atomic64_inc(×[bucket]); +} + +/* NX unit operation flags */ +#define NX842_OP_COMPRESS 0x0 +#define NX842_OP_CRC 0x1 +#define NX842_OP_DECOMPRESS 0x2 +#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) +#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) +#define NX842_OP_ASYNC (1<<23) +#define NX842_OP_NOTIFY (1<<22) +#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) + +static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) +{ + /* No use of DMA mappings within the driver. 
	 */
+	return 0;
+}
+
+struct nx842_slentry {
+	__be64 ptr; /* Real address (use __pa()) */
+	__be64 len;
+};
+
+/* pHyp scatterlist entry */
+struct nx842_scatterlist {
+	int entry_nr; /* number of slentries */
+	struct nx842_slentry *entries; /* ptr to array of slentries */
+};
+
+/* Does not include sizeof(entry_nr) in the size */
+static inline unsigned long nx842_get_scatterlist_size(
+				struct nx842_scatterlist *sl)
+{
+	return sl->entry_nr * sizeof(struct nx842_slentry);
+}
+
+static int nx842_build_scatterlist(unsigned long buf, int len,
+				   struct nx842_scatterlist *sl)
+{
+	unsigned long entrylen;
+	struct nx842_slentry *entry;
+
+	sl->entry_nr = 0;
+
+	entry = sl->entries;
+	while (len) {
+		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
+		entrylen = min_t(int, len,
+				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
+		entry->len = cpu_to_be64(entrylen);
+
+		len -= entrylen;
+		buf += entrylen;
+
+		sl->entry_nr++;
+		entry++;
+	}
+
+	return 0;
+}
+
+static int nx842_validate_result(struct device *dev,
+				 struct cop_status_block *csb)
+{
+	/* The csb must be valid after returning from vio_h_cop_sync */
+	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
+		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
+			__func__);
+		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
+			csb->valid,
+			csb->crb_seq_number,
+			csb->completion_code,
+			csb->completion_extension);
+		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
+			be32_to_cpu(csb->processed_byte_count),
+			(unsigned long)be64_to_cpu(csb->address));
+		return -EIO;
+	}
+
+	/* Check return values from the hardware in the CSB */
+	switch (csb->completion_code) {
+	case 0:	/* Completed without error */
+		break;
+	case 64: /* Target bytes > Source bytes during compression */
+	case 13: /* Output buffer too small */
+		dev_dbg(dev, "%s: Compression output larger than input\n",
+			__func__);
+		return -ENOSPC;
+	case 66: /* Input data contains an illegal template field */
+	case 67: /* Template indicates data past the end of the input stream */
+		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
+			__func__, csb->completion_code);
+		return -EINVAL;
+	default:
+		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
+			__func__, csb->completion_code);
+		return -EIO;
+	}
+
+	/* Hardware sanity check */
+	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
+		dev_err(dev, "%s: No error returned by hardware, but "
+			"data returned is unusable, contact support.\n"
+			"(Additional info: csbcpb->processed bytes "
+			"does not specify processed bytes for the "
+			"target buffer.)\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * nx842_pseries_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * compressed data.  If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
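+ *
+ * A caller sketch (illustrative only; error handling elided, and the
+ * buffers are assumed to already satisfy nx842_pseries_constraints):
+ *
+ *	void *wmem = kmalloc(nx842_pseries_driver.workmem_size, GFP_KERNEL);
+ *	unsigned int outlen = PAGE_SIZE;
+ *	int ret = nx842_pseries_compress(in, PAGE_SIZE, out, &outlen, wmem);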
+ *
+ * @in: Pointer to input buffer
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wmem: ptr to buffer for working memory, size determined by
+ *        nx842_pseries_driver.workmem_size
+ *
+ * Returns:
+ * 0		Success, output of length @outlen stored in the buffer at @out
+ * -ENOMEM	Unable to allocate internal buffers
+ * -ENOSPC	Output buffer is too small
+ * -EIO		Internal error
+ * -ENODEV	Hardware unavailable
+ */
+static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
+				  unsigned char *out, unsigned int *outlen,
+				  void *wmem)
+{
+	struct nx842_devdata *local_devdata;
+	struct device *dev = NULL;
+	struct nx842_workmem *workmem;
+	struct nx842_scatterlist slin, slout;
+	struct nx_csbcpb *csbcpb;
+	int ret = 0, max_sync_size;
+	unsigned long inbuf, outbuf;
+	struct vio_pfo_op op = {
+		.done = NULL,
+		.handle = 0,
+		.timeout = 0,
+	};
+	unsigned long start = get_tb();
+
+	inbuf = (unsigned long)in;
+	if (check_constraints(inbuf, &inlen, true))
+		return -EINVAL;
+
+	outbuf = (unsigned long)out;
+	if (check_constraints(outbuf, outlen, false))
+		return -EINVAL;
+
+	rcu_read_lock();
+	local_devdata = rcu_dereference(devdata);
+	if (!local_devdata || !local_devdata->dev) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	max_sync_size = local_devdata->max_sync_size;
+	dev = local_devdata->dev;
+
+	/* Init scatterlist */
+	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
+	slin.entries = (struct nx842_slentry *)workmem->slin;
+	slout.entries = (struct nx842_slentry *)workmem->slout;
+
+	/* Init operation */
+	op.flags = NX842_OP_COMPRESS;
+	csbcpb = &workmem->csbcpb;
+	memset(csbcpb, 0, sizeof(*csbcpb));
+	op.csbcpb = nx842_get_pa(csbcpb);
+
+	if ((inbuf & NX842_HW_PAGE_MASK) ==
+	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.in = nx842_get_pa((void *)inbuf);
+		op.inlen = inlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(inbuf, inlen, &slin);
+		op.in = nx842_get_pa(slin.entries);
+		op.inlen = -nx842_get_scatterlist_size(&slin);
+	}
+
+	if ((outbuf & NX842_HW_PAGE_MASK) ==
+	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.out = nx842_get_pa((void *)outbuf);
+		op.outlen = *outlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(outbuf, *outlen, &slout);
+		op.out = nx842_get_pa(slout.entries);
+		op.outlen = -nx842_get_scatterlist_size(&slout);
+	}
+
+	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
+		__func__, (unsigned long)op.in, (long)op.inlen,
+		(unsigned long)op.out, (long)op.outlen);
+
+	/* Send request to pHyp */
+	ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+	/* Check for pHyp error */
+	if (ret) {
+		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+			__func__, ret, op.hcall_err);
+		ret = -EIO;
+		goto unlock;
+	}
+
+	/* Check for hardware error */
+	ret = nx842_validate_result(dev, &csbcpb->csb);
+	if (ret)
+		goto unlock;
+
+	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
+	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);
+
+unlock:
+	if (ret)
+		nx842_inc_comp_failed(local_devdata);
+	else {
+		nx842_inc_comp_complete(local_devdata);
+		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
+				    (get_tb() - start) / tb_ticks_per_usec);
+	}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nx842_pseries_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM Power systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer.  The size allocated to the output buffer is
+ * provided by the caller of this function in @outlen.  Upon return from
+ * this function @outlen contains the length of the decompressed data.
+ * If there is an error then @outlen will be 0 and an error will be
+ * specified by the return code from this function.
+ *
+ * @in: Pointer to input buffer
+ * @inlen: Length of input buffer
+ * @out: Pointer to output buffer
+ * @outlen: Length of output buffer
+ * @wmem: ptr to buffer for working memory, size determined by
+ *        nx842_pseries_driver.workmem_size
+ *
+ * Returns:
+ * 0		Success, output of length @outlen stored in the buffer at @out
+ * -ENODEV	Hardware decompression device is unavailable
+ * -ENOMEM	Unable to allocate internal buffers
+ * -ENOSPC	Output buffer is too small
+ * -EINVAL	Bad input data encountered when attempting decompress
+ * -EIO		Internal error
+ */
+static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
+				    unsigned char *out, unsigned int *outlen,
+				    void *wmem)
+{
+	struct nx842_devdata *local_devdata;
+	struct device *dev = NULL;
+	struct nx842_workmem *workmem;
+	struct nx842_scatterlist slin, slout;
+	struct nx_csbcpb *csbcpb;
+	int ret = 0, max_sync_size;
+	unsigned long inbuf, outbuf;
+	struct vio_pfo_op op = {
+		.done = NULL,
+		.handle = 0,
+		.timeout = 0,
+	};
+	unsigned long start = get_tb();
+
+	/* Ensure page alignment and size */
+	inbuf = (unsigned long)in;
+	if (check_constraints(inbuf, &inlen, true))
+		return -EINVAL;
+
+	outbuf = (unsigned long)out;
+	if (check_constraints(outbuf, outlen, false))
+		return -EINVAL;
+
+	rcu_read_lock();
+	local_devdata = rcu_dereference(devdata);
+	if (!local_devdata || !local_devdata->dev) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+	max_sync_size = local_devdata->max_sync_size;
+	dev = local_devdata->dev;
+
+	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
+
+	/* Init scatterlist */
+	slin.entries = (struct nx842_slentry *)workmem->slin;
+	slout.entries = (struct nx842_slentry *)workmem->slout;
+
+	/* Init operation */
+	op.flags = NX842_OP_DECOMPRESS;
+	csbcpb = &workmem->csbcpb;
+	memset(csbcpb, 0, sizeof(*csbcpb));
+	op.csbcpb = nx842_get_pa(csbcpb);
+
+	if ((inbuf & NX842_HW_PAGE_MASK) ==
+	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.in = nx842_get_pa((void *)inbuf);
+		op.inlen = inlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(inbuf, inlen, &slin);
+		op.in = nx842_get_pa(slin.entries);
+		op.inlen = -nx842_get_scatterlist_size(&slin);
+	}
+
+	if ((outbuf & NX842_HW_PAGE_MASK) ==
+	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
+		/* Create direct DDE */
+		op.out = nx842_get_pa((void *)outbuf);
+		op.outlen = *outlen;
+	} else {
+		/* Create indirect DDE (scatterlist) */
+		nx842_build_scatterlist(outbuf, *outlen, &slout);
+		op.out = nx842_get_pa(slout.entries);
+		op.outlen = -nx842_get_scatterlist_size(&slout);
+	}
+
+	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
+		__func__, (unsigned long)op.in, (long)op.inlen,
+		(unsigned long)op.out, (long)op.outlen);
+
+	/* Send request to pHyp */
+	ret = vio_h_cop_sync(local_devdata->vdev, &op);
+
+	/* Check for pHyp error */
+	if (ret) {
+		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
+			__func__, ret, op.hcall_err);
+		goto unlock;
+	}
+
+	/* Check for hardware error */
+	ret = nx842_validate_result(dev, &csbcpb->csb);
+	if (ret)
+		goto unlock;
+
+	*outlen
= be32_to_cpu(csbcpb->csb.processed_byte_count);
+
+unlock:
+	if (ret)
+		/* decompress fail */
+		nx842_inc_decomp_failed(local_devdata);
+	else {
+		nx842_inc_decomp_complete(local_devdata);
+		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
+				    (get_tb() - start) / tb_ticks_per_usec);
+	}
+
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nx842_OF_set_defaults -- Set default (disabled) values for devdata
+ *
+ * @devdata - struct nx842_devdata to update
+ *
+ * Returns:
+ *  0 on success
+ *  -ENOENT if @devdata ptr is NULL
+ */
+static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
+{
+	if (devdata) {
+		devdata->max_sync_size = 0;
+		devdata->max_sync_sg = 0;
+		devdata->max_sg_len = 0;
+		devdata->status = UNAVAILABLE;
+		return 0;
+	} else
+		return -ENOENT;
+}
+
+/**
+ * nx842_OF_upd_status -- Update the device info from OF status prop
+ *
+ * The status property indicates if the accelerator is enabled.  If the
+ * device is in the OF tree it indicates that the hardware is present.
+ * The status field indicates if the device is enabled when the status
+ * is 'okay'.  Otherwise the device driver will be disabled.
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the status for the update
+ *
+ * Returns:
+ *  0 always; devdata->status is set to AVAILABLE if the status is
+ *  'okay', otherwise UNAVAILABLE
+ */
+static int nx842_OF_upd_status(struct nx842_devdata *devdata,
+			       struct property *prop) {
+	int ret = 0;
+	const char *status = (const char *)prop->value;
+
+	if (!strncmp(status, "okay", (size_t)prop->length)) {
+		devdata->status = AVAILABLE;
+	} else {
+		dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
+			 __func__, status);
+		devdata->status = UNAVAILABLE;
+	}
+
+	return ret;
+}
+
+/**
+ * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
+ *
+ * Definition of the 'ibm,max-sg-len' OF property:
+ * This field indicates the maximum byte length of a scatter list
+ * for the platform facility.  It is a single cell encoded as with encode-int.
+ *
+ * Example:
+ * # od -x ibm,max-sg-len
+ * 0000000 0000 0ff0
+ *
+ * In this example, the maximum byte length of a scatter list is
+ * 0x0ff0 (4,080).
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the max-sg-len for the update
+ *
+ * Returns:
+ *  0 on success
+ *  -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
+				 struct property *prop) {
+	int ret = 0;
+	const unsigned int maxsglen = of_read_number(prop->value, 1);
+
+	if (prop->length != sizeof(maxsglen)) {
+		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
+		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
+			prop->length, sizeof(maxsglen));
+		ret = -EINVAL;
+	} else {
+		devdata->max_sg_len = min_t(unsigned int,
+					    maxsglen, NX842_HW_PAGE_SIZE);
+	}
+
+	return ret;
+}
+
+/**
+ * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
+ *
+ * Definition of the 'ibm,max-sync-cop' OF property:
+ * Two series of cells.  The first series of cells represents the maximums
+ * that can be synchronously compressed.  The second series of cells
+ * represents the maximums that can be synchronously decompressed.
+ * 1. The first cell in each series contains the count of the number of
+ *    data length, scatter list elements pairs that follow, each being
+ *    of the form
+ *    a. One cell data byte length
+ *    b.
One cell total number of scatter list elements
+ *
+ * Example:
+ * # od -x ibm,max-sync-cop
+ * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001
+ * 0000020 0000 1000 0000 01fe
+ *
+ * In this example, compression supports 0x1000 (4,096) data byte length
+ * and 0x1fe (510) total scatter list elements.  Decompression supports
+ * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
+ * elements.
+ *
+ * @devdata - struct nx842_devdata to update
+ * @prop - struct property pointer containing the maxsyncop for the update
+ *
+ * Returns:
+ *  0 on success
+ *  -EINVAL on failure
+ */
+static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
+				  struct property *prop) {
+	int ret = 0;
+	unsigned int comp_data_limit, decomp_data_limit;
+	unsigned int comp_sg_limit, decomp_sg_limit;
+	const struct maxsynccop_t {
+		__be32 comp_elements;
+		__be32 comp_data_limit;
+		__be32 comp_sg_limit;
+		__be32 decomp_elements;
+		__be32 decomp_data_limit;
+		__be32 decomp_sg_limit;
+	} *maxsynccop;
+
+	if (prop->length != sizeof(*maxsynccop)) {
+		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
+		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
+			sizeof(*maxsynccop));
+		ret = -EINVAL;
+		goto out;
+	}
+
+	maxsynccop = (const struct maxsynccop_t *)prop->value;
+	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
+	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
+	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
+	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);
+
+	/* Use one limit rather than separate limits for compression and
+	 * decompression.  Set a maximum for this so as not to exceed the
+	 * size that the header can support and round the value down to
+	 * the hardware page size (4K) */
+	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);
+
+	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
+				       65536);
+
+	if (devdata->max_sync_size < 4096) {
+		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
+			"less than the driver minimum, unable to use "
+			"the hardware device\n",
+			__func__, devdata->max_sync_size);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	nx842_pseries_constraints.maximum = devdata->max_sync_size;
+
+	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
+	if (devdata->max_sync_sg < 1) {
+		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
+			"less than the driver minimum, unable to use "
+			"the hardware device\n",
+			__func__, devdata->max_sync_sg);
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * nx842_OF_upd -- Handle OF properties updates for the device.
+ *
+ * Set all properties from the OF tree.  Optionally, a new property
+ * can be provided by the @new_prop pointer to overwrite an existing value.
+ * The device will remain disabled until all values are valid; this function
+ * will return an error for updates unless all values are valid.
+ *
+ * @new_prop: If not NULL, this property is being updated.  If NULL, update
+ *  all properties from the current values in the OF tree.
+ *
+ * Returns:
+ *  0 - Success
+ *  -ENOMEM - Could not allocate memory for new devdata structure
+ *  -EINVAL - property value not found, new_prop is not a recognized
+ *	property for the device, or property value is not valid.
+ *  -ENODEV - Device is not available
+ */
+static int nx842_OF_upd(struct property *new_prop)
+{
+	struct nx842_devdata *old_devdata = NULL;
+	struct nx842_devdata *new_devdata = NULL;
+	struct device_node *of_node = NULL;
+	struct property *status = NULL;
+	struct property *maxsglen = NULL;
+	struct property *maxsyncop = NULL;
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&devdata_mutex, flags);
+	old_devdata = rcu_dereference_check(devdata,
+			lockdep_is_held(&devdata_mutex));
+	if (old_devdata)
+		of_node = old_devdata->dev->of_node;
+
+	if (!old_devdata || !of_node) {
+		pr_err("%s: device is not available\n", __func__);
+		spin_unlock_irqrestore(&devdata_mutex, flags);
+		return -ENODEV;
+	}
+
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata) {
+		dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
+		ret = -ENOMEM;
+		goto error_out;
+	}
+
+	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
+	new_devdata->counters = old_devdata->counters;
+
+	/* Set ptrs for existing properties */
+	status = of_find_property(of_node, "status", NULL);
+	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
+	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
+	if (!status || !maxsglen || !maxsyncop) {
+		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
+		ret = -EINVAL;
+		goto error_out;
+	}
+
+	/*
+	 * If this is a property update, there are only certain properties that
+	 * we care about.  Bail if it isn't in the below list
+	 */
+	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) &&
+			 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) &&
+			 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
+		goto out;
+
+	/* Perform property updates */
+	ret = nx842_OF_upd_status(new_devdata, status);
+	if (ret)
+		goto error_out;
+
+	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
+	if (ret)
+		goto error_out;
+
+	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
+	if (ret)
+		goto error_out;
+
+out:
+	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
+		 __func__, new_devdata->max_sync_size,
+		 old_devdata->max_sync_size);
+	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
+		 __func__, new_devdata->max_sync_sg,
+		 old_devdata->max_sync_sg);
+	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
+		 __func__, new_devdata->max_sg_len,
+		 old_devdata->max_sg_len);
+
+	rcu_assign_pointer(devdata, new_devdata);
+	spin_unlock_irqrestore(&devdata_mutex, flags);
+	synchronize_rcu();
+	dev_set_drvdata(new_devdata->dev, new_devdata);
+	kfree(old_devdata);
+	return 0;
+
+error_out:
+	if (new_devdata) {
+		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
+		nx842_OF_set_defaults(new_devdata);
+		rcu_assign_pointer(devdata, new_devdata);
+		spin_unlock_irqrestore(&devdata_mutex, flags);
+		synchronize_rcu();
+		dev_set_drvdata(new_devdata->dev, new_devdata);
+		kfree(old_devdata);
+	} else {
+		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
+		spin_unlock_irqrestore(&devdata_mutex, flags);
+	}
+
+	if (!ret)
+		ret = -EINVAL;
+	return ret;
+}
+
+/**
+ * nx842_OF_notifier - Process updates to OF properties for the device
+ *
+ * @np: notifier block
+ * @action: notifier action
+ * @data: struct of_reconfig_data pointer if action is
+ *	OF_RECONFIG_UPDATE_PROPERTY
+ *
+ * Returns:
+ *  NOTIFY_OK on success
+ *  NOTIFY_BAD encoded with error number on failure, use
+ *  notifier_to_errno()
to decode this value + */ +static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, + void *data) +{ + struct of_reconfig_data *upd = data; + struct nx842_devdata *local_devdata; + struct device_node *node = NULL; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (local_devdata) + node = local_devdata->dev->of_node; + + if (local_devdata && + action == OF_RECONFIG_UPDATE_PROPERTY && + !strcmp(upd->dn->name, node->name)) { + rcu_read_unlock(); + nx842_OF_upd(upd->prop); + } else + rcu_read_unlock(); + + return NOTIFY_OK; +} + +static struct notifier_block nx842_of_nb = { + .notifier_call = nx842_OF_notifier, +}; + +#define nx842_counter_read(_name) \ +static ssize_t nx842_##_name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) { \ + struct nx842_devdata *local_devdata; \ + int p = 0; \ + rcu_read_lock(); \ + local_devdata = rcu_dereference(devdata); \ + if (local_devdata) \ + p = snprintf(buf, PAGE_SIZE, "%ld\n", \ + atomic64_read(&local_devdata->counters->_name)); \ + rcu_read_unlock(); \ + return p; \ +} + +#define NX842DEV_COUNTER_ATTR_RO(_name) \ + nx842_counter_read(_name); \ + static struct device_attribute dev_attr_##_name = __ATTR(_name, \ + 0444, \ + nx842_##_name##_show,\ + NULL); + +NX842DEV_COUNTER_ATTR_RO(comp_complete); +NX842DEV_COUNTER_ATTR_RO(comp_failed); +NX842DEV_COUNTER_ATTR_RO(decomp_complete); +NX842DEV_COUNTER_ATTR_RO(decomp_failed); +NX842DEV_COUNTER_ATTR_RO(swdecomp); + +static ssize_t nx842_timehist_show(struct device *, + struct device_attribute *, char *); + +static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, + nx842_timehist_show, NULL); +static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, + 0444, nx842_timehist_show, NULL); + +static ssize_t nx842_timehist_show(struct device *dev, + struct device_attribute *attr, char *buf) { + char *p = buf; + struct nx842_devdata *local_devdata; + atomic64_t *times; + int bytes_remain = PAGE_SIZE; + int bytes; + int i; + + rcu_read_lock(); + local_devdata = rcu_dereference(devdata); + if (!local_devdata) { + rcu_read_unlock(); + return 0; + } + + if (attr == &dev_attr_comp_times) + times = local_devdata->counters->comp_times; + else if (attr == &dev_attr_decomp_times) + times = local_devdata->counters->decomp_times; + else { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { + bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", + i ? 
(2<<(i-1)) : 0, (2<<i)-1, + atomic64_read(×[i])); + bytes_remain -= bytes; + p += bytes; + } + /* The last bucket holds everything over + * 2<<(NX842_HIST_SLOTS - 2) us */ + bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n", + 2<<(NX842_HIST_SLOTS - 2), + atomic64_read(×[(NX842_HIST_SLOTS - 1)])); + p += bytes; + + rcu_read_unlock(); + return p - buf; +} + +static struct attribute *nx842_sysfs_entries[] = { + &dev_attr_comp_complete.attr, + &dev_attr_comp_failed.attr, + &dev_attr_decomp_complete.attr, + &dev_attr_decomp_failed.attr, + &dev_attr_swdecomp.attr, + &dev_attr_comp_times.attr, + &dev_attr_decomp_times.attr, + NULL, +}; + +static struct attribute_group nx842_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = nx842_sysfs_entries, +}; + +static struct nx842_driver nx842_pseries_driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .workmem_size = sizeof(struct nx842_workmem), + .constraints = &nx842_pseries_constraints, + .compress = nx842_pseries_compress, + .decompress = nx842_pseries_decompress, +}; + +static int __init nx842_probe(struct vio_dev *viodev, + const struct vio_device_id *id) +{ + struct nx842_devdata *old_devdata, *new_devdata = NULL; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + + if (old_devdata && old_devdata->vdev != NULL) { + dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); + ret = -1; + goto error_unlock; + } + + dev_set_drvdata(&viodev->dev, NULL); + + new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); + if (!new_devdata) { + dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__); + ret = -ENOMEM; + goto error_unlock; + } + + new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), + GFP_NOFS); + if (!new_devdata->counters) { + dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__); + ret = -ENOMEM; + goto error_unlock; + } + + new_devdata->vdev = viodev; + new_devdata->dev = &viodev->dev; + nx842_OF_set_defaults(new_devdata); + + rcu_assign_pointer(devdata, new_devdata); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + kfree(old_devdata); + + of_reconfig_notifier_register(&nx842_of_nb); + + ret = nx842_OF_upd(NULL); + if (ret && ret != -ENODEV) { + dev_err(&viodev->dev, "could not parse device tree. 
%d\n", ret); + ret = -1; + goto error; + } + + rcu_read_lock(); + dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); + rcu_read_unlock(); + + if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { + dev_err(&viodev->dev, "could not create sysfs device attributes\n"); + ret = -1; + goto error; + } + + return 0; + +error_unlock: + spin_unlock_irqrestore(&devdata_mutex, flags); + if (new_devdata) + kfree(new_devdata->counters); + kfree(new_devdata); +error: + return ret; +} + +static int __exit nx842_remove(struct vio_dev *viodev) +{ + struct nx842_devdata *old_devdata; + unsigned long flags; + + pr_info("Removing IBM Power 842 compression device\n"); + sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); + + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + of_reconfig_notifier_unregister(&nx842_of_nb); + RCU_INIT_POINTER(devdata, NULL); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + dev_set_drvdata(&viodev->dev, NULL); + if (old_devdata) + kfree(old_devdata->counters); + kfree(old_devdata); + + return 0; +} + +static struct vio_device_id nx842_vio_driver_ids[] = { + {"ibm,compression-v1", "ibm,compression"}, + {"", ""}, +}; + +static struct vio_driver nx842_vio_driver = { + .name = KBUILD_MODNAME, + .probe = nx842_probe, + .remove = __exit_p(nx842_remove), + .get_desired_dma = nx842_get_desired_dma, + .id_table = nx842_vio_driver_ids, +}; + +static int __init nx842_init(void) +{ + struct nx842_devdata *new_devdata; + int ret; + + pr_info("Registering IBM Power 842 compression driver\n"); + + if (!of_find_compatible_node(NULL, NULL, "ibm,compression")) + return -ENODEV; + + RCU_INIT_POINTER(devdata, NULL); + new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); + if (!new_devdata) { + pr_err("Could not allocate memory for device data\n"); + return -ENOMEM; + } + new_devdata->status = UNAVAILABLE; + RCU_INIT_POINTER(devdata, new_devdata); + + ret = vio_register_driver(&nx842_vio_driver); + if (ret) { + pr_err("Could not register VIO driver %d\n", ret); + + kfree(new_devdata); + return ret; + } + + if (!nx842_platform_driver_set(&nx842_pseries_driver)) { + vio_unregister_driver(&nx842_vio_driver); + kfree(new_devdata); + return -EEXIST; + } + + return 0; +} + +module_init(nx842_init); + +static void __exit nx842_exit(void) +{ + struct nx842_devdata *old_devdata; + unsigned long flags; + + pr_info("Exiting IBM Power 842 compression driver\n"); + nx842_platform_driver_unset(&nx842_pseries_driver); + spin_lock_irqsave(&devdata_mutex, flags); + old_devdata = rcu_dereference_check(devdata, + lockdep_is_held(&devdata_mutex)); + RCU_INIT_POINTER(devdata, NULL); + spin_unlock_irqrestore(&devdata_mutex, flags); + synchronize_rcu(); + if (old_devdata && old_devdata->dev) + dev_set_drvdata(old_devdata->dev, NULL); + kfree(old_devdata); + vio_unregister_driver(&nx842_vio_driver); +} + +module_exit(nx842_exit); + diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 887196e9b50c..6e5e0d60d0c8 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -1,5 +1,10 @@ /* - * Driver for IBM Power 842 compression accelerator + * Driver frontend for IBM Power 842 compression accelerator + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * Designer of the Power data compression engine: + * Bulent Abali <abali@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public 
License as published by @@ -10,1594 +15,89 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * - * Copyright (C) IBM Corporation, 2012 - * - * Authors: Robert Jennings <rcj@linux.vnet.ibm.com> - * Seth Jennings <sjenning@linux.vnet.ibm.com> */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/nx842.h> -#include <linux/of.h> -#include <linux/slab.h> - -#include <asm/page.h> -#include <asm/vio.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include "nx_csbcpb.h" /* struct nx_csbcpb */ +#include "nx-842.h" -#define MODULE_NAME "nx-compress" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); -#define SHIFT_4K 12 -#define SHIFT_64K 16 -#define SIZE_4K (1UL << SHIFT_4K) -#define SIZE_64K (1UL << SHIFT_64K) - -/* IO buffer must be 128 byte aligned */ -#define IO_BUFFER_ALIGN 128 - -struct nx842_header { - int blocks_nr; /* number of compressed blocks */ - int offset; /* offset of the first block (from beginning of header) */ - int sizes[0]; /* size of compressed blocks */ -}; - -static inline int nx842_header_size(const struct nx842_header *hdr) -{ - return sizeof(struct nx842_header) + - hdr->blocks_nr * sizeof(hdr->sizes[0]); -} - -/* Macros for fields within nx_csbcpb */ -/* Check the valid bit within the csbcpb valid field */ -#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) - -/* CE macros operate on the completion_extension field bits in the csbcpb. 
- * CE0 0=full completion, 1=partial completion - * CE1 0=CE0 indicates completion, 1=termination (output may be modified) - * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ -#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) -#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) -#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) - -/* The NX unit accepts data only on 4K page boundaries */ -#define NX842_HW_PAGE_SHIFT SHIFT_4K -#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT) -#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) - -enum nx842_status { - UNAVAILABLE, - AVAILABLE -}; - -struct ibm_nx842_counters { - atomic64_t comp_complete; - atomic64_t comp_failed; - atomic64_t decomp_complete; - atomic64_t decomp_failed; - atomic64_t swdecomp; - atomic64_t comp_times[32]; - atomic64_t decomp_times[32]; -}; - -static struct nx842_devdata { - struct vio_dev *vdev; - struct device *dev; - struct ibm_nx842_counters *counters; - unsigned int max_sg_len; - unsigned int max_sync_size; - unsigned int max_sync_sg; - enum nx842_status status; -} __rcu *devdata; -static DEFINE_SPINLOCK(devdata_mutex); - -#define NX842_COUNTER_INC(_x) \ -static inline void nx842_inc_##_x( \ - const struct nx842_devdata *dev) { \ - if (dev) \ - atomic64_inc(&dev->counters->_x); \ -} -NX842_COUNTER_INC(comp_complete); -NX842_COUNTER_INC(comp_failed); -NX842_COUNTER_INC(decomp_complete); -NX842_COUNTER_INC(decomp_failed); -NX842_COUNTER_INC(swdecomp); - -#define NX842_HIST_SLOTS 16 - -static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) -{ - int bucket = fls(time); - - if (bucket) - bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); - - atomic64_inc(×[bucket]); -} - -/* NX unit operation flags */ -#define NX842_OP_COMPRESS 0x0 -#define NX842_OP_CRC 0x1 -#define NX842_OP_DECOMPRESS 0x2 -#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) -#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) -#define NX842_OP_ASYNC (1<<23) -#define NX842_OP_NOTIFY (1<<22) -#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) - -static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) -{ - /* No use of DMA mappings within the driver. 
*/ - return 0; -} - -struct nx842_slentry { - unsigned long ptr; /* Real address (use __pa()) */ - unsigned long len; -}; - -/* pHyp scatterlist entry */ -struct nx842_scatterlist { - int entry_nr; /* number of slentries */ - struct nx842_slentry *entries; /* ptr to array of slentries */ -}; - -/* Does not include sizeof(entry_nr) in the size */ -static inline unsigned long nx842_get_scatterlist_size( - struct nx842_scatterlist *sl) -{ - return sl->entry_nr * sizeof(struct nx842_slentry); -} - -static inline unsigned long nx842_get_pa(void *addr) -{ - if (is_vmalloc_addr(addr)) - return page_to_phys(vmalloc_to_page(addr)) - + offset_in_page(addr); - else - return __pa(addr); -} - -static int nx842_build_scatterlist(unsigned long buf, int len, - struct nx842_scatterlist *sl) -{ - unsigned long nextpage; - struct nx842_slentry *entry; - - sl->entry_nr = 0; - - entry = sl->entries; - while (len) { - entry->ptr = nx842_get_pa((void *)buf); - nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE); - if (nextpage < buf + len) { - /* we aren't at the end yet */ - if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE)) - /* we are in the middle (or beginning) */ - entry->len = NX842_HW_PAGE_SIZE; - else - /* we are at the beginning */ - entry->len = nextpage - buf; - } else { - /* at the end */ - entry->len = len; - } - - len -= entry->len; - buf += entry->len; - sl->entry_nr++; - entry++; - } - - return 0; -} - -/* - * Working memory for software decompression - */ -struct sw842_fifo { - union { - char f8[256][8]; - char f4[512][4]; - }; - char f2[256][2]; - unsigned char f84_full; - unsigned char f2_full; - unsigned char f8_count; - unsigned char f2_count; - unsigned int f4_count; -}; - -/* - * Working memory for crypto API +/** + * nx842_constraints + * + * This provides the driver's constraints. Different nx842 implementations + * may have varying requirements. The constraints are: + * @alignment: All buffers should be aligned to this + * @multiple: All buffer lengths should be a multiple of this + * @minimum: Buffer lengths must not be less than this amount + * @maximum: Buffer lengths must not be more than this amount + * + * The constraints apply to all buffers and lengths, both input and output, + * for both compression and decompression, except for the minimum which + * only applies to compression input and decompression output; the + * compressed data can be less than the minimum constraint. It can be + * assumed that compressed data will always adhere to the multiple + * constraint. + * + * The driver may succeed even if these constraints are violated; + * however the driver can return failure or suffer reduced performance + * if any constraint is not met. 
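+ *
+ * For example, a caller might adapt its buffers to the constraints
+ * before handing them to the driver (a sketch; error handling elided,
+ * and use_bounce_buffer() is a hypothetical caller-side helper):
+ *
+ *	struct nx842_constraints c;
+ *
+ *	if (nx842_constraints(&c))
+ *		return -ENODEV;
+ *	len = min_t(unsigned int, round_down(len, c.multiple), c.maximum);
+ *	if (len < c.minimum || (u64)buf % c.alignment)
+ *		use_bounce_buffer();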
*/ -struct nx842_workmem { - char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */ - union { - /* hardware working memory */ - struct { - /* scatterlist */ - char slin[SIZE_4K]; - char slout[SIZE_4K]; - /* coprocessor status/parameter block */ - struct nx_csbcpb csbcpb; - }; - /* software working memory */ - struct sw842_fifo swfifo; /* software decompression fifo */ - }; -}; - -int nx842_get_workmem_size(void) -{ - return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE; -} -EXPORT_SYMBOL_GPL(nx842_get_workmem_size); - -int nx842_get_workmem_size_aligned(void) -{ - return sizeof(struct nx842_workmem); -} -EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned); - -static int nx842_validate_result(struct device *dev, - struct cop_status_block *csb) +int nx842_constraints(struct nx842_constraints *c) { - /* The csb must be valid after returning from vio_h_cop_sync */ - if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { - dev_err(dev, "%s: cspcbp not valid upon completion.\n", - __func__); - dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", - csb->valid, - csb->crb_seq_number, - csb->completion_code, - csb->completion_extension); - dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", - csb->processed_byte_count, - (unsigned long)csb->address); - return -EIO; - } - - /* Check return values from the hardware in the CSB */ - switch (csb->completion_code) { - case 0: /* Completed without error */ - break; - case 64: /* Target bytes > Source bytes during compression */ - case 13: /* Output buffer too small */ - dev_dbg(dev, "%s: Compression output larger than input\n", - __func__); - return -ENOSPC; - case 66: /* Input data contains an illegal template field */ - case 67: /* Template indicates data past the end of the input stream */ - dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", - __func__, csb->completion_code); - return -EINVAL; - default: - dev_dbg(dev, "%s: Unspecified error (code:%d)\n", - __func__, csb->completion_code); - return -EIO; - } - - /* Hardware sanity check */ - if (!NX842_CSBCPB_CE2(csb->completion_extension)) { - dev_err(dev, "%s: No error returned by hardware, but " - "data returned is unusable, contact support.\n" - "(Additional info: csbcbp->processed bytes " - "does not specify processed bytes for the " - "target buffer.)\n", __func__); - return -EIO; - } - + memcpy(c, nx842_platform_driver()->constraints, sizeof(*c)); return 0; } +EXPORT_SYMBOL_GPL(nx842_constraints); /** - * nx842_compress - Compress data using the 842 algorithm - * - * Compression provide by the NX842 coprocessor on IBM Power systems. - * The input buffer is compressed and the result is stored in the - * provided output buffer. - * - * Upon return from this function @outlen contains the length of the - * compressed data. If there is an error then @outlen will be 0 and an - * error will be specified by the return code from this function. - * - * @in: Pointer to input buffer, must be page aligned - * @inlen: Length of input buffer, must be PAGE_SIZE - * @out: Pointer to output buffer - * @outlen: Length of output buffer - * @wrkmem: ptr to buffer for working memory, size determined by - * nx842_get_workmem_size() + * nx842_workmem_size * - * Returns: - * 0 Success, output of length @outlen stored in the buffer at @out - * -ENOMEM Unable to allocate internal buffers - * -ENOSPC Output buffer is to small - * -EMSGSIZE XXX Difficult to describe this limitation - * -EIO Internal error - * -ENODEV Hardware unavailable + * Get the amount of working memory the driver requires. 
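+ *
+ * Example (hypothetical caller, not part of this patch): the working
+ * buffer is allocated once, sized by this helper, and handed to every
+ * compress/decompress call; in/ilen/out/olen are the caller's buffers:
+ *
+ *	void *wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
+ *
+ *	if (!wmem)
+ *		return -ENOMEM;
+ *	ret = nx842_compress(in, ilen, out, &olen, wmem);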
*/ -int nx842_compress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlen, void *wmem) +size_t nx842_workmem_size(void) { - struct nx842_header *hdr; - struct nx842_devdata *local_devdata; - struct device *dev = NULL; - struct nx842_workmem *workmem; - struct nx842_scatterlist slin, slout; - struct nx_csbcpb *csbcpb; - int ret = 0, max_sync_size, i, bytesleft, size, hdrsize; - unsigned long inbuf, outbuf, padding; - struct vio_pfo_op op = { - .done = NULL, - .handle = 0, - .timeout = 0, - }; - unsigned long start_time = get_tb(); - - /* - * Make sure input buffer is 64k page aligned. This is assumed since - * this driver is designed for page compression only (for now). This - * is very nice since we can now use direct DDE(s) for the input and - * the alignment is guaranteed. - */ - inbuf = (unsigned long)in; - if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE) - return -EINVAL; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (!local_devdata || !local_devdata->dev) { - rcu_read_unlock(); - return -ENODEV; - } - max_sync_size = local_devdata->max_sync_size; - dev = local_devdata->dev; - - /* Create the header */ - hdr = (struct nx842_header *)out; - hdr->blocks_nr = PAGE_SIZE / max_sync_size; - hdrsize = nx842_header_size(hdr); - outbuf = (unsigned long)out + hdrsize; - bytesleft = *outlen - hdrsize; - - /* Init scatterlist */ - workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, - NX842_HW_PAGE_SIZE); - slin.entries = (struct nx842_slentry *)workmem->slin; - slout.entries = (struct nx842_slentry *)workmem->slout; - - /* Init operation */ - op.flags = NX842_OP_COMPRESS; - csbcpb = &workmem->csbcpb; - memset(csbcpb, 0, sizeof(*csbcpb)); - op.csbcpb = nx842_get_pa(csbcpb); - op.out = nx842_get_pa(slout.entries); - - for (i = 0; i < hdr->blocks_nr; i++) { - /* - * Aligning the output blocks to 128 bytes does waste space, - * but it prevents the need for bounce buffers and memory - * copies. It also simplifies the code a lot. In the worst - * case (64k page, 4k max_sync_size), you lose up to - * (128*16)/64k = ~3% the compression factor. For 64k - * max_sync_size, the loss would be at most 128/64k = ~0.2%. - */ - padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf; - outbuf += padding; - bytesleft -= padding; - if (i == 0) - /* save offset into first block in header */ - hdr->offset = padding + hdrsize; - - if (bytesleft <= 0) { - ret = -ENOSPC; - goto unlock; - } - - /* - * NOTE: If the default max_sync_size is changed from 4k - * to 64k, remove the "likely" case below, since a - * scatterlist will always be needed. - */ - if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { - /* Create direct DDE */ - op.in = nx842_get_pa((void *)inbuf); - op.inlen = max_sync_size; - - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(inbuf, max_sync_size, &slin); - op.in = nx842_get_pa(slin.entries); - op.inlen = -nx842_get_scatterlist_size(&slin); - } - - /* - * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect - * DDE is required for the outbuf. - * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must - * also be page aligned (1 in 128/4k=32 chance) in order - * to use a direct DDE. - * This is unlikely, just use an indirect DDE always. 
- */ - nx842_build_scatterlist(outbuf, - min(bytesleft, max_sync_size), &slout); - /* op.out set before loop */ - op.outlen = -nx842_get_scatterlist_size(&slout); - - /* Send request to pHyp */ - ret = vio_h_cop_sync(local_devdata->vdev, &op); - - /* Check for pHyp error */ - if (ret) { - dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", - __func__, ret, op.hcall_err); - ret = -EIO; - goto unlock; - } - - /* Check for hardware error */ - ret = nx842_validate_result(dev, &csbcpb->csb); - if (ret && ret != -ENOSPC) - goto unlock; - - /* Handle incompressible data */ - if (unlikely(ret == -ENOSPC)) { - if (bytesleft < max_sync_size) { - /* - * Not enough space left in the output buffer - * to store uncompressed block - */ - goto unlock; - } else { - /* Store incompressible block */ - memcpy((void *)outbuf, (void *)inbuf, - max_sync_size); - hdr->sizes[i] = -max_sync_size; - outbuf += max_sync_size; - bytesleft -= max_sync_size; - /* Reset ret, incompressible data handled */ - ret = 0; - } - } else { - /* Normal case, compression was successful */ - size = csbcpb->csb.processed_byte_count; - dev_dbg(dev, "%s: processed_bytes=%d\n", - __func__, size); - hdr->sizes[i] = size; - outbuf += size; - bytesleft -= size; - } - - inbuf += max_sync_size; - } - - *outlen = (unsigned int)(outbuf - (unsigned long)out); - -unlock: - if (ret) - nx842_inc_comp_failed(local_devdata); - else { - nx842_inc_comp_complete(local_devdata); - ibm_nx842_incr_hist(local_devdata->counters->comp_times, - (get_tb() - start_time) / tb_ticks_per_usec); - } - rcu_read_unlock(); - return ret; + return nx842_platform_driver()->workmem_size; } -EXPORT_SYMBOL_GPL(nx842_compress); - -static int sw842_decompress(const unsigned char *, int, unsigned char *, int *, - const void *); +EXPORT_SYMBOL_GPL(nx842_workmem_size); -/** - * nx842_decompress - Decompress data using the 842 algorithm - * - * Decompression provide by the NX842 coprocessor on IBM Power systems. - * The input buffer is decompressed and the result is stored in the - * provided output buffer. The size allocated to the output buffer is - * provided by the caller of this function in @outlen. Upon return from - * this function @outlen contains the length of the decompressed data. - * If there is an error then @outlen will be 0 and an error will be - * specified by the return code from this function. 
- * - * @in: Pointer to input buffer, will use bounce buffer if not 128 byte - * aligned - * @inlen: Length of input buffer - * @out: Pointer to output buffer, must be page aligned - * @outlen: Length of output buffer, must be PAGE_SIZE - * @wrkmem: ptr to buffer for working memory, size determined by - * nx842_get_workmem_size() - * - * Returns: - * 0 Success, output of length @outlen stored in the buffer at @out - * -ENODEV Hardware decompression device is unavailable - * -ENOMEM Unable to allocate internal buffers - * -ENOSPC Output buffer is to small - * -EINVAL Bad input data encountered when attempting decompress - * -EIO Internal error - */ -int nx842_decompress(const unsigned char *in, unsigned int inlen, - unsigned char *out, unsigned int *outlen, void *wmem) +int nx842_compress(const unsigned char *in, unsigned int ilen, + unsigned char *out, unsigned int *olen, void *wmem) { - struct nx842_header *hdr; - struct nx842_devdata *local_devdata; - struct device *dev = NULL; - struct nx842_workmem *workmem; - struct nx842_scatterlist slin, slout; - struct nx_csbcpb *csbcpb; - int ret = 0, i, size, max_sync_size; - unsigned long inbuf, outbuf; - struct vio_pfo_op op = { - .done = NULL, - .handle = 0, - .timeout = 0, - }; - unsigned long start_time = get_tb(); - - /* Ensure page alignment and size */ - outbuf = (unsigned long)out; - if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE) - return -EINVAL; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (local_devdata) - dev = local_devdata->dev; - - /* Get header */ - hdr = (struct nx842_header *)in; - - workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem, - NX842_HW_PAGE_SIZE); - - inbuf = (unsigned long)in + hdr->offset; - if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) { - /* Copy block(s) into bounce buffer for alignment */ - memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset); - inbuf = (unsigned long)workmem->bounce; - } - - /* Init scatterlist */ - slin.entries = (struct nx842_slentry *)workmem->slin; - slout.entries = (struct nx842_slentry *)workmem->slout; - - /* Init operation */ - op.flags = NX842_OP_DECOMPRESS; - csbcpb = &workmem->csbcpb; - memset(csbcpb, 0, sizeof(*csbcpb)); - op.csbcpb = nx842_get_pa(csbcpb); - - /* - * max_sync_size may have changed since compression, - * so we can't read it from the device info. We need - * to derive it from hdr->blocks_nr. - */ - max_sync_size = PAGE_SIZE / hdr->blocks_nr; - - for (i = 0; i < hdr->blocks_nr; i++) { - /* Skip padding */ - inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN); - - if (hdr->sizes[i] < 0) { - /* Negative sizes indicate uncompressed data blocks */ - size = abs(hdr->sizes[i]); - memcpy((void *)outbuf, (void *)inbuf, size); - outbuf += size; - inbuf += size; - continue; - } - - if (!dev) - goto sw; - - /* - * The better the compression, the more likely the "likely" - * case becomes. - */ - if (likely((inbuf & NX842_HW_PAGE_MASK) == - ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) { - /* Create direct DDE */ - op.in = nx842_get_pa((void *)inbuf); - op.inlen = hdr->sizes[i]; - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin); - op.in = nx842_get_pa(slin.entries); - op.inlen = -nx842_get_scatterlist_size(&slin); - } - - /* - * NOTE: If the default max_sync_size is changed from 4k - * to 64k, remove the "likely" case below, since a - * scatterlist will always be needed. 
- */ - if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) { - /* Create direct DDE */ - op.out = nx842_get_pa((void *)outbuf); - op.outlen = max_sync_size; - } else { - /* Create indirect DDE (scatterlist) */ - nx842_build_scatterlist(outbuf, max_sync_size, &slout); - op.out = nx842_get_pa(slout.entries); - op.outlen = -nx842_get_scatterlist_size(&slout); - } - - /* Send request to pHyp */ - ret = vio_h_cop_sync(local_devdata->vdev, &op); - - /* Check for pHyp error */ - if (ret) { - dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", - __func__, ret, op.hcall_err); - dev = NULL; - goto sw; - } - - /* Check for hardware error */ - ret = nx842_validate_result(dev, &csbcpb->csb); - if (ret) { - dev = NULL; - goto sw; - } - - /* HW decompression success */ - inbuf += hdr->sizes[i]; - outbuf += csbcpb->csb.processed_byte_count; - continue; - -sw: - /* software decompression */ - size = max_sync_size; - ret = sw842_decompress( - (unsigned char *)inbuf, hdr->sizes[i], - (unsigned char *)outbuf, &size, wmem); - if (ret) - pr_debug("%s: sw842_decompress failed with %d\n", - __func__, ret); - - if (ret) { - if (ret != -ENOSPC && ret != -EINVAL && - ret != -EMSGSIZE) - ret = -EIO; - goto unlock; - } - - /* SW decompression success */ - inbuf += hdr->sizes[i]; - outbuf += size; - } - - *outlen = (unsigned int)(outbuf - (unsigned long)out); - -unlock: - if (ret) - /* decompress fail */ - nx842_inc_decomp_failed(local_devdata); - else { - if (!dev) - /* software decompress */ - nx842_inc_swdecomp(local_devdata); - nx842_inc_decomp_complete(local_devdata); - ibm_nx842_incr_hist(local_devdata->counters->decomp_times, - (get_tb() - start_time) / tb_ticks_per_usec); - } - - rcu_read_unlock(); - return ret; + return nx842_platform_driver()->compress(in, ilen, out, olen, wmem); } -EXPORT_SYMBOL_GPL(nx842_decompress); +EXPORT_SYMBOL_GPL(nx842_compress); -/** - * nx842_OF_set_defaults -- Set default (disabled) values for devdata - * - * @devdata - struct nx842_devdata to update - * - * Returns: - * 0 on success - * -ENOENT if @devdata ptr is NULL - */ -static int nx842_OF_set_defaults(struct nx842_devdata *devdata) +int nx842_decompress(const unsigned char *in, unsigned int ilen, + unsigned char *out, unsigned int *olen, void *wmem) { - if (devdata) { - devdata->max_sync_size = 0; - devdata->max_sync_sg = 0; - devdata->max_sg_len = 0; - devdata->status = UNAVAILABLE; - return 0; - } else - return -ENOENT; -} - -/** - * nx842_OF_upd_status -- Update the device info from OF status prop - * - * The status property indicates if the accelerator is enabled. If the - * device is in the OF tree it indicates that the hardware is present. - * The status field indicates if the device is enabled when the status - * is 'okay'. Otherwise the device driver will be disabled. 
- * - * @devdata - struct nx842_devdata to update - * @prop - struct property point containing the maxsyncop for the update - * - * Returns: - * 0 - Device is available - * -EINVAL - Device is not available - */ -static int nx842_OF_upd_status(struct nx842_devdata *devdata, - struct property *prop) { - int ret = 0; - const char *status = (const char *)prop->value; - - if (!strncmp(status, "okay", (size_t)prop->length)) { - devdata->status = AVAILABLE; - } else { - dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n", - __func__, status); - devdata->status = UNAVAILABLE; - } - - return ret; -} - -/** - * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop - * - * Definition of the 'ibm,max-sg-len' OF property: - * This field indicates the maximum byte length of a scatter list - * for the platform facility. It is a single cell encoded as with encode-int. - * - * Example: - * # od -x ibm,max-sg-len - * 0000000 0000 0ff0 - * - * In this example, the maximum byte length of a scatter list is - * 0x0ff0 (4,080). - * - * @devdata - struct nx842_devdata to update - * @prop - struct property point containing the maxsyncop for the update - * - * Returns: - * 0 on success - * -EINVAL on failure - */ -static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, - struct property *prop) { - int ret = 0; - const int *maxsglen = prop->value; - - if (prop->length != sizeof(*maxsglen)) { - dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); - dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, - prop->length, sizeof(*maxsglen)); - ret = -EINVAL; - } else { - devdata->max_sg_len = (unsigned int)min(*maxsglen, - (int)NX842_HW_PAGE_SIZE); - } - - return ret; -} - -/** - * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop - * - * Definition of the 'ibm,max-sync-cop' OF property: - * Two series of cells. The first series of cells represents the maximums - * that can be synchronously compressed. The second series of cells - * represents the maximums that can be synchronously decompressed. - * 1. The first cell in each series contains the count of the number of - * data length, scatter list elements pairs that follow – each being - * of the form - * a. One cell data byte length - * b. One cell total number of scatter list elements - * - * Example: - * # od -x ibm,max-sync-cop - * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 - * 0000020 0000 1000 0000 01fe - * - * In this example, compression supports 0x1000 (4,096) data byte length - * and 0x1fe (510) total scatter list elements. Decompression supports - * 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list - * elements. 
- * - * @devdata - struct nx842_devdata to update - * @prop - struct property point containing the maxsyncop for the update - * - * Returns: - * 0 on success - * -EINVAL on failure - */ -static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, - struct property *prop) { - int ret = 0; - const struct maxsynccop_t { - int comp_elements; - int comp_data_limit; - int comp_sg_limit; - int decomp_elements; - int decomp_data_limit; - int decomp_sg_limit; - } *maxsynccop; - - if (prop->length != sizeof(*maxsynccop)) { - dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); - dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, - sizeof(*maxsynccop)); - ret = -EINVAL; - goto out; - } - - maxsynccop = (const struct maxsynccop_t *)prop->value; - - /* Use one limit rather than separate limits for compression and - * decompression. Set a maximum for this so as not to exceed the - * size that the header can support and round the value down to - * the hardware page size (4K) */ - devdata->max_sync_size = - (unsigned int)min(maxsynccop->comp_data_limit, - maxsynccop->decomp_data_limit); - - devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, - SIZE_64K); - - if (devdata->max_sync_size < SIZE_4K) { - dev_err(devdata->dev, "%s: hardware max data size (%u) is " - "less than the driver minimum, unable to use " - "the hardware device\n", - __func__, devdata->max_sync_size); - ret = -EINVAL; - goto out; - } - - devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit, - maxsynccop->decomp_sg_limit); - if (devdata->max_sync_sg < 1) { - dev_err(devdata->dev, "%s: hardware max sg size (%u) is " - "less than the driver minimum, unable to use " - "the hardware device\n", - __func__, devdata->max_sync_sg); - ret = -EINVAL; - goto out; - } - -out: - return ret; + return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem); } +EXPORT_SYMBOL_GPL(nx842_decompress); -/** - * - * nx842_OF_upd -- Handle OF properties updates for the device. - * - * Set all properties from the OF tree. Optionally, a new property - * can be provided by the @new_prop pointer to overwrite an existing value. - * The device will remain disabled until all values are valid, this function - * will return an error for updates unless all values are valid. - * - * @new_prop: If not NULL, this property is being updated. If NULL, update - * all properties from the current values in the OF tree. - * - * Returns: - * 0 - Success - * -ENOMEM - Could not allocate memory for new devdata structure - * -EINVAL - property value not found, new_prop is not a recognized - * property for the device or property value is not valid. 
- * -ENODEV - Device is not available - */ -static int nx842_OF_upd(struct property *new_prop) +static __init int nx842_init(void) { - struct nx842_devdata *old_devdata = NULL; - struct nx842_devdata *new_devdata = NULL; - struct device_node *of_node = NULL; - struct property *status = NULL; - struct property *maxsglen = NULL; - struct property *maxsyncop = NULL; - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&devdata_mutex, flags); - old_devdata = rcu_dereference_check(devdata, - lockdep_is_held(&devdata_mutex)); - if (old_devdata) - of_node = old_devdata->dev->of_node; + request_module("nx-compress-powernv"); + request_module("nx-compress-pseries"); - if (!old_devdata || !of_node) { - pr_err("%s: device is not available\n", __func__); - spin_unlock_irqrestore(&devdata_mutex, flags); - return -ENODEV; - } - - new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); - if (!new_devdata) { - dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__); - ret = -ENOMEM; - goto error_out; - } - - memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); - new_devdata->counters = old_devdata->counters; - - /* Set ptrs for existing properties */ - status = of_find_property(of_node, "status", NULL); - maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); - maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); - if (!status || !maxsglen || !maxsyncop) { - dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); - ret = -EINVAL; - goto error_out; - } - - /* - * If this is a property update, there are only certain properties that - * we care about. Bail if it isn't in the below list + /* we prevent loading if there's no platform driver, and we get the + * module that set it so it won't unload, so we don't need to check + * if it's set in any of the above functions */ - if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || - strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || - strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) - goto out; - - /* Perform property updates */ - ret = nx842_OF_upd_status(new_devdata, status); - if (ret) - goto error_out; - - ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); - if (ret) - goto error_out; - - ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); - if (ret) - goto error_out; - -out: - dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", - __func__, new_devdata->max_sync_size, - old_devdata->max_sync_size); - dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", - __func__, new_devdata->max_sync_sg, - old_devdata->max_sync_sg); - dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", - __func__, new_devdata->max_sg_len, - old_devdata->max_sg_len); - - rcu_assign_pointer(devdata, new_devdata); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - dev_set_drvdata(new_devdata->dev, new_devdata); - kfree(old_devdata); - return 0; - -error_out: - if (new_devdata) { - dev_info(old_devdata->dev, "%s: device disabled\n", __func__); - nx842_OF_set_defaults(new_devdata); - rcu_assign_pointer(devdata, new_devdata); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - dev_set_drvdata(new_devdata->dev, new_devdata); - kfree(old_devdata); - } else { - dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); - spin_unlock_irqrestore(&devdata_mutex, flags); - } - - if (!ret) - ret = -EINVAL; - return ret; -} - -/** - * nx842_OF_notifier - Process updates to OF 
properties for the device - * - * @np: notifier block - * @action: notifier action - * @update: struct pSeries_reconfig_prop_update pointer if action is - * PSERIES_UPDATE_PROPERTY - * - * Returns: - * NOTIFY_OK on success - * NOTIFY_BAD encoded with error number on failure, use - * notifier_to_errno() to decode this value - */ -static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, - void *data) -{ - struct of_reconfig_data *upd = data; - struct nx842_devdata *local_devdata; - struct device_node *node = NULL; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (local_devdata) - node = local_devdata->dev->of_node; - - if (local_devdata && - action == OF_RECONFIG_UPDATE_PROPERTY && - !strcmp(upd->dn->name, node->name)) { - rcu_read_unlock(); - nx842_OF_upd(upd->prop); - } else - rcu_read_unlock(); - - return NOTIFY_OK; -} - -static struct notifier_block nx842_of_nb = { - .notifier_call = nx842_OF_notifier, -}; - -#define nx842_counter_read(_name) \ -static ssize_t nx842_##_name##_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) { \ - struct nx842_devdata *local_devdata; \ - int p = 0; \ - rcu_read_lock(); \ - local_devdata = rcu_dereference(devdata); \ - if (local_devdata) \ - p = snprintf(buf, PAGE_SIZE, "%ld\n", \ - atomic64_read(&local_devdata->counters->_name)); \ - rcu_read_unlock(); \ - return p; \ -} - -#define NX842DEV_COUNTER_ATTR_RO(_name) \ - nx842_counter_read(_name); \ - static struct device_attribute dev_attr_##_name = __ATTR(_name, \ - 0444, \ - nx842_##_name##_show,\ - NULL); - -NX842DEV_COUNTER_ATTR_RO(comp_complete); -NX842DEV_COUNTER_ATTR_RO(comp_failed); -NX842DEV_COUNTER_ATTR_RO(decomp_complete); -NX842DEV_COUNTER_ATTR_RO(decomp_failed); -NX842DEV_COUNTER_ATTR_RO(swdecomp); - -static ssize_t nx842_timehist_show(struct device *, - struct device_attribute *, char *); - -static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, - nx842_timehist_show, NULL); -static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, - 0444, nx842_timehist_show, NULL); - -static ssize_t nx842_timehist_show(struct device *dev, - struct device_attribute *attr, char *buf) { - char *p = buf; - struct nx842_devdata *local_devdata; - atomic64_t *times; - int bytes_remain = PAGE_SIZE; - int bytes; - int i; - - rcu_read_lock(); - local_devdata = rcu_dereference(devdata); - if (!local_devdata) { - rcu_read_unlock(); - return 0; - } - - if (attr == &dev_attr_comp_times) - times = local_devdata->counters->comp_times; - else if (attr == &dev_attr_decomp_times) - times = local_devdata->counters->decomp_times; - else { - rcu_read_unlock(); - return 0; - } - - for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { - bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n", - i ? 
(2<<(i-1)) : 0, (2<<i)-1,
- atomic64_read(&times[i]));
- bytes_remain -= bytes;
- p += bytes;
- }
- /* The last bucket holds everything over
- * 2<<(NX842_HIST_SLOTS - 2) us */
- bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
- 2<<(NX842_HIST_SLOTS - 2),
- atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
- p += bytes;
-
- rcu_read_unlock();
- return p - buf;
-}
-
-static struct attribute *nx842_sysfs_entries[] = {
- &dev_attr_comp_complete.attr,
- &dev_attr_comp_failed.attr,
- &dev_attr_decomp_complete.attr,
- &dev_attr_decomp_failed.attr,
- &dev_attr_swdecomp.attr,
- &dev_attr_comp_times.attr,
- &dev_attr_decomp_times.attr,
- NULL,
-};
-
-static struct attribute_group nx842_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = nx842_sysfs_entries,
-};
-
-static int __init nx842_probe(struct vio_dev *viodev,
- const struct vio_device_id *id)
-{
- struct nx842_devdata *old_devdata, *new_devdata = NULL;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&devdata_mutex, flags);
- old_devdata = rcu_dereference_check(devdata,
- lockdep_is_held(&devdata_mutex));
-
- if (old_devdata && old_devdata->vdev != NULL) {
- dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
- ret = -1;
- goto error_unlock;
- }
-
- dev_set_drvdata(&viodev->dev, NULL);
-
- new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
- if (!new_devdata) {
- dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
- ret = -ENOMEM;
- goto error_unlock;
- }
-
- new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
- GFP_NOFS);
- if (!new_devdata->counters) {
- dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
- ret = -ENOMEM;
- goto error_unlock;
- }
-
- new_devdata->vdev = viodev;
- new_devdata->dev = &viodev->dev;
- nx842_OF_set_defaults(new_devdata);
-
- rcu_assign_pointer(devdata, new_devdata);
- spin_unlock_irqrestore(&devdata_mutex, flags);
- synchronize_rcu();
- kfree(old_devdata);
-
- of_reconfig_notifier_register(&nx842_of_nb);
-
- ret = nx842_OF_upd(NULL);
- if (ret && ret != -ENODEV) {
- dev_err(&viodev->dev, "could not parse device tree.
%d\n", ret); - ret = -1; - goto error; - } - - rcu_read_lock(); - dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); - rcu_read_unlock(); - - if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { - dev_err(&viodev->dev, "could not create sysfs device attributes\n"); - ret = -1; - goto error; + if (!nx842_platform_driver_get()) { + pr_err("no nx842 driver found.\n"); + return -ENODEV; } return 0; - -error_unlock: - spin_unlock_irqrestore(&devdata_mutex, flags); - if (new_devdata) - kfree(new_devdata->counters); - kfree(new_devdata); -error: - return ret; -} - -static int __exit nx842_remove(struct vio_dev *viodev) -{ - struct nx842_devdata *old_devdata; - unsigned long flags; - - pr_info("Removing IBM Power 842 compression device\n"); - sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); - - spin_lock_irqsave(&devdata_mutex, flags); - old_devdata = rcu_dereference_check(devdata, - lockdep_is_held(&devdata_mutex)); - of_reconfig_notifier_unregister(&nx842_of_nb); - RCU_INIT_POINTER(devdata, NULL); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - dev_set_drvdata(&viodev->dev, NULL); - if (old_devdata) - kfree(old_devdata->counters); - kfree(old_devdata); - return 0; -} - -static struct vio_device_id nx842_driver_ids[] = { - {"ibm,compression-v1", "ibm,compression"}, - {"", ""}, -}; - -static struct vio_driver nx842_driver = { - .name = MODULE_NAME, - .probe = nx842_probe, - .remove = __exit_p(nx842_remove), - .get_desired_dma = nx842_get_desired_dma, - .id_table = nx842_driver_ids, -}; - -static int __init nx842_init(void) -{ - struct nx842_devdata *new_devdata; - pr_info("Registering IBM Power 842 compression driver\n"); - - RCU_INIT_POINTER(devdata, NULL); - new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); - if (!new_devdata) { - pr_err("Could not allocate memory for device data\n"); - return -ENOMEM; - } - new_devdata->status = UNAVAILABLE; - RCU_INIT_POINTER(devdata, new_devdata); - - return vio_register_driver(&nx842_driver); } - module_init(nx842_init); static void __exit nx842_exit(void) { - struct nx842_devdata *old_devdata; - unsigned long flags; - - pr_info("Exiting IBM Power 842 compression driver\n"); - spin_lock_irqsave(&devdata_mutex, flags); - old_devdata = rcu_dereference_check(devdata, - lockdep_is_held(&devdata_mutex)); - RCU_INIT_POINTER(devdata, NULL); - spin_unlock_irqrestore(&devdata_mutex, flags); - synchronize_rcu(); - if (old_devdata) - dev_set_drvdata(old_devdata->dev, NULL); - kfree(old_devdata); - vio_unregister_driver(&nx842_driver); + nx842_platform_driver_put(); } - module_exit(nx842_exit); - -/********************************* - * 842 software decompressor -*********************************/ -typedef int (*sw842_template_op)(const char **, int *, unsigned char **, - struct sw842_fifo *); - -static int sw842_data8(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_data4(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_data2(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr8(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr4(const char **, int *, unsigned char **, - struct sw842_fifo *); -static int sw842_ptr2(const char **, int *, unsigned char **, - struct sw842_fifo *); - -/* special templates */ -#define SW842_TMPL_REPEAT 0x1B -#define SW842_TMPL_ZEROS 0x1C -#define SW842_TMPL_EOF 0x1E - -static sw842_template_op sw842_tmpl_ops[26][4] = { - { sw842_data8, NULL}, 
/* 0 (00000) */ - { sw842_data4, sw842_data2, sw842_ptr2, NULL}, - { sw842_data4, sw842_ptr2, sw842_data2, NULL}, - { sw842_data4, sw842_ptr2, sw842_ptr2, NULL}, - { sw842_data4, sw842_ptr4, NULL}, - { sw842_data2, sw842_ptr2, sw842_data4, NULL}, - { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2}, - { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2}, - { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,}, - { sw842_data2, sw842_ptr2, sw842_ptr4, NULL}, - { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */ - { sw842_ptr2, sw842_data4, sw842_ptr2, NULL}, - { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2}, - { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2}, - { sw842_ptr2, sw842_data2, sw842_ptr4, NULL}, - { sw842_ptr2, sw842_ptr2, sw842_data4, NULL}, - { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2}, - { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL}, - { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */ - { sw842_ptr4, sw842_data2, sw842_ptr2, NULL}, - { sw842_ptr4, sw842_ptr2, sw842_data2, NULL}, - { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL}, - { sw842_ptr4, sw842_ptr4, NULL}, - { sw842_ptr8, NULL} -}; - -/* Software decompress helpers */ - -static uint8_t sw842_get_byte(const char *buf, int bit) -{ - uint8_t tmpl; - uint16_t tmp; - tmp = htons(*(uint16_t *)(buf)); - tmp = (uint16_t)(tmp << bit); - tmp = ntohs(tmp); - memcpy(&tmpl, &tmp, 1); - return tmpl; -} - -static uint8_t sw842_get_template(const char **buf, int *bit) -{ - uint8_t byte; - byte = sw842_get_byte(*buf, *bit); - byte = byte >> 3; - byte &= 0x1F; - *buf += (*bit + 5) / 8; - *bit = (*bit + 5) % 8; - return byte; -} - -/* repeat_count happens to be 5-bit too (like the template) */ -static uint8_t sw842_get_repeat_count(const char **buf, int *bit) -{ - uint8_t byte; - byte = sw842_get_byte(*buf, *bit); - byte = byte >> 2; - byte &= 0x3F; - *buf += (*bit + 6) / 8; - *bit = (*bit + 6) % 8; - return byte; -} - -static uint8_t sw842_get_ptr2(const char **buf, int *bit) -{ - uint8_t ptr; - ptr = sw842_get_byte(*buf, *bit); - (*buf)++; - return ptr; -} - -static uint16_t sw842_get_ptr4(const char **buf, int *bit, - struct sw842_fifo *fifo) -{ - uint16_t ptr; - ptr = htons(*(uint16_t *)(*buf)); - ptr = (uint16_t)(ptr << *bit); - ptr = ptr >> 7; - ptr &= 0x01FF; - *buf += (*bit + 9) / 8; - *bit = (*bit + 9) % 8; - return ptr; -} - -static uint8_t sw842_get_ptr8(const char **buf, int *bit, - struct sw842_fifo *fifo) -{ - return sw842_get_ptr2(buf, bit); -} - -/* Software decompress template ops */ - -static int sw842_data8(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - int ret; - - ret = sw842_data4(inbuf, inbit, outbuf, fifo); - if (ret) - return ret; - ret = sw842_data4(inbuf, inbit, outbuf, fifo); - return ret; -} - -static int sw842_data4(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - int ret; - - ret = sw842_data2(inbuf, inbit, outbuf, fifo); - if (ret) - return ret; - ret = sw842_data2(inbuf, inbit, outbuf, fifo); - return ret; -} - -static int sw842_data2(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - **outbuf = sw842_get_byte(*inbuf, *inbit); - (*inbuf)++; - (*outbuf)++; - **outbuf = sw842_get_byte(*inbuf, *inbit); - (*inbuf)++; - (*outbuf)++; - return 0; -} - -static int sw842_ptr8(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint8_t ptr; - 
ptr = sw842_get_ptr8(inbuf, inbit, fifo); - if (!fifo->f84_full && (ptr >= fifo->f8_count)) - return 1; - memcpy(*outbuf, fifo->f8[ptr], 8); - *outbuf += 8; - return 0; -} - -static int sw842_ptr4(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint16_t ptr; - ptr = sw842_get_ptr4(inbuf, inbit, fifo); - if (!fifo->f84_full && (ptr >= fifo->f4_count)) - return 1; - memcpy(*outbuf, fifo->f4[ptr], 4); - *outbuf += 4; - return 0; -} - -static int sw842_ptr2(const char **inbuf, int *inbit, - unsigned char **outbuf, struct sw842_fifo *fifo) -{ - uint8_t ptr; - ptr = sw842_get_ptr2(inbuf, inbit); - if (!fifo->f2_full && (ptr >= fifo->f2_count)) - return 1; - memcpy(*outbuf, fifo->f2[ptr], 2); - *outbuf += 2; - return 0; -} - -static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo) -{ - unsigned char initial_f2count = fifo->f2_count; - - memcpy(fifo->f8[fifo->f8_count], buf, 8); - fifo->f4_count += 2; - fifo->f8_count += 1; - - if (!fifo->f84_full && fifo->f4_count >= 512) { - fifo->f84_full = 1; - fifo->f4_count /= 512; - } - - memcpy(fifo->f2[fifo->f2_count++], buf, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2); - memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2); - if (fifo->f2_count < initial_f2count) - fifo->f2_full = 1; -} - -static int sw842_decompress(const unsigned char *src, int srclen, - unsigned char *dst, int *destlen, - const void *wrkmem) -{ - uint8_t tmpl; - const char *inbuf; - int inbit = 0; - unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf; - const char *inbuf_end; - sw842_template_op op; - int opindex; - int i, repeat_count; - struct sw842_fifo *fifo; - int ret = 0; - - fifo = &((struct nx842_workmem *)(wrkmem))->swfifo; - memset(fifo, 0, sizeof(*fifo)); - - origbuf = NULL; - inbuf = src; - inbuf_end = src + srclen; - outbuf = dst; - outbuf_end = dst + *destlen; - - while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) { - if (inbuf >= inbuf_end) { - ret = -EINVAL; - goto out; - } - - opindex = 0; - prevbuf = origbuf; - origbuf = outbuf; - switch (tmpl) { - case SW842_TMPL_REPEAT: - if (prevbuf == NULL) { - ret = -EINVAL; - goto out; - } - - repeat_count = sw842_get_repeat_count(&inbuf, - &inbit) + 1; - - /* Did the repeat count advance past the end of input */ - if (inbuf > inbuf_end) { - ret = -EINVAL; - goto out; - } - - for (i = 0; i < repeat_count; i++) { - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - memcpy(outbuf, prevbuf, 8); - sw842_copy_to_fifo(outbuf, fifo); - outbuf += 8; - } - break; - - case SW842_TMPL_ZEROS: - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - memset(outbuf, 0, 8); - sw842_copy_to_fifo(outbuf, fifo); - outbuf += 8; - break; - - default: - if (tmpl > 25) { - ret = -EINVAL; - goto out; - } - - /* Does this go past the end of the input buffer */ - if ((inbuf + 2) > inbuf_end) { - ret = -EINVAL; - goto out; - } - - /* Would this overflow the output buffer */ - if ((outbuf + 8) > outbuf_end) { - ret = -ENOSPC; - goto out; - } - - while (opindex < 4 && - (op = sw842_tmpl_ops[tmpl][opindex++]) - != NULL) { - ret = (*op)(&inbuf, &inbit, &outbuf, fifo); - if (ret) { - ret = -EINVAL; - goto out; - } - sw842_copy_to_fifo(origbuf, fifo); - } - } - } - -out: - if (!ret) - *destlen = (unsigned int)(outbuf - dst); - else - *destlen = 0; - - return ret; -} diff --git a/drivers/crypto/nx/nx-842.h 
b/drivers/crypto/nx/nx-842.h new file mode 100644 index 000000000000..ac0ea79d0f8b --- /dev/null +++ b/drivers/crypto/nx/nx-842.h @@ -0,0 +1,144 @@ + +#ifndef __NX_842_H__ +#define __NX_842_H__ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sw842.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/ratelimit.h> + +/* Restrictions on Data Descriptor List (DDL) and Entry (DDE) buffers + * + * From NX P8 workbook, sec 4.9.1 "842 details" + * Each DDE buffer is 128 byte aligned + * Each DDE buffer size is a multiple of 32 bytes (except the last) + * The last DDE buffer size is a multiple of 8 bytes + */ +#define DDE_BUFFER_ALIGN (128) +#define DDE_BUFFER_SIZE_MULT (32) +#define DDE_BUFFER_LAST_MULT (8) + +/* Arbitrary DDL length limit + * Allows max buffer size of MAX-1 to MAX pages + * (depending on alignment) + */ +#define DDL_LEN_MAX (17) + +/* CCW 842 CI/FC masks + * NX P8 workbook, section 4.3.1, figure 4-6 + * "CI/FC Boundary by NX CT type" + */ +#define CCW_CI_842 (0x00003ff8) +#define CCW_FC_842 (0x00000007) + +/* CCW Function Codes (FC) for 842 + * NX P8 workbook, section 4.9, table 4-28 + * "Function Code Definitions for 842 Memory Compression" + */ +#define CCW_FC_842_COMP_NOCRC (0) +#define CCW_FC_842_COMP_CRC (1) +#define CCW_FC_842_DECOMP_NOCRC (2) +#define CCW_FC_842_DECOMP_CRC (3) +#define CCW_FC_842_MOVE (4) + +/* CSB CC Error Types for 842 + * NX P8 workbook, section 4.10.3, table 4-30 + * "Reported Error Types Summary Table" + */ +/* These are all duplicates of existing codes defined in icswx.h. */ +#define CSB_CC_TRANSLATION_DUP1 (80) +#define CSB_CC_TRANSLATION_DUP2 (82) +#define CSB_CC_TRANSLATION_DUP3 (84) +#define CSB_CC_TRANSLATION_DUP4 (86) +#define CSB_CC_TRANSLATION_DUP5 (92) +#define CSB_CC_TRANSLATION_DUP6 (94) +#define CSB_CC_PROTECTION_DUP1 (81) +#define CSB_CC_PROTECTION_DUP2 (83) +#define CSB_CC_PROTECTION_DUP3 (85) +#define CSB_CC_PROTECTION_DUP4 (87) +#define CSB_CC_PROTECTION_DUP5 (93) +#define CSB_CC_PROTECTION_DUP6 (95) +#define CSB_CC_RD_EXTERNAL_DUP1 (89) +#define CSB_CC_RD_EXTERNAL_DUP2 (90) +#define CSB_CC_RD_EXTERNAL_DUP3 (91) +/* These are specific to NX */ +/* 842 codes */ +#define CSB_CC_TPBC_GT_SPBC (64) /* no error, but >1 comp ratio */ +#define CSB_CC_CRC_MISMATCH (65) /* decomp crc mismatch */ +#define CSB_CC_TEMPL_INVALID (66) /* decomp invalid template value */ +#define CSB_CC_TEMPL_OVERFLOW (67) /* decomp template shows data after end */ +/* sym crypt codes */ +#define CSB_CC_DECRYPT_OVERFLOW (64) +/* asym crypt codes */ +#define CSB_CC_MINV_OVERFLOW (128) +/* These are reserved for hypervisor use */ +#define CSB_CC_HYP_RESERVE_START (240) +#define CSB_CC_HYP_RESERVE_END (253) +#define CSB_CC_HYP_NO_HW (254) +#define CSB_CC_HYP_HANG_ABORTED (255) + +/* CCB Completion Modes (CM) for 842 + * NX P8 workbook, section 4.3, figure 4-5 + * "CRB Details - Normal Cop_Req (CL=00, C=1)" + */ +#define CCB_CM_EXTRA_WRITE (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_STORE) +#define CCB_CM_INTERRUPT (CCB_CM0_ALL_COMPLETIONS & CCB_CM12_INTERRUPT) + +#define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1))) +#define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE) + +static inline unsigned long nx842_get_pa(void *addr) +{ + if (!is_vmalloc_addr(addr)) + return __pa(addr); + + return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); +} + +/* Get/Set bit fields */ +#define MASK_LSH(m) (__builtin_ffsl(m) - 1) +#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m)) +#define 
SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m))) + +struct nx842_constraints { + int alignment; + int multiple; + int minimum; + int maximum; +}; + +struct nx842_driver { + char *name; + struct module *owner; + size_t workmem_size; + + struct nx842_constraints *constraints; + + int (*compress)(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, + void *wrkmem); + int (*decompress)(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, + void *wrkmem); +}; + +struct nx842_driver *nx842_platform_driver(void); +bool nx842_platform_driver_set(struct nx842_driver *driver); +void nx842_platform_driver_unset(struct nx842_driver *driver); +bool nx842_platform_driver_get(void); +void nx842_platform_driver_put(void); + +size_t nx842_workmem_size(void); + +int nx842_constraints(struct nx842_constraints *constraints); + +int nx842_compress(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, void *wrkmem); +int nx842_decompress(const unsigned char *in, unsigned int in_len, + unsigned char *out, unsigned int *out_len, void *wrkmem); + +#endif /* __NX_842_H__ */ diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 88c562434bc0..08ac6d48688c 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -93,17 +93,6 @@ out: return rc; } -static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, - unsigned int authsize) -{ - if (authsize > crypto_aead_alg(tfm)->maxauthsize) - return -EINVAL; - - crypto_aead_crt(tfm)->authsize = authsize; - - return 0; -} - static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { @@ -116,8 +105,6 @@ static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, return -EINVAL; } - crypto_aead_crt(tfm)->authsize = authsize; - return 0; } @@ -134,7 +121,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, unsigned int max_sg_len; if (nbytes <= AES_BLOCK_SIZE) { - scatterwalk_start(&walk, req->assoc); + scatterwalk_start(&walk, req->src); scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); return 0; @@ -159,7 +146,7 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, - req->assoc, processed, &to_process); + req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; @@ -225,7 +212,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc) NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, - req->assoc, processed, &to_process); + req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; @@ -377,7 +364,8 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; desc.tfm = (struct crypto_blkcipher *) req->base.tfm; rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, - req->src, &to_process, processed, + req->src, &to_process, + processed + req->assoclen, csbcpb->cpb.aes_gcm.iv_or_cnt); if (rc) @@ -412,17 +400,19 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) mac: if (enc) { /* copy out the auth tag */ - scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, - req->dst, nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_TO_SG); + scatterwalk_map_and_copy( 
+ csbcpb->cpb.aes_gcm.out_pat_or_mac, + req->dst, req->assoclen + nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_TO_SG); } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; - scatterwalk_map_and_copy(itag, req->src, nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_FROM_SG); + scatterwalk_map_and_copy( + itag, req->src, req->assoclen + nbytes, + crypto_aead_authsize(crypto_aead_reqtfm(req)), + SCATTERWALK_FROM_SG); rc = memcmp(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; @@ -481,45 +471,39 @@ static int gcm4106_aes_nx_decrypt(struct aead_request *req) * during encrypt/decrypt doesn't solve this problem, because it calls * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, * but instead uses this tfm->blocksize. */ -struct crypto_alg nx_gcm_aes_alg = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_aead_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_gcm_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_aead = { - .ivsize = AES_BLOCK_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - .setkey = gcm_aes_nx_set_key, - .setauthsize = gcm_aes_nx_setauthsize, - .encrypt = gcm_aes_nx_encrypt, - .decrypt = gcm_aes_nx_decrypt, - } +struct aead_alg nx_gcm_aes_alg = { + .base = { + .cra_name = "gcm(aes)", + .cra_driver_name = "gcm-aes-nx", + .cra_priority = 300, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_module = THIS_MODULE, + }, + .init = nx_crypto_ctx_aes_gcm_init, + .exit = nx_crypto_ctx_aead_exit, + .ivsize = 12, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm_aes_nx_set_key, + .encrypt = gcm_aes_nx_encrypt, + .decrypt = gcm_aes_nx_decrypt, }; -struct crypto_alg nx_gcm4106_aes_alg = { - .cra_name = "rfc4106(gcm(aes))", - .cra_driver_name = "rfc4106-gcm-aes-nx", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_AEAD, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct nx_crypto_ctx), - .cra_type = &crypto_nivaead_type, - .cra_module = THIS_MODULE, - .cra_init = nx_crypto_ctx_aes_gcm_init, - .cra_exit = nx_crypto_ctx_exit, - .cra_aead = { - .ivsize = 8, - .maxauthsize = AES_BLOCK_SIZE, - .geniv = "seqiv", - .setkey = gcm4106_aes_nx_set_key, - .setauthsize = gcm4106_aes_nx_setauthsize, - .encrypt = gcm4106_aes_nx_encrypt, - .decrypt = gcm4106_aes_nx_decrypt, - } +struct aead_alg nx_gcm4106_aes_alg = { + .base = { + .cra_name = "rfc4106(gcm(aes))", + .cra_driver_name = "rfc4106-gcm-aes-nx", + .cra_priority = 300, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct nx_crypto_ctx), + .cra_module = THIS_MODULE, + }, + .init = nx_crypto_ctx_aes_gcm_init, + .exit = nx_crypto_ctx_aead_exit, + .ivsize = 8, + .maxauthsize = AES_BLOCK_SIZE, + .setkey = gcm4106_aes_nx_set_key, + .setauthsize = gcm4106_aes_nx_setauthsize, + .encrypt = gcm4106_aes_nx_encrypt, + .decrypt = gcm4106_aes_nx_decrypt, }; diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 23621da624c3..4e91bdb83c59 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; int len; - int rc; + u32 max_sg_len; nx_ctx_init(nx_ctx, HCOP_FC_SHA); @@ -44,15 
+45,18 @@ static int nx_sha256_init(struct shash_desc *desc) NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + len = SHA256_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - (u8 *) sctx->state, - NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); - if (rc) - goto out; + if (len != SHA256_DIGEST_SIZE) + return -EINVAL; sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); @@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_desc *desc) sctx->state[7] = __cpu_to_be32(SHA256_H7); sctx->count = 0; -out: return 0; } @@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; int data_len; + u32 max_sg_len; u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); @@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + do { /* * to_process: the SHA256_BLOCK_SIZE data chunk to process in @@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, if (buf_len) { data_len = buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) sctx->buf, - NX_DS_SHA256); + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + &data_len, + max_sg_len); - if (rc || data_len != buf_len) + if (data_len != buf_len) { + rc = -EINVAL; goto out; + } } data_len = to_process - buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) data, - NX_DS_SHA256); + in_sg = nx_build_sg_list(in_sg, (u8 *) data, + &data_len, max_sg_len); - if (rc) - goto out; + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); to_process = (data_len + buf_len); leftover = total - to_process; @@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct sha256_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; - int rc; + u32 max_sg_len; + int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count >= SHA256_BLOCK_SIZE) { @@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); len = sctx->count 
& (SHA256_BLOCK_SIZE - 1); - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &len, - (u8 *) sctx->buf, - NX_DS_SHA256); + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, + &len, max_sg_len); - if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) + if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { + rc = -EINVAL; goto out; + } len = SHA256_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - out, - NX_DS_SHA256); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); - if (rc || len != SHA256_DIGEST_SIZE) + if (len != SHA256_DIGEST_SIZE) { + rc = -EINVAL; goto out; + } + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index b3adf1022673..e6a58d2ee628 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_desc *desc) { struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); + struct nx_sg *out_sg; int len; - int rc; + u32 max_sg_len; nx_ctx_init(nx_ctx, HCOP_FC_SHA); @@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_desc *desc) NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + len = SHA512_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - (u8 *)sctx->state, - NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, + &len, max_sg_len); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); - if (rc || len != SHA512_DIGEST_SIZE) - goto out; + if (len != SHA512_DIGEST_SIZE) + return -EINVAL; sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); @@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_desc *desc) sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; -out: return 0; } @@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; + u32 max_sg_len; u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); @@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; + in_sg = nx_ctx->in_sg; + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + do { /* * to_process: the SHA512_BLOCK_SIZE data chunk to process in @@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, if (buf_len) { data_len = buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) sctx->buf, - NX_DS_SHA512); + in_sg = nx_build_sg_list(nx_ctx->in_sg, + (u8 *) sctx->buf, + &data_len, max_sg_len); - if 
(rc || data_len != buf_len) + if (data_len != buf_len) { + rc = -EINVAL; goto out; + } } data_len = to_process - buf_len; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &data_len, - (u8 *) data, - NX_DS_SHA512); + in_sg = nx_build_sg_list(in_sg, (u8 *) data, + &data_len, max_sg_len); - if (rc || data_len != (to_process - buf_len)) + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + + if (data_len != (to_process - buf_len)) { + rc = -EINVAL; goto out; + } to_process = (data_len + buf_len); leftover = total - to_process; @@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) struct sha512_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; + struct nx_sg *in_sg, *out_sg; + u32 max_sg_len; u64 count0; unsigned long irq_flags; - int rc; + int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); + max_sg_len = min_t(u64, nx_ctx->ap->sglen, + nx_driver.of.max_sg_len/sizeof(struct nx_sg)); + max_sg_len = min_t(u64, max_sg_len, + nx_ctx->ap->databytelen/NX_PAGE_SIZE); + /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count[0] >= SHA512_BLOCK_SIZE) { @@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) csbcpb->cpb.sha512.message_bit_length_lo = count0; len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, - &nx_ctx->op.inlen, - &len, - (u8 *)sctx->buf, - NX_DS_SHA512); + in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, + max_sg_len); - if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) + if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { + rc = -EINVAL; goto out; + } len = SHA512_DIGEST_SIZE; - rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, - &nx_ctx->op.outlen, - &len, - out, - NX_DS_SHA512); + out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, + max_sg_len); - if (rc) - goto out; + nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); + nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 1da6dc59d0dd..f6198f29a4a8 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -19,8 +19,8 @@ * Author: Kent Yoder <yoder1@us.ibm.com> */ +#include <crypto/internal/aead.h> #include <crypto/internal/hash.h> -#include <crypto/hash.h> #include <crypto/aes.h> #include <crypto/sha.h> #include <crypto/algapi.h> @@ -29,10 +29,10 @@ #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/mm.h> -#include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/device.h> #include <linux/of.h> +#include <linux/types.h> #include <asm/hvcall.h> #include <asm/vio.h> @@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, * @delta: is the amount we need to crop in order to bound the list. 
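Both SHA paths now set op.inlen/op.outlen from a pointer difference taken start minus end, which is deliberately negative; a comment later in nx.c notes that a negative length tells phyp the parameter is a scatterlist rather than a linear buffer. A small compilable illustration of that arithmetic (struct layout assumed for the example):

#include <stdio.h>

struct nx_sg { unsigned long addr; unsigned int len; }; /* assumed layout */

int main(void)
{
	struct nx_sg table[4];
	struct nx_sg *start = table;
	struct nx_sg *end = table + 3; /* three entries were filled in */

	/* (start - end) is negative; scaling by the entry size yields the
	 * negative byte count the hypervisor interface expects */
	long oplen = (start - end) * (long)sizeof(struct nx_sg);

	printf("op length = %ld\n", oplen); /* -48 with 16-byte entries */
	return 0;
}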
* */ -static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta) +static long int trim_sg_list(struct nx_sg *sg, + struct nx_sg *end, + unsigned int delta, + unsigned int *nbytes) { + long int oplen; + long int data_back; + unsigned int is_delta = delta; + while (delta && end > sg) { struct nx_sg *last = end - 1; @@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d delta -= last->len; } } - return (sg - end) * sizeof(struct nx_sg); -} -/** - * nx_sha_build_sg_list - walk and build sg list to sha modes - * using right bounds and limits. - * @nx_ctx: NX crypto context for the lists we're building - * @nx_sg: current sg list in or out list - * @op_len: current op_len to be used in order to build a sg list - * @nbytes: number or bytes to be processed - * @offset: buf offset - * @mode: SHA256 or SHA512 - */ -int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx, - struct nx_sg *nx_in_outsg, - s64 *op_len, - unsigned int *nbytes, - u8 *offset, - u32 mode) -{ - unsigned int delta = 0; - unsigned int total = *nbytes; - struct nx_sg *nx_insg = nx_in_outsg; - unsigned int max_sg_len; - - max_sg_len = min_t(u64, nx_ctx->ap->sglen, - nx_driver.of.max_sg_len/sizeof(struct nx_sg)); - max_sg_len = min_t(u64, max_sg_len, - nx_ctx->ap->databytelen/NX_PAGE_SIZE); - - *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); - nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len); - - switch (mode) { - case NX_DS_SHA256: - if (*nbytes < total) - delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1)); - break; - case NX_DS_SHA512: - if (*nbytes < total) - delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1)); - break; - default: - return -EINVAL; + /* There are cases where we need to crop list in order to make it + * a block size multiple, but we also need to align data. 
In order to + do that, we need to calculate how much data we need to put back to be + processed + */ + oplen = (sg - end) * sizeof(struct nx_sg); + if (is_delta) { + data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len; + data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1)); + *nbytes -= data_back; } - *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta); - return 0; + return oplen; } /** @@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ - nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta); - nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta); + nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes); + nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes); return 0; } @@ -426,6 +399,13 @@ static void nx_of_update_msc(struct device *dev, goto next_loop; } + if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) { + dev_warn(dev, "bogus sglen/databytelen: " + "%u/%u (ignored)\n", trip->sglen, + trip->databytelen); + goto next_loop; + } + switch (trip->keybitlen) { case 128: case 160: @@ -518,6 +498,72 @@ static void nx_of_init(struct device *dev, struct nx_of *props) nx_of_update_msc(dev, p, props); } +static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot) +{ + struct alg_props *props = &nx_driver.of.ap[fc][mode][slot]; + + if (!props->sglen || props->databytelen < NX_PAGE_SIZE) { + if (dev) + dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: " + "%u/%u (ignored)\n", fc, mode, slot, + props->sglen, props->databytelen); + return false; + } + + return true; +} + +static bool nx_check_props(struct device *dev, u32 fc, u32 mode) +{ + int i; + + for (i = 0; i < 3; i++) + if (!nx_check_prop(dev, fc, mode, i)) + return false; + + return true; +} + +static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode) +{ + return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? + crypto_register_alg(alg) : 0; +} + +static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode) +{ + return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? + crypto_register_aead(alg) : 0; +} + +static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot) +{ + return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev, + fc, mode, slot) : + nx_check_props(&nx_driver.viodev->dev, fc, mode)) ? + crypto_register_shash(alg) : 0; +} + +static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode) +{ + if (nx_check_props(NULL, fc, mode)) + crypto_unregister_alg(alg); +} + +static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode) +{ + if (nx_check_props(NULL, fc, mode)) + crypto_unregister_aead(alg); +} + +static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode, + int slot) +{ + if (slot >= 0 ?
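trim_sg_list() now also reports, via *nbytes, how much input to withhold so the cropped list stays a block multiple. A user-space rendering of that logic for reference; it mirrors the hunk above (including the use of the first entry's len in the data_back estimate), with AES_BLOCK_SIZE hard-coded for the sketch:

#include <stdlib.h>

#define AES_BLOCK_SIZE 16u

struct nx_sg { unsigned long addr; unsigned int len; }; /* assumed layout */

/* crop `delta` bytes off the tail of [sg, end), then reduce *nbytes by
 * the amount that must be put back so the remainder is block-aligned */
static long trim_sg_list(struct nx_sg *sg, struct nx_sg *end,
			 unsigned int delta, unsigned int *nbytes)
{
	unsigned int is_delta = delta;
	long oplen, data_back;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	oplen = (sg - end) * (long)sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (labs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(long)(AES_BLOCK_SIZE - 1));
		*nbytes -= (unsigned int)data_back;
	}
	return oplen;
}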
nx_check_prop(NULL, fc, mode, slot) : + nx_check_props(NULL, fc, mode)) + crypto_unregister_shash(alg); +} + /** * nx_register_algs - register algorithms with the crypto API * @@ -542,72 +588,77 @@ static int nx_register_algs(void) nx_driver.of.status = NX_OKAY; - rc = crypto_register_alg(&nx_ecb_aes_alg); + rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); if (rc) goto out; - rc = crypto_register_alg(&nx_cbc_aes_alg); + rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); if (rc) goto out_unreg_ecb; - rc = crypto_register_alg(&nx_ctr_aes_alg); + rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); if (rc) goto out_unreg_cbc; - rc = crypto_register_alg(&nx_ctr3686_aes_alg); + rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); if (rc) goto out_unreg_ctr; - rc = crypto_register_alg(&nx_gcm_aes_alg); + rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) goto out_unreg_ctr3686; - rc = crypto_register_alg(&nx_gcm4106_aes_alg); + rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) goto out_unreg_gcm; - rc = crypto_register_alg(&nx_ccm_aes_alg); + rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_gcm4106; - rc = crypto_register_alg(&nx_ccm4309_aes_alg); + rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_ccm; - rc = crypto_register_shash(&nx_shash_sha256_alg); + rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA256); if (rc) goto out_unreg_ccm4309; - rc = crypto_register_shash(&nx_shash_sha512_alg); + rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA512); if (rc) goto out_unreg_s256; - rc = crypto_register_shash(&nx_shash_aes_xcbc_alg); + rc = nx_register_shash(&nx_shash_aes_xcbc_alg, + NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); if (rc) goto out_unreg_s512; goto out; out_unreg_s512: - crypto_unregister_shash(&nx_shash_sha512_alg); + nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA512); out_unreg_s256: - crypto_unregister_shash(&nx_shash_sha256_alg); + nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, + NX_PROPS_SHA256); out_unreg_ccm4309: - crypto_unregister_alg(&nx_ccm4309_aes_alg); + nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_ccm: - crypto_unregister_alg(&nx_ccm_aes_alg); + nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_gcm4106: - crypto_unregister_alg(&nx_gcm4106_aes_alg); + nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_gcm: - crypto_unregister_alg(&nx_gcm_aes_alg); + nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_ctr3686: - crypto_unregister_alg(&nx_ctr3686_aes_alg); + nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); out_unreg_ctr: - crypto_unregister_alg(&nx_ctr_aes_alg); + nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); out_unreg_cbc: - crypto_unregister_alg(&nx_cbc_aes_alg); + nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); out_unreg_ecb: - crypto_unregister_alg(&nx_ecb_aes_alg); + nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); out: return rc; } @@ -666,9 +717,9 @@ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) NX_MODE_AES_CCM); } -int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm) +int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) { - return 
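All the wrappers above share one shape: validate the firmware-reported limits for the (fc, mode) slots, then register, and treat a skipped registration as success so the caller's unwind chain stays linear. A stripped-down model of that pattern, with types and names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NX_PAGE_SIZE 4096u

struct alg_props { unsigned int sglen, databytelen; };

static bool check_props(const struct alg_props *p, const char *what)
{
	if (!p->sglen || p->databytelen < NX_PAGE_SIZE) {
		fprintf(stderr, "bogus sglen/databytelen for %s: %u/%u (ignored)\n",
			what, p->sglen, p->databytelen);
		return false;
	}
	return true;
}

/* register only when the limits are sane; a skipped registration
 * returns 0 so error unwinding in the caller stays uniform */
static int register_guarded(const struct alg_props *p, const char *what,
			    int (*do_register)(void))
{
	return check_props(p, what) ? do_register() : 0;
}

static int fake_register(void) { return 0; }

int main(void)
{
	struct alg_props good = { 32, 65536 }, bad = { 0, 0 };

	printf("%d %d\n",
	       register_guarded(&good, "aes/cbc", fake_register),
	       register_guarded(&bad, "aes/gcm", fake_register));
	return 0;
}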
nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, + return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_GCM); } @@ -720,6 +771,13 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm) nx_ctx->out_sg = NULL; } +void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) +{ + struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); + + kzfree(nx_ctx->kmem); +} + static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) { dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", @@ -746,17 +804,24 @@ static int nx_remove(struct vio_dev *viodev) if (nx_driver.of.status == NX_OKAY) { NX_DEBUGFS_FINI(&nx_driver); - crypto_unregister_alg(&nx_ccm_aes_alg); - crypto_unregister_alg(&nx_ccm4309_aes_alg); - crypto_unregister_alg(&nx_gcm_aes_alg); - crypto_unregister_alg(&nx_gcm4106_aes_alg); - crypto_unregister_alg(&nx_ctr_aes_alg); - crypto_unregister_alg(&nx_ctr3686_aes_alg); - crypto_unregister_alg(&nx_cbc_aes_alg); - crypto_unregister_alg(&nx_ecb_aes_alg); - crypto_unregister_shash(&nx_shash_sha256_alg); - crypto_unregister_shash(&nx_shash_sha512_alg); - crypto_unregister_shash(&nx_shash_aes_xcbc_alg); + nx_unregister_shash(&nx_shash_aes_xcbc_alg, + NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); + nx_unregister_shash(&nx_shash_sha512_alg, + NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); + nx_unregister_shash(&nx_shash_sha256_alg, + NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); + nx_unregister_alg(&nx_ccm4309_aes_alg, + NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); + nx_unregister_aead(&nx_gcm4106_aes_alg, + NX_FC_AES, NX_MODE_AES_GCM); + nx_unregister_aead(&nx_gcm_aes_alg, + NX_FC_AES, NX_MODE_AES_GCM); + nx_unregister_alg(&nx_ctr3686_aes_alg, + NX_FC_AES, NX_MODE_AES_CTR); + nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); + nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); + nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); } return 0; diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h index 6c9ecaaead52..de3ea8738146 100644 --- a/drivers/crypto/nx/nx.h +++ b/drivers/crypto/nx/nx.h @@ -143,18 +143,17 @@ struct nx_crypto_ctx { /* prototypes */ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm); -int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm); +int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm); int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm); int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm); int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm); void nx_crypto_ctx_exit(struct crypto_tfm *tfm); +void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm); void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function); int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, u32 may_sleep); -int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *, - s64 *, unsigned int *, u8 *, u32); struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int *, @@ -178,8 +177,8 @@ void nx_debugfs_fini(struct nx_crypto_driver *); extern struct crypto_alg nx_cbc_aes_alg; extern struct crypto_alg nx_ecb_aes_alg; -extern struct crypto_alg nx_gcm_aes_alg; -extern struct crypto_alg nx_gcm4106_aes_alg; +extern struct aead_alg nx_gcm_aes_alg; +extern struct aead_alg nx_gcm4106_aes_alg; extern struct crypto_alg
nx_ctr_aes_alg; extern struct crypto_alg nx_ctr3686_aes_alg; extern struct crypto_alg nx_ccm_aes_alg; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 4d63e0d4da9a..b2024c95a3cf 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -362,7 +362,13 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req) static int omap_sham_hw_init(struct omap_sham_dev *dd) { - pm_runtime_get_sync(dd->dev); + int err; + + err = pm_runtime_get_sync(dd->dev); + if (err < 0) { + dev_err(dd->dev, "failed to get sync: %d\n", err); + return err; + } if (!test_bit(FLAGS_INIT, &dd->flags)) { set_bit(FLAGS_INIT, &dd->flags); @@ -1793,6 +1799,10 @@ static const struct of_device_id omap_sham_of_match[] = { .data = &omap_sham_pdata_omap2, }, { + .compatible = "ti,omap3-sham", + .data = &omap_sham_pdata_omap2, + }, + { .compatible = "ti,omap4-sham", .data = &omap_sham_pdata_omap4, }, @@ -1947,7 +1957,13 @@ static int omap_sham_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_irq_safe(dev); - pm_runtime_get_sync(dev); + + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get sync: %d\n", err); + goto err_pm; + } + rev = omap_sham_read(dd, SHA_REG_REV(dd)); pm_runtime_put_sync(&pdev->dev); @@ -1977,6 +1993,7 @@ err_algs: for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); +err_pm: pm_runtime_disable(dev); if (dd->dma_lch) dma_release_channel(dd->dma_lch); @@ -2019,7 +2036,11 @@ static int omap_sham_suspend(struct device *dev) static int omap_sham_resume(struct device *dev) { - pm_runtime_get_sync(dev); + int err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "failed to get sync: %d\n", err); + return err; + } return 0; } #endif diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index c178ed8c3908..da2d6777bd09 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -22,7 +22,7 @@ #include <asm/cpu_device_id.h> #include <asm/byteorder.h> #include <asm/processor.h> -#include <asm/i387.h> +#include <asm/fpu/api.h> /* * Number of data blocks actually fetched for each xcrypt insn. diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 95f7d27ce491..4e154c9b9206 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -23,7 +23,7 @@ #include <linux/kernel.h> #include <linux/scatterlist.h> #include <asm/cpu_device_id.h> -#include <asm/i387.h> +#include <asm/fpu/api.h> struct padlock_sha_desc { struct shash_desc fallback; diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 5da5b98b8f29..4f56f3681abd 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -15,7 +15,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/authenc.h> @@ -40,6 +40,7 @@ #include <linux/rtnetlink.h> #include <linux/scatterlist.h> #include <linux/sched.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/timer.h> @@ -261,18 +262,9 @@ static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx, } /* Count the number of scatterlist entries in a scatterlist. 
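The omap-sham hunks above replace bare pm_runtime_get_sync() calls with a checked pattern. In mainline, pm_runtime_get_sync() returns a negative errno on failure and 0 or 1 on success (1 meaning the device was already active), so only negative values abort. A user-space model with a stand-in for the real call:

#include <stdio.h>

/* stand-in for pm_runtime_get_sync(): <0 on error, 0 or 1 on success */
static int fake_pm_runtime_get_sync(int should_fail)
{
	return should_fail ? -13 /* -EACCES */ : 1; /* was already active */
}

static int hw_init(int should_fail)
{
	int err = fake_pm_runtime_get_sync(should_fail);

	if (err < 0) {
		fprintf(stderr, "failed to get sync: %d\n", err);
		return err;
	}
	/* ... safe to touch the IP block's registers here ... */
	return 0;
}

int main(void)
{
	printf("ok path: %d, error path: %d\n", hw_init(0), hw_init(1));
	return 0;
}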
*/ -static int sg_count(struct scatterlist *sg_list, int nbytes) +static inline int sg_count(struct scatterlist *sg_list, int nbytes) { - struct scatterlist *sg = sg_list; - int sg_nents = 0; - - while (nbytes > 0) { - ++sg_nents; - nbytes -= sg->length; - sg = sg_next(sg); - } - - return sg_nents; + return sg_nents_for_len(sg_list, nbytes); } static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len) @@ -326,6 +318,7 @@ static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) struct spacc_ddt *src_ddt, *dst_ddt; unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq)); unsigned nents = sg_count(areq->src, areq->cryptlen); + unsigned total; dma_addr_t iv_addr; struct scatterlist *cur; int i, dst_ents, src_ents, assoc_ents; @@ -369,11 +362,18 @@ static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) * Map the associated data. For decryption we don't copy the * associated data. */ + total = areq->assoclen; for_each_sg(areq->assoc, cur, assoc_ents, i) { - ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur)); + unsigned len = sg_dma_len(cur); + + if (len > total) + len = total; + + total -= len; + + ddt_set(src_ddt++, sg_dma_address(cur), len); if (req->is_encrypt) - ddt_set(dst_ddt++, sg_dma_address(cur), - sg_dma_len(cur)); + ddt_set(dst_ddt++, sg_dma_address(cur), len); } ddt_set(src_ddt++, iv_addr, ivsize); @@ -790,7 +790,8 @@ static int spacc_aead_cra_init(struct crypto_tfm *tfm) get_random_bytes(ctx->salt, sizeof(ctx->salt)); - tfm->crt_aead.reqsize = sizeof(struct spacc_req); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct spacc_req)); return 0; } @@ -1754,15 +1755,15 @@ static int spacc_probe(struct platform_device *pdev) return PTR_ERR(engine->clk); } - if (clk_enable(engine->clk)) { - dev_info(&pdev->dev, "unable to enable clk\n"); + if (clk_prepare_enable(engine->clk)) { + dev_info(&pdev->dev, "unable to prepare/enable clk\n"); clk_put(engine->clk); return -EIO; } err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); if (err) { - clk_disable(engine->clk); + clk_disable_unprepare(engine->clk); clk_put(engine->clk); return err; } @@ -1830,7 +1831,7 @@ static int spacc_remove(struct platform_device *pdev) crypto_unregister_alg(&alg->alg); } - clk_disable(engine->clk); + clk_disable_unprepare(engine->clk); clk_put(engine->clk); return 0; diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig index 49bede2a9f77..6fdb9e8b22a7 100644 --- a/drivers/crypto/qat/Kconfig +++ b/drivers/crypto/qat/Kconfig @@ -2,9 +2,8 @@ config CRYPTO_DEV_QAT tristate select CRYPTO_AEAD select CRYPTO_AUTHENC - select CRYPTO_ALGAPI - select CRYPTO_AES - select CRYPTO_CBC + select CRYPTO_BLKCIPHER + select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 @@ -13,7 +12,6 @@ config CRYPTO_DEV_QAT config CRYPTO_DEV_QAT_DH895xCC tristate "Support for Intel(R) DH895xCC" depends on X86 && PCI - default n select CRYPTO_DEV_QAT help Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index f22ce7169fa5..5fe902967620 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -48,7 +48,6 @@ #define ADF_ACCEL_DEVICES_H_ #include <linux/module.h> #include <linux/list.h> -#include <linux/proc_fs.h> #include <linux/io.h> #include "adf_cfg_common.h" diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h 
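The spacc_aead_make_ddts() change above bounds each associated-data DDT entry by the remaining assoclen instead of trusting sg_dma_len(), so an overlong scatterlist entry can no longer leak extra bytes into the descriptor chain. The clamping reduces to this runnable model (segment sizes made up):

#include <stdio.h>

int main(void)
{
	unsigned int sg_len[] = { 64, 64, 64 }; /* made-up DMA segment sizes */
	unsigned int total = 100;               /* areq->assoclen */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int len = sg_len[i];

		if (len > total)
			len = total;
		total -= len;
		printf("ddt[%u] = %u bytes\n", i, len); /* 64, then 36, then 0 */
	}
	return 0;
}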
b/drivers/crypto/qat/qat_common/adf_cfg_user.h index 0c38a155a865..ef5988afd4c6 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg_user.h +++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h @@ -54,14 +54,6 @@ struct adf_user_cfg_key_val { char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; union { - char *user_val_ptr; - uint64_t padding1; - }; - union { - struct adf_user_cfg_key_val *prev; - uint64_t padding2; - }; - union { struct adf_user_cfg_key_val *next; uint64_t padding3; }; @@ -75,10 +67,6 @@ struct adf_user_cfg_section { uint64_t padding1; }; union { - struct adf_user_cfg_section *prev; - uint64_t padding2; - }; - union { struct adf_user_cfg_section *next; uint64_t padding3; }; diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index 0666ee6a3360..27e16c09230b 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -53,6 +53,13 @@ #include "icp_qat_fw_loader_handle.h" #include "icp_qat_hal.h" +#define ADF_MAJOR_VERSION 0 +#define ADF_MINOR_VERSION 1 +#define ADF_BUILD_VERSION 3 +#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \ + __stringify(ADF_MINOR_VERSION) "." \ + __stringify(ADF_BUILD_VERSION) + #define ADF_STATUS_RESTARTING 0 #define ADF_STATUS_STARTING 1 #define ADF_STATUS_CONFIGURED 2 diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c index cb5f066e93a6..e056b9e9bf8a 100644 --- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c +++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c @@ -504,3 +504,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_ALIAS_CRYPTO("intel_qat"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 1dc5b0a17cf7..067402c7c2a9 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -47,7 +47,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/crypto.h> -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/sha.h> #include <crypto/hash.h> @@ -653,7 +653,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, } static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, - struct scatterlist *assoc, + struct scatterlist *assoc, int assoclen, struct scatterlist *sgl, struct scatterlist *sglout, uint8_t *iv, uint8_t ivlen, @@ -685,15 +685,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, for_each_sg(assoc, sg, assoc_n, i) { if (!sg->length) continue; - bufl->bufers[bufs].addr = dma_map_single(dev, - sg_virt(sg), - sg->length, - DMA_BIDIRECTIONAL); - bufl->bufers[bufs].len = sg->length; + + if (!(assoclen > 0)) + break; + + bufl->bufers[bufs].addr = + dma_map_single(dev, sg_virt(sg), + min_t(int, assoclen, sg->length), + DMA_BIDIRECTIONAL); + bufl->bufers[bufs].len = min_t(int, assoclen, sg->length); if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr))) goto err; bufs++; + assoclen -= sg->length; } + if (ivlen) { bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen, DMA_BIDIRECTIONAL); @@ -845,8 +851,9 @@ static int qat_alg_aead_dec(struct aead_request *areq) int digst_size = crypto_aead_crt(aead_tfm)->authsize; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, - areq->iv, AES_BLOCK_SIZE, qat_req); + ret 
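ADF_DRV_VERSION above is assembled with the kernel's __stringify(), which needs the usual two macro levels so the numeric macros expand before '#' stringizes them. A self-contained demonstration:

#include <stdio.h>

#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)

#define ADF_MAJOR_VERSION 0
#define ADF_MINOR_VERSION 1
#define ADF_BUILD_VERSION 3
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
			__stringify(ADF_MINOR_VERSION) "." \
			__stringify(ADF_BUILD_VERSION)

int main(void)
{
	puts(ADF_DRV_VERSION); /* prints 0.1.3 */
	return 0;
}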
= qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, + areq->src, areq->dst, areq->iv, + AES_BLOCK_SIZE, qat_req); if (unlikely(ret)) return ret; @@ -889,8 +896,9 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv, struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, - iv, AES_BLOCK_SIZE, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen, + areq->src, areq->dst, iv, AES_BLOCK_SIZE, + qat_req); if (unlikely(ret)) return ret; @@ -1017,7 +1025,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req) struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, NULL, 0, qat_req); if (unlikely(ret)) return ret; @@ -1055,7 +1063,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req) struct icp_qat_fw_la_bulk_req *msg; int ret, ctr = 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, + ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst, NULL, 0, qat_req); if (unlikely(ret)) return ret; @@ -1094,8 +1102,9 @@ static int qat_alg_aead_init(struct crypto_tfm *tfm, return -EFAULT; spin_lock_init(&ctx->lock); ctx->qat_hash_alg = hash; - tfm->crt_aead.reqsize = sizeof(struct aead_request) + - sizeof(struct qat_crypto_request); + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct aead_request) + + sizeof(struct qat_crypto_request)); ctx->tfm = tfm; return 0; } diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 9decea2779c6..1bde45b7a3c5 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -300,6 +300,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err; + pcie_set_readrq(pdev, 1024); + /* enable PCI device */ if (pci_enable_device(pdev)) { ret = -EFAULT; @@ -417,5 +419,6 @@ module_exit(adfdrv_release); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); -MODULE_FIRMWARE("qat_895xcc.bin"); +MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 6be377f6b9e7..397a500b3d8a 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -1578,8 +1578,12 @@ static int sahara_probe(struct platform_device *pdev) init_completion(&dev->dma_completion); - clk_prepare_enable(dev->clk_ipg); - clk_prepare_enable(dev->clk_ahb); + err = clk_prepare_enable(dev->clk_ipg); + if (err) + goto err_link; + err = clk_prepare_enable(dev->clk_ahb); + if (err) + goto clk_ipg_disable; version = sahara_read(dev, SAHARA_REG_VERSION); if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) { @@ -1619,10 +1623,11 @@ err_algs: dma_free_coherent(&pdev->dev, SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link), dev->hw_link[0], dev->hw_phys_link[0]); - clk_disable_unprepare(dev->clk_ipg); - clk_disable_unprepare(dev->clk_ahb); kthread_stop(dev->kthread); dev_ptr = NULL; + clk_disable_unprepare(dev->clk_ahb); +clk_ipg_disable: + clk_disable_unprepare(dev->clk_ipg); err_link: dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 857414afa29a..83aca95a95bc 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ 
-46,7 +46,7 @@ #include <crypto/des.h> #include <crypto/sha.h> #include <crypto/md5.h> -#include <crypto/aead.h> +#include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/skcipher.h> #include <crypto/hash.h> @@ -55,49 +55,92 @@ #include "talitos.h" -static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr) +static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, + bool is_sec1) { - talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); - talitos_ptr->eptr = upper_32_bits(dma_addr); + ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); + if (!is_sec1) + ptr->eptr = upper_32_bits(dma_addr); +} + +static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, + bool is_sec1) +{ + if (is_sec1) { + ptr->res = 0; + ptr->len1 = cpu_to_be16(len); + } else { + ptr->len = cpu_to_be16(len); + } +} + +static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr, + bool is_sec1) +{ + if (is_sec1) + return be16_to_cpu(ptr->len1); + else + return be16_to_cpu(ptr->len); +} + +static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1) +{ + if (!is_sec1) + ptr->j_extent = 0; } /* * map virtual single (contiguous) pointer to h/w descriptor pointer */ static void map_single_talitos_ptr(struct device *dev, - struct talitos_ptr *talitos_ptr, - unsigned short len, void *data, - unsigned char extent, + struct talitos_ptr *ptr, + unsigned int len, void *data, enum dma_data_direction dir) { dma_addr_t dma_addr = dma_map_single(dev, data, len, dir); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); - talitos_ptr->len = cpu_to_be16(len); - to_talitos_ptr(talitos_ptr, dma_addr); - talitos_ptr->j_extent = extent; + to_talitos_ptr_len(ptr, len, is_sec1); + to_talitos_ptr(ptr, dma_addr, is_sec1); + to_talitos_ptr_extent_clear(ptr, is_sec1); } /* * unmap bus single (contiguous) h/w descriptor pointer */ static void unmap_single_talitos_ptr(struct device *dev, - struct talitos_ptr *talitos_ptr, + struct talitos_ptr *ptr, enum dma_data_direction dir) { - dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr), - be16_to_cpu(talitos_ptr->len), dir); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + dma_unmap_single(dev, be32_to_cpu(ptr->ptr), + from_talitos_ptr_len(ptr, is_sec1), dir); } static int reset_channel(struct device *dev, int ch) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; + bool is_sec1 = has_ftr_sec1(priv); - setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET); + if (is_sec1) { + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, + TALITOS1_CCCR_LO_RESET); - while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET) - && --timeout) - cpu_relax(); + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) & + TALITOS1_CCCR_LO_RESET) && --timeout) + cpu_relax(); + } else { + setbits32(priv->chan[ch].reg + TALITOS_CCCR, + TALITOS2_CCCR_RESET); + + while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & + TALITOS2_CCCR_RESET) && --timeout) + cpu_relax(); + } if (timeout == 0) { dev_err(dev, "failed to reset channel %d\n", ch); @@ -120,11 +163,12 @@ static int reset_device(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - u32 mcr = TALITOS_MCR_SWR; + bool is_sec1 = has_ftr_sec1(priv); + u32 mcr = is_sec1 ? 
TALITOS1_MCR_SWR : TALITOS2_MCR_SWR; setbits32(priv->reg + TALITOS_MCR, mcr); - while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR) + while ((in_be32(priv->reg + TALITOS_MCR) & mcr) && --timeout) cpu_relax(); @@ -148,6 +192,7 @@ static int init_device(struct device *dev) { struct talitos_private *priv = dev_get_drvdata(dev); int ch, err; + bool is_sec1 = has_ftr_sec1(priv); /* * Master reset @@ -171,12 +216,19 @@ static int init_device(struct device *dev) } /* enable channel done and error interrupts */ - setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT); - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); + if (is_sec1) { + clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT); + clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); + /* disable parity error check in DEU (erroneous? test vect.) */ + setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE); + } else { + setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT); + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); + } /* disable integrity check error interrupts (use writeback instead) */ if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) - setbits32(priv->reg + TALITOS_MDEUICR_LO, + setbits32(priv->reg_mdeu + TALITOS_EUICR_LO, TALITOS_MDEUICR_LO_ICE); return 0; @@ -204,6 +256,7 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, struct talitos_request *request; unsigned long flags; int head; + bool is_sec1 = has_ftr_sec1(priv); spin_lock_irqsave(&priv->chan[ch].head_lock, flags); @@ -217,8 +270,17 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, request = &priv->chan[ch].fifo[head]; /* map descriptor and save caller data */ - request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), - DMA_BIDIRECTIONAL); + if (is_sec1) { + desc->hdr1 = desc->hdr; + desc->next_desc = 0; + request->dma_desc = dma_map_single(dev, &desc->hdr1, + TALITOS_DESC_SIZE, + DMA_BIDIRECTIONAL); + } else { + request->dma_desc = dma_map_single(dev, desc, + TALITOS_DESC_SIZE, + DMA_BIDIRECTIONAL); + } request->callback = callback; request->context = context; @@ -250,16 +312,21 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) struct talitos_request *request, saved_req; unsigned long flags; int tail, status; + bool is_sec1 = has_ftr_sec1(priv); spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); tail = priv->chan[ch].tail; while (priv->chan[ch].fifo[tail].desc) { + __be32 hdr; + request = &priv->chan[ch].fifo[tail]; /* descriptors with their done bits set don't get the error */ rmb(); - if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE) + hdr = is_sec1 ? 
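The to_talitos_ptr*() helpers introduced above hide the fact that SEC1 and SEC2 lay out the descriptor pointer differently: SEC2 has len/j_extent/eptr, while SEC1 has res/len1 and neither an extent byte nor the high address bits. A plausible model of the dual layout, inferred from the accessors in this diff (the authoritative struct lives in talitos.h, and big-endian handling is omitted here):

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

struct talitos_ptr {
	union {
		struct {              /* SEC2 view */
			uint16_t len;
			uint8_t j_extent;
			uint8_t eptr; /* address bits 32-39 */
		};
		struct {              /* SEC1 view */
			uint16_t res;
			uint16_t len1;
		};
	};
	uint32_t ptr;                 /* low 32 address bits */
};

static void to_talitos_ptr_len(struct talitos_ptr *p, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		p->res = 0;
		p->len1 = (uint16_t)len;
	} else {
		p->len = (uint16_t)len;
	}
}

int main(void)
{
	struct talitos_ptr p;

	memset(&p, 0, sizeof(p));
	to_talitos_ptr_len(&p, 128, true);
	printf("SEC1 len1 = %u\n", p.len1);
	return 0;
}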
request->desc->hdr1 : request->desc->hdr; + + if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) status = 0; else if (!error) @@ -268,7 +335,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) status = error; dma_unmap_single(dev, request->dma_desc, - sizeof(struct talitos_desc), + TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); /* copy entries so we can call callback outside lock */ @@ -302,8 +369,37 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) /* * process completed requests for channels that have done status */ -#define DEF_TALITOS_DONE(name, ch_done_mask) \ -static void talitos_done_##name(unsigned long data) \ +#define DEF_TALITOS1_DONE(name, ch_done_mask) \ +static void talitos1_done_##name(unsigned long data) \ +{ \ + struct device *dev = (struct device *)data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + unsigned long flags; \ + \ + if (ch_done_mask & 0x10000000) \ + flush_channel(dev, 0, 0, 0); \ + if (priv->num_channels == 1) \ + goto out; \ + if (ch_done_mask & 0x40000000) \ + flush_channel(dev, 1, 0, 0); \ + if (ch_done_mask & 0x00010000) \ + flush_channel(dev, 2, 0, 0); \ + if (ch_done_mask & 0x00040000) \ + flush_channel(dev, 3, 0, 0); \ + \ +out: \ + /* At this point, all completed channels have been processed */ \ + /* Unmask done interrupts for channels completed later on. */ \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ +} + +DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) + +#define DEF_TALITOS2_DONE(name, ch_done_mask) \ +static void talitos2_done_##name(unsigned long data) \ { \ struct device *dev = (struct device *)data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ @@ -325,12 +421,13 @@ out: \ /* Unmask done interrupts for channels completed later on. 
*/ \ spin_lock_irqsave(&priv->reg_lock, flags); \ setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ - setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ + setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \ spin_unlock_irqrestore(&priv->reg_lock, flags); \ } -DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) -DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) -DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE) + +DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) +DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) +DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) /* * locate current (offending) descriptor @@ -377,44 +474,44 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) switch (desc_hdr & DESC_HDR_SEL0_MASK) { case DESC_HDR_SEL0_AFEU: dev_err(dev, "AFEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_AFEUISR), - in_be32(priv->reg + TALITOS_AFEUISR_LO)); + in_be32(priv->reg_afeu + TALITOS_EUISR), + in_be32(priv->reg_afeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_DEU: dev_err(dev, "DEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_DEUISR), - in_be32(priv->reg + TALITOS_DEUISR_LO)); + in_be32(priv->reg_deu + TALITOS_EUISR), + in_be32(priv->reg_deu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_MDEUA: case DESC_HDR_SEL0_MDEUB: dev_err(dev, "MDEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_MDEUISR), - in_be32(priv->reg + TALITOS_MDEUISR_LO)); + in_be32(priv->reg_mdeu + TALITOS_EUISR), + in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_RNG: dev_err(dev, "RNGUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_RNGUISR), - in_be32(priv->reg + TALITOS_RNGUISR_LO)); + in_be32(priv->reg_rngu + TALITOS_ISR), + in_be32(priv->reg_rngu + TALITOS_ISR_LO)); break; case DESC_HDR_SEL0_PKEU: dev_err(dev, "PKEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_PKEUISR), - in_be32(priv->reg + TALITOS_PKEUISR_LO)); + in_be32(priv->reg_pkeu + TALITOS_EUISR), + in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_AESU: dev_err(dev, "AESUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_AESUISR), - in_be32(priv->reg + TALITOS_AESUISR_LO)); + in_be32(priv->reg_aesu + TALITOS_EUISR), + in_be32(priv->reg_aesu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_CRCU: dev_err(dev, "CRCUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_CRCUISR), - in_be32(priv->reg + TALITOS_CRCUISR_LO)); + in_be32(priv->reg_crcu + TALITOS_EUISR), + in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL0_KEU: dev_err(dev, "KEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_KEUISR), - in_be32(priv->reg + TALITOS_KEUISR_LO)); + in_be32(priv->reg_pkeu + TALITOS_EUISR), + in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); break; } @@ -422,13 +519,13 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) case DESC_HDR_SEL1_MDEUA: case DESC_HDR_SEL1_MDEUB: dev_err(dev, "MDEUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_MDEUISR), - in_be32(priv->reg + TALITOS_MDEUISR_LO)); + in_be32(priv->reg_mdeu + TALITOS_EUISR), + in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); break; case DESC_HDR_SEL1_CRCU: dev_err(dev, "CRCUISR 0x%08x_%08x\n", - in_be32(priv->reg + TALITOS_CRCUISR), - in_be32(priv->reg + TALITOS_CRCUISR_LO)); + in_be32(priv->reg_crcu + TALITOS_EUISR), + in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); break; } @@ -445,17 +542,24 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) { struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - int ch, error, reset_dev = 0, reset_ch 
= 0; - u32 v, v_lo; + int ch, error, reset_dev = 0; + u32 v_lo; + bool is_sec1 = has_ftr_sec1(priv); + int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ for (ch = 0; ch < priv->num_channels; ch++) { /* skip channels without errors */ - if (!(isr & (1 << (ch * 2 + 1)))) - continue; + if (is_sec1) { + /* bits 29, 31, 17, 19 */ + if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6)))) + continue; + } else { + if (!(isr & (1 << (ch * 2 + 1)))) + continue; + } error = -EINVAL; - v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR); v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); if (v_lo & TALITOS_CCPSR_LO_DOF) { @@ -471,23 +575,28 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) if (v_lo & TALITOS_CCPSR_LO_MDTE) dev_err(dev, "master data transfer error\n"); if (v_lo & TALITOS_CCPSR_LO_SGDLZ) - dev_err(dev, "s/g data length zero error\n"); + dev_err(dev, is_sec1 ? "pointer not complete error\n" : "s/g data length zero error\n"); if (v_lo & TALITOS_CCPSR_LO_FPZ) - dev_err(dev, "fetch pointer zero error\n"); + dev_err(dev, is_sec1 ? "parity error\n" : "fetch pointer zero error\n"); if (v_lo & TALITOS_CCPSR_LO_IDH) dev_err(dev, "illegal descriptor header error\n"); if (v_lo & TALITOS_CCPSR_LO_IEU) - dev_err(dev, "invalid execution unit error\n"); + dev_err(dev, is_sec1 ? "static assignment error\n" : "invalid exec unit error\n"); if (v_lo & TALITOS_CCPSR_LO_EU) report_eu_error(dev, ch, current_desc_hdr(dev, ch)); - if (v_lo & TALITOS_CCPSR_LO_GB) - dev_err(dev, "gather boundary error\n"); - if (v_lo & TALITOS_CCPSR_LO_GRL) - dev_err(dev, "gather return/length error\n"); - if (v_lo & TALITOS_CCPSR_LO_SB) - dev_err(dev, "scatter boundary error\n"); - if (v_lo & TALITOS_CCPSR_LO_SRL) - dev_err(dev, "scatter return/length error\n"); + if (!is_sec1) { + if (v_lo & TALITOS_CCPSR_LO_GB) + dev_err(dev, "gather boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_GRL) + dev_err(dev, "gather return/length error\n"); + if (v_lo & TALITOS_CCPSR_LO_SB) + dev_err(dev, "scatter boundary error\n"); + if (v_lo & TALITOS_CCPSR_LO_SRL) + dev_err(dev, "scatter return/length error\n"); + } flush_channel(dev, ch, error, reset_ch); @@ -495,10 +604,10 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) reset_channel(dev, ch); } else { setbits32(priv->chan[ch].reg + TALITOS_CCCR, - TALITOS_CCCR_CONT); + TALITOS2_CCCR_CONT); setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & - TALITOS_CCCR_CONT) && --timeout) + TALITOS2_CCCR_CONT) && --timeout) cpu_relax(); if (timeout == 0) { dev_err(dev, "failed to restart channel %d\n", @@ -507,9 +616,14 @@ } } } - if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) { - dev_err(dev, "done overflow, internal time out, or rngu error: " "ISR 0x%08x_%08x\n", isr, isr_lo); + if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || + (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { + if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) + dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", + isr, isr_lo); + else + dev_err(dev, "done overflow, internal time out, or " + "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); /* purge request queues */ for (ch = 0; ch < priv->num_channels; ch++) @@ -520,8 +634,43 @@ } } -#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ -static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
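The SEC1 per-channel error bits are not contiguous; the expression in the hunk above maps channels 0 through 3 to bits 29, 31, 17 and 19 exactly as the comment claims, which this snippet verifies:

#include <stdio.h>

int main(void)
{
	for (int ch = 0; ch < 4; ch++)
		printf("channel %d -> error bit %d\n", ch,
		       29 + (ch & 1) * 2 - (ch & 2) * 6);
	return 0; /* prints 29, 31, 17, 19 */
}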
+#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ +{ \ + struct device *dev = data; \ + struct talitos_private *priv = dev_get_drvdata(dev); \ + u32 isr, isr_lo; \ + unsigned long flags; \ + \ + spin_lock_irqsave(&priv->reg_lock, flags); \ + isr = in_be32(priv->reg + TALITOS_ISR); \ + isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ + /* Acknowledge interrupt */ \ + out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ + out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ + \ + if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + talitos_error(dev, isr & ch_err_mask, isr_lo); \ + } \ + else { \ + if (likely(isr & ch_done_mask)) { \ + /* mask further done interrupts. */ \ + setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ + /* done_task will unmask done interrupts at exit */ \ + tasklet_schedule(&priv->done_task[tlet]); \ + } \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + } \ + \ + return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ + IRQ_NONE; \ +} + +DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0) + +#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ +static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \ { \ struct device *dev = data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ @@ -552,9 +701,12 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ IRQ_NONE; \ } -DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0) -DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0) -DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1) + +DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0) +DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR, + 0) +DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR, + 1) /* * hwrng @@ -567,7 +719,7 @@ static int talitos_rng_data_present(struct hwrng *rng, int wait) int i; for (i = 0; i < 20; i++) { - ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) & + ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) & TALITOS_RNGUSR_LO_OFL; if (ofl || !wait) break; @@ -583,8 +735,8 @@ static int talitos_rng_data_read(struct hwrng *rng, u32 *data) struct talitos_private *priv = dev_get_drvdata(dev); /* rng fifo requires 64-bit accesses */ - *data = in_be32(priv->reg + TALITOS_RNGU_FIFO); - *data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO); + *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO); + *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO); return sizeof(u32); } @@ -595,8 +747,9 @@ static int talitos_rng_init(struct hwrng *rng) struct talitos_private *priv = dev_get_drvdata(dev); unsigned int timeout = TALITOS_TIMEOUT; - setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR); - while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD) + setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR); + while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO) + & TALITOS_RNGUSR_LO_RD) && --timeout) cpu_relax(); if (timeout == 0) { @@ -605,7 +758,7 @@ static int talitos_rng_init(struct hwrng *rng) } /* start generating */ - setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0); + setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0); return 0; } @@ -661,7 +814,7 @@ 
struct talitos_ahash_req_ctx { unsigned int first; unsigned int last; unsigned int to_hash_later; - u64 nbuf; + unsigned int nbuf; struct scatterlist bufsl[2]; struct scatterlist *psrc; }; @@ -712,9 +865,10 @@ badkey: * @dst_chained: whether dst is chained or not * @iv_dma: dma address of iv for checking continuity and link table * @dma_len: length of dma mapped link_tbl space - * @dma_link_tbl: bus physical address of link_tbl + * @dma_link_tbl: bus physical address of link_tbl/buf * @desc: h/w descriptor - * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) + * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1) * * if decrypting (with authcheck), or either one of src_nents or dst_nents * is greater than 1, an integrity check value is concatenated to the end @@ -731,7 +885,10 @@ struct talitos_edesc { int dma_len; dma_addr_t dma_link_tbl; struct talitos_desc desc; - struct talitos_ptr link_tbl[0]; + union { + struct talitos_ptr link_tbl[0]; + u8 buf[0]; + }; }; static int talitos_map_sg(struct device *dev, struct scatterlist *sg, @@ -907,8 +1064,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, { int n_sg = sg_count; - while (n_sg--) { - to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg)); + while (sg && n_sg--) { + to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0); link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); link_tbl_ptr->j_extent = 0; link_tbl_ptr++; @@ -925,7 +1082,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count, sg_count--; link_tbl_ptr--; } - be16_add_cpu(&link_tbl_ptr->len, cryptlen); + link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len) + + cryptlen); /* tag end of link table */ link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; @@ -953,7 +1111,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* hmac key */ map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, - 0, DMA_TO_DEVICE); + DMA_TO_DEVICE); /* hmac data */ desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize); @@ -962,7 +1120,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; /* assoc_nents - 1 entries for assoc, 1 for IV */ @@ -973,7 +1131,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, tbl_ptr += sg_count - 1; tbl_ptr->j_extent = 0; tbl_ptr++; - to_talitos_ptr(tbl_ptr, edesc->iv_dma); + to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0); tbl_ptr->len = cpu_to_be16(ivsize); tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; @@ -982,14 +1140,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, } else { if (areq->assoclen) to_talitos_ptr(&desc->ptr[1], - sg_dma_address(areq->assoc)); + sg_dma_address(areq->assoc), 0); else - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0); desc->ptr[1].j_extent = 0; } /* cipher iv */ - to_talitos_ptr(&desc->ptr[2], edesc->iv_dma); + to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0); desc->ptr[2].len = cpu_to_be16(ivsize); desc->ptr[2].j_extent = 0; /* Sync needed for the aead_givencrypt case */ @@ -997,7 +1155,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, /* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, - (char *)&ctx->key + ctx->authkeylen, 0, + (char *)&ctx->key + ctx->authkeylen, DMA_TO_DEVICE); /* @@ -1015,7 +1173,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, edesc->src_chained); if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src)); + to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); } else { sg_link_tbl_len = cryptlen; @@ -1026,14 +1184,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, &edesc->link_tbl[0]); if (sg_count > 1) { desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl); + to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0); dma_sync_single_for_device(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } else { /* Only one segment now, so no link tbl needed */ to_talitos_ptr(&desc->ptr[4], - sg_dma_address(areq->src)); + sg_dma_address(areq->src), 0); } } @@ -1047,13 +1205,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, DMA_FROM_DEVICE, edesc->dst_chained); if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst)); + to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); } else { int tbl_off = edesc->src_nents + 1; struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + - tbl_off * sizeof(struct talitos_ptr)); + tbl_off * sizeof(struct talitos_ptr), 0); sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, tbl_ptr); @@ -1068,14 +1226,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + (tbl_off + edesc->dst_nents + 1 + edesc->assoc_nents) * - sizeof(struct talitos_ptr)); + sizeof(struct talitos_ptr), 0); desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); } /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, + map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, DMA_FROM_DEVICE); ret = talitos_submit(dev, ctx->ch, desc, callback, areq); @@ -1095,7 +1253,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained) int sg_nents = 0; *chained = false; - while (nbytes > 0) { + while (nbytes > 0 && sg) { sg_nents++; nbytes -= sg->length; if (!sg_is_last(sg) && (sg + 1)->length == 0) @@ -1128,8 +1286,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, dma_addr_t iv_dma = 0; gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; - if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) { + if (cryptlen + authsize > max_len) { dev_err(dev, "length exceeds h/w max limit\n"); return ERR_PTR(-EINVAL); } @@ -1173,8 +1334,12 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, */ alloc_len = sizeof(struct talitos_edesc); if (assoc_nents || src_nents || dst_nents) { - dma_len = (src_nents + dst_nents + 2 + assoc_nents) * - sizeof(struct talitos_ptr) + authsize; + if (is_sec1) + dma_len = (src_nents ? cryptlen : 0) + + (dst_nents ? 
cryptlen : 0); + else + dma_len = (src_nents + dst_nents + 2 + assoc_nents) * + sizeof(struct talitos_ptr) + authsize; alloc_len += dma_len; } else { dma_len = 0; @@ -1327,16 +1492,43 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, return 0; } +static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, + struct scatterlist *dst, unsigned int len, + struct talitos_edesc *edesc) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + if (is_sec1) { + if (!edesc->src_nents) { + dma_unmap_sg(dev, src, 1, + dst != src ? DMA_TO_DEVICE + : DMA_BIDIRECTIONAL); + } + if (dst && edesc->dst_nents) { + dma_sync_single_for_device(dev, + edesc->dma_link_tbl + len, + len, DMA_FROM_DEVICE); + sg_copy_from_buffer(dst, edesc->dst_nents ? : 1, + edesc->buf + len, len); + } else if (dst && dst != src) { + dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE); + } + } else { + talitos_sg_unmap(dev, edesc, src, dst); + } +} + static void common_nonsnoop_unmap(struct device *dev, struct talitos_edesc *edesc, struct ablkcipher_request *areq) { unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + + unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, areq->src, areq->dst); - if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1358,6 +1550,102 @@ static void ablkcipher_done(struct device *dev, areq->base.complete(&areq->base, err); } +int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, + unsigned int len, struct talitos_edesc *edesc, + enum dma_data_direction dir, struct talitos_ptr *ptr) +{ + int sg_count; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + to_talitos_ptr_len(ptr, len, is_sec1); + + if (is_sec1) { + sg_count = edesc->src_nents ? : 1; + + if (sg_count == 1) { + dma_map_sg(dev, src, 1, dir); + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); + } else { + sg_copy_to_buffer(src, sg_count, edesc->buf, len); + to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + len, DMA_TO_DEVICE); + } + } else { + to_talitos_ptr_extent_clear(ptr, is_sec1); + + sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, + edesc->src_chained); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); + } else { + sg_count = sg_to_link_tbl(src, sg_count, len, + &edesc->link_tbl[0]); + if (sg_count > 1) { + to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + dma_sync_single_for_device(dev, + edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } else { + /* Only one segment now, so no link tbl needed*/ + to_talitos_ptr(ptr, sg_dma_address(src), + is_sec1); + } + } + } + return sg_count; +} + +void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, + unsigned int len, struct talitos_edesc *edesc, + enum dma_data_direction dir, + struct talitos_ptr *ptr, int sg_count) +{ + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); + + if (dir != DMA_NONE) + sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? 
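On SEC1, map_sg_in_talitos_ptr() and map_sg_out_talitos_ptr() fall back to a flat bounce buffer because the hardware has no link tables: multi-entry scatterlists are copied into edesc->buf before the operation and copied back out afterwards. The gather half of that strategy, modeled in user space:

#include <stdio.h>
#include <string.h>

struct seg { const char *data; unsigned int len; }; /* stands in for an sg entry */

/* flatten all segments into one contiguous buffer, as the SEC1 path
 * does with sg_copy_to_buffer() before pointing the engine at it */
static unsigned int gather(char *buf, const struct seg *sg, int n)
{
	unsigned int off = 0;

	for (int i = 0; i < n; i++) {
		memcpy(buf + off, sg[i].data, sg[i].len);
		off += sg[i].len;
	}
	return off;
}

int main(void)
{
	struct seg sg[] = { { "abc", 3 }, { "defg", 4 } };
	char buf[16];
	unsigned int len = gather(buf, sg, 2);

	printf("%.*s (%u bytes)\n", (int)len, buf, len);
	return 0;
}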
: 1, + dir, edesc->dst_chained); + + to_talitos_ptr_len(ptr, len, is_sec1); + + if (is_sec1) { + if (sg_count == 1) { + if (dir != DMA_NONE) + dma_map_sg(dev, dst, 1, dir); + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + } else { + to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1); + dma_sync_single_for_device(dev, + edesc->dma_link_tbl + len, + len, DMA_FROM_DEVICE); + } + } else { + to_talitos_ptr_extent_clear(ptr, is_sec1); + + if (sg_count == 1) { + to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); + } else { + struct talitos_ptr *link_tbl_ptr = + &edesc->link_tbl[edesc->src_nents + 1]; + + to_talitos_ptr(ptr, edesc->dma_link_tbl + + (edesc->src_nents + 1) * + sizeof(struct talitos_ptr), 0); + ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; + sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); + dma_sync_single_for_device(dev, edesc->dma_link_tbl, + edesc->dma_len, + DMA_BIDIRECTIONAL); + } + } +} + static int common_nonsnoop(struct talitos_edesc *edesc, struct ablkcipher_request *areq, void (*callback) (struct device *dev, @@ -1371,83 +1659,41 @@ static int common_nonsnoop(struct talitos_edesc *edesc, unsigned int cryptlen = areq->nbytes; unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); int sg_count, ret; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* first DWORD empty */ - desc->ptr[0].len = 0; - to_talitos_ptr(&desc->ptr[0], 0); - desc->ptr[0].j_extent = 0; + desc->ptr[0] = zero_entry; /* cipher iv */ - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma); - desc->ptr[1].len = cpu_to_be16(ivsize); - desc->ptr[1].j_extent = 0; + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); + to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); + to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); /* cipher key */ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, - (char *)&ctx->key, 0, DMA_TO_DEVICE); + (char *)&ctx->key, DMA_TO_DEVICE); /* * cipher in */ - desc->ptr[3].len = cpu_to_be16(cryptlen); - desc->ptr[3].j_extent = 0; - - sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1, - (areq->src == areq->dst) ? DMA_BIDIRECTIONAL - : DMA_TO_DEVICE, - edesc->src_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src)); - } else { - sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, - &edesc->link_tbl[0]); - if (sg_count > 1) { - to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); - desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; - dma_sync_single_for_device(dev, edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[3], - sg_dma_address(areq->src)); - } - } + sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc, + (areq->src == areq->dst) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE, + &desc->ptr[3]); /* cipher out */ - desc->ptr[4].len = cpu_to_be16(cryptlen); - desc->ptr[4].j_extent = 0; - - if (areq->src != areq->dst) - sg_count = talitos_map_sg(dev, areq->dst, - edesc->dst_nents ? 
: 1, - DMA_FROM_DEVICE, edesc->dst_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst)); - } else { - struct talitos_ptr *link_tbl_ptr = - &edesc->link_tbl[edesc->src_nents + 1]; - - to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + - (edesc->src_nents + 1) * - sizeof(struct talitos_ptr)); - desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; - sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, - link_tbl_ptr); - dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, - edesc->dma_len, DMA_BIDIRECTIONAL); - } + map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc, + (areq->src == areq->dst) ? DMA_NONE + : DMA_FROM_DEVICE, + &desc->ptr[4], sg_count); /* iv out */ - map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0, + map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, DMA_FROM_DEVICE); /* last DWORD empty */ - desc->ptr[6].len = 0; - to_talitos_ptr(&desc->ptr[6], 0); - desc->ptr[6].j_extent = 0; + desc->ptr[6] = zero_entry; ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { @@ -1507,20 +1753,22 @@ static void common_nonsnoop_hash_unmap(struct device *dev, struct ahash_request *areq) { struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc); + /* When using hashctx-in, must unmap it. */ - if (edesc->desc.ptr[1].len) + if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); - if (edesc->desc.ptr[2].len) + if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1)) unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); - talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL); - if (edesc->dma_len) dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); @@ -1548,6 +1796,27 @@ static void ahash_done(struct device *dev, areq->base.complete(&areq->base, err); } +/* + * SEC1 doesn't like hashing of 0 sized message, so we do the padding + * ourself and submit a padded block + */ +void talitos_handle_buggy_hash(struct talitos_ctx *ctx, + struct talitos_edesc *edesc, + struct talitos_ptr *ptr) +{ + static u8 padded_hash[64] = { + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }; + + pr_err_once("Bug in SEC1, padding ourself\n"); + edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD; + map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash), + (char *)padded_hash, DMA_TO_DEVICE); +} + static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct ahash_request *areq, unsigned int length, void (*callback) (struct device *dev, @@ -1559,7 +1828,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct device *dev = ctx->dev; struct talitos_desc *desc = &edesc->desc; - int sg_count, ret; + int ret; + struct talitos_private *priv = dev_get_drvdata(dev); + bool is_sec1 = has_ftr_sec1(priv); /* first DWORD empty */ desc->ptr[0] = zero_entry; @@ -1568,7 +1839,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (!req_ctx->first || req_ctx->swinit) { map_single_talitos_ptr(dev, &desc->ptr[1], req_ctx->hw_context_size, - (char *)req_ctx->hw_context, 
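/*
 * Why the padded_hash[] constant above is correct: for the 64-byte-block
 * digests SEC1 handles (MD5, SHA-1, SHA-256), padding a zero-length message
 * appends the 0x80 marker bit, zero-fills to byte 56, then stores the 64-bit
 * message bit length, which is 0 here (so the MD5 vs SHA byte-order
 * difference is moot) -- exactly 0x80 followed by 63 zero bytes. A
 * standalone check, not driver code:
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char pad[64] = { 0 };
	unsigned long long bitlen = 0;		/* empty message */

	pad[0] = 0x80;				/* mandatory 1-bit marker */
	for (int i = 0; i < 8; i++)		/* 64-bit length at bytes 56..63 */
		pad[56 + i] = (unsigned char)(bitlen >> (8 * (7 - i)));

	unsigned char expected[64] = { 0x80 };	/* rest implicitly zero */
	assert(memcmp(pad, expected, sizeof(pad)) == 0);
	puts("matches the talitos padded_hash block");
	return 0;
}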
0, + (char *)req_ctx->hw_context, DMA_TO_DEVICE); req_ctx->swinit = 0; } else { @@ -1580,38 +1851,15 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, /* HMAC key */ if (ctx->keylen) map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, - (char *)&ctx->key, 0, DMA_TO_DEVICE); + (char *)&ctx->key, DMA_TO_DEVICE); else desc->ptr[2] = zero_entry; /* * data in */ - desc->ptr[3].len = cpu_to_be16(length); - desc->ptr[3].j_extent = 0; - - sg_count = talitos_map_sg(dev, req_ctx->psrc, - edesc->src_nents ? : 1, - DMA_TO_DEVICE, edesc->src_chained); - - if (sg_count == 1) { - to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc)); - } else { - sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length, - &edesc->link_tbl[0]); - if (sg_count > 1) { - desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; - to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl); - dma_sync_single_for_device(ctx->dev, - edesc->dma_link_tbl, - edesc->dma_len, - DMA_BIDIRECTIONAL); - } else { - /* Only one segment now, so no link tbl needed */ - to_talitos_ptr(&desc->ptr[3], - sg_dma_address(req_ctx->psrc)); - } - } + map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, + DMA_TO_DEVICE, &desc->ptr[3]); /* fifth DWORD empty */ desc->ptr[4] = zero_entry; @@ -1620,15 +1868,18 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, if (req_ctx->last) map_single_talitos_ptr(dev, &desc->ptr[5], crypto_ahash_digestsize(tfm), - areq->result, 0, DMA_FROM_DEVICE); + areq->result, DMA_FROM_DEVICE); else map_single_talitos_ptr(dev, &desc->ptr[5], req_ctx->hw_context_size, - req_ctx->hw_context, 0, DMA_FROM_DEVICE); + req_ctx->hw_context, DMA_FROM_DEVICE); /* last DWORD empty */ desc->ptr[6] = zero_entry; + if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) + talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); + ret = talitos_submit(dev, ctx->ch, desc, callback, areq); if (ret != -EINPROGRESS) { common_nonsnoop_hash_unmap(dev, edesc, areq); @@ -2561,6 +2812,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, break; default: dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); + kfree(t_alg); return ERR_PTR(-EINVAL); } @@ -2581,29 +2833,35 @@ static int talitos_probe_irq(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct talitos_private *priv = dev_get_drvdata(dev); int err; + bool is_sec1 = has_ftr_sec1(priv); priv->irq[0] = irq_of_parse_and_map(np, 0); if (!priv->irq[0]) { dev_err(dev, "failed to map irq\n"); return -EINVAL; } + if (is_sec1) { + err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0, + dev_driver_string(dev), dev); + goto primary_out; + } priv->irq[1] = irq_of_parse_and_map(np, 1); /* get the primary irq line */ if (!priv->irq[1]) { - err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0, + err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0, dev_driver_string(dev), dev); goto primary_out; } - err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0, + err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0, dev_driver_string(dev), dev); if (err) goto primary_out; /* get the secondary irq line */ - err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0, + err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0, dev_driver_string(dev), dev); if (err) { dev_err(dev, "failed to request secondary irq\n"); @@ -2630,6 +2888,7 @@ static int talitos_probe(struct platform_device *ofdev) struct talitos_private *priv; const unsigned int *prop; int i, err; + int stride; priv = kzalloc(sizeof(struct 
talitos_private), GFP_KERNEL); if (!priv) @@ -2643,20 +2902,6 @@ static int talitos_probe(struct platform_device *ofdev) spin_lock_init(&priv->reg_lock); - err = talitos_probe_irq(ofdev); - if (err) - goto err_out; - - if (!priv->irq[1]) { - tasklet_init(&priv->done_task[0], talitos_done_4ch, - (unsigned long)dev); - } else { - tasklet_init(&priv->done_task[0], talitos_done_ch0_2, - (unsigned long)dev); - tasklet_init(&priv->done_task[1], talitos_done_ch1_3, - (unsigned long)dev); - } - priv->reg = of_iomap(np, 0); if (!priv->reg) { dev_err(dev, "failed to of_iomap\n"); @@ -2696,6 +2941,53 @@ static int talitos_probe(struct platform_device *ofdev) TALITOS_FTR_SHA224_HWINIT | TALITOS_FTR_HMAC_OK; + if (of_device_is_compatible(np, "fsl,sec1.0")) + priv->features |= TALITOS_FTR_SEC1; + + if (of_device_is_compatible(np, "fsl,sec1.2")) { + priv->reg_deu = priv->reg + TALITOS12_DEU; + priv->reg_aesu = priv->reg + TALITOS12_AESU; + priv->reg_mdeu = priv->reg + TALITOS12_MDEU; + stride = TALITOS1_CH_STRIDE; + } else if (of_device_is_compatible(np, "fsl,sec1.0")) { + priv->reg_deu = priv->reg + TALITOS10_DEU; + priv->reg_aesu = priv->reg + TALITOS10_AESU; + priv->reg_mdeu = priv->reg + TALITOS10_MDEU; + priv->reg_afeu = priv->reg + TALITOS10_AFEU; + priv->reg_rngu = priv->reg + TALITOS10_RNGU; + priv->reg_pkeu = priv->reg + TALITOS10_PKEU; + stride = TALITOS1_CH_STRIDE; + } else { + priv->reg_deu = priv->reg + TALITOS2_DEU; + priv->reg_aesu = priv->reg + TALITOS2_AESU; + priv->reg_mdeu = priv->reg + TALITOS2_MDEU; + priv->reg_afeu = priv->reg + TALITOS2_AFEU; + priv->reg_rngu = priv->reg + TALITOS2_RNGU; + priv->reg_pkeu = priv->reg + TALITOS2_PKEU; + priv->reg_keu = priv->reg + TALITOS2_KEU; + priv->reg_crcu = priv->reg + TALITOS2_CRCU; + stride = TALITOS2_CH_STRIDE; + } + + err = talitos_probe_irq(ofdev); + if (err) + goto err_out; + + if (of_device_is_compatible(np, "fsl,sec1.0")) { + tasklet_init(&priv->done_task[0], talitos1_done_4ch, + (unsigned long)dev); + } else { + if (!priv->irq[1]) { + tasklet_init(&priv->done_task[0], talitos2_done_4ch, + (unsigned long)dev); + } else { + tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, + (unsigned long)dev); + tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, + (unsigned long)dev); + } + } + priv->chan = kzalloc(sizeof(struct talitos_channel) * priv->num_channels, GFP_KERNEL); if (!priv->chan) { @@ -2707,7 +2999,7 @@ static int talitos_probe(struct platform_device *ofdev) priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); for (i = 0; i < priv->num_channels; i++) { - priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1); + priv->chan[i].reg = priv->reg + stride * (i + 1); if (!priv->irq[1] || !(i & 1)) priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; @@ -2794,9 +3086,16 @@ err_out: } static const struct of_device_id talitos_match[] = { +#ifdef CONFIG_CRYPTO_DEV_TALITOS1 + { + .compatible = "fsl,sec1.0", + }, +#endif +#ifdef CONFIG_CRYPTO_DEV_TALITOS2 { .compatible = "fsl,sec2.0", }, +#endif {}, }; MODULE_DEVICE_TABLE(of, talitos_match); diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h index 61a14054aa39..314daf55e7f7 100644 --- a/drivers/crypto/talitos.h +++ b/drivers/crypto/talitos.h @@ -29,7 +29,8 @@ */ #define TALITOS_TIMEOUT 100000 -#define TALITOS_MAX_DATA_LEN 65535 +#define TALITOS1_MAX_DATA_LEN 32768 +#define TALITOS2_MAX_DATA_LEN 65535 #define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f) #define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf) @@ -37,9 +38,17 @@ /* descriptor pointer entry 
*/ struct talitos_ptr { - __be16 len; /* length */ - u8 j_extent; /* jump to sg link table and/or extent */ - u8 eptr; /* extended address */ + union { + struct { /* SEC2 format */ + __be16 len; /* length */ + u8 j_extent; /* jump to sg link table and/or extent*/ + u8 eptr; /* extended address */ + }; + struct { /* SEC1 format */ + __be16 res; + __be16 len1; /* length */ + }; + }; __be32 ptr; /* address */ }; @@ -53,10 +62,16 @@ static const struct talitos_ptr zero_entry = { /* descriptor */ struct talitos_desc { __be32 hdr; /* header high bits */ - __be32 hdr_lo; /* header low bits */ + union { + __be32 hdr_lo; /* header low bits */ + __be32 hdr1; /* header for SEC1 */ + }; struct talitos_ptr ptr[7]; /* ptr/len pair array */ + __be32 next_desc; /* next descriptor (SEC1) */ }; +#define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) + /** * talitos_request - descriptor submission request * @desc: descriptor pointer (kernel virtual) @@ -97,6 +112,14 @@ struct talitos_private { struct device *dev; struct platform_device *ofdev; void __iomem *reg; + void __iomem *reg_deu; + void __iomem *reg_aesu; + void __iomem *reg_mdeu; + void __iomem *reg_afeu; + void __iomem *reg_rngu; + void __iomem *reg_pkeu; + void __iomem *reg_keu; + void __iomem *reg_crcu; int irq[2]; /* SEC global registers lock */ @@ -144,49 +167,80 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 #define TALITOS_FTR_SHA224_HWINIT 0x00000004 #define TALITOS_FTR_HMAC_OK 0x00000008 +#define TALITOS_FTR_SEC1 0x00000010 + +/* + * If both CONFIG_CRYPTO_DEV_TALITOS1 and CONFIG_CRYPTO_DEV_TALITOS2 are + * defined, we check the features which are set according to the device tree. + * Otherwise, we answer true or false directly + */ +static inline bool has_ftr_sec1(struct talitos_private *priv) +{ +#if defined(CONFIG_CRYPTO_DEV_TALITOS1) && defined(CONFIG_CRYPTO_DEV_TALITOS2) + return priv->features & TALITOS_FTR_SEC1 ? 
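/*
 * A standalone sketch of the two pointer layouts in the union above: SEC2
 * keeps the 16-bit length first (len), while SEC1 keeps it in the second
 * half-word (len1) behind a reserved field, which is why the accessors take
 * an is_sec1 flag. The struct below only models struct talitos_ptr; it is
 * not the driver's definition (__be16/__be32 become plain fixed-width types
 * here).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct talitos_ptr_model {
	union {
		struct {		/* SEC2 format */
			uint16_t len;
			uint8_t j_extent;
			uint8_t eptr;
		};
		struct {		/* SEC1 format */
			uint16_t res;
			uint16_t len1;
		};
	};
	uint32_t ptr;
};

int main(void)
{
	printf("SEC2 len  at offset %zu\n",
	       offsetof(struct talitos_ptr_model, len));	/* 0 */
	printf("SEC1 len1 at offset %zu\n",
	       offsetof(struct talitos_ptr_model, len1));	/* 2 */
	printf("ptr at offset %zu, %zu bytes total\n",
	       offsetof(struct talitos_ptr_model, ptr),
	       sizeof(struct talitos_ptr_model));		/* 4, 8 */
	return 0;
}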
true : false; +#elif defined(CONFIG_CRYPTO_DEV_TALITOS1) + return true; +#else + return false; +#endif +} /* * TALITOS_xxx_LO addresses point to the low data bits (32-63) of the register */ +#define ISR1_FORMAT(x) (((x) << 28) | ((x) << 16)) +#define ISR2_FORMAT(x) (((x) << 4) | (x)) + /* global register offset addresses */ #define TALITOS_MCR 0x1030 /* master control register */ #define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */ #define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */ #define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */ #define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */ -#define TALITOS_MCR_SWR 0x1 /* s/w reset */ +#define TALITOS1_MCR_SWR 0x1000000 /* s/w reset */ +#define TALITOS2_MCR_SWR 0x1 /* s/w reset */ #define TALITOS_MCR_LO 0x1034 #define TALITOS_IMR 0x1008 /* interrupt mask register */ -#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */ -#define TALITOS_IMR_DONE 0x00055 /* done IRQs */ +/* enable channel IRQs */ +#define TALITOS1_IMR_INIT ISR1_FORMAT(0xf) +#define TALITOS1_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ +/* enable channel IRQs */ +#define TALITOS2_IMR_INIT (ISR2_FORMAT(0xf) | 0x10000) +#define TALITOS2_IMR_DONE ISR1_FORMAT(0x5) /* done IRQs */ #define TALITOS_IMR_LO 0x100C -#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ +#define TALITOS1_IMR_LO_INIT 0x2000000 /* allow RNGU error IRQs */ +#define TALITOS2_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */ #define TALITOS_ISR 0x1010 /* interrupt status register */ -#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */ -#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */ -#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */ -#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */ -#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */ -#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */ +#define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ +#define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ +#define TALITOS1_ISR_TEA_ERR 0x00000040 +#define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ +#define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ +#define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ +#define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ +#define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ +#define TALITOS2_ISR_CH_1_3_DONE ISR2_FORMAT(0x4) /* ch 1, 3 done mask */ #define TALITOS_ISR_LO 0x1014 #define TALITOS_ICR 0x1018 /* interrupt clear register */ #define TALITOS_ICR_LO 0x101C /* channel register address stride */ #define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */ -#define TALITOS_CH_STRIDE 0x100 +#define TALITOS1_CH_STRIDE 0x1000 +#define TALITOS2_CH_STRIDE 0x100 /* channel configuration register */ #define TALITOS_CCCR 0x8 -#define TALITOS_CCCR_CONT 0x2 /* channel continue */ -#define TALITOS_CCCR_RESET 0x1 /* channel reset */ +#define TALITOS2_CCCR_CONT 0x2 /* channel continue on SEC2 */ +#define TALITOS2_CCCR_RESET 0x1 /* channel reset on SEC2 */ #define TALITOS_CCCR_LO 0xc #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. 
*/ #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ +#define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */ /* CCPSR: channel pointer status register */ #define TALITOS_CCPSR 0x10 @@ -224,37 +278,48 @@ extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, #define TALITOS_SCATTER 0xe0 #define TALITOS_SCATTER_LO 0xe4 +/* execution unit registers base */ +#define TALITOS2_DEU 0x2000 +#define TALITOS2_AESU 0x4000 +#define TALITOS2_MDEU 0x6000 +#define TALITOS2_AFEU 0x8000 +#define TALITOS2_RNGU 0xa000 +#define TALITOS2_PKEU 0xc000 +#define TALITOS2_KEU 0xe000 +#define TALITOS2_CRCU 0xf000 + +#define TALITOS12_AESU 0x4000 +#define TALITOS12_DEU 0x5000 +#define TALITOS12_MDEU 0x6000 + +#define TALITOS10_AFEU 0x8000 +#define TALITOS10_DEU 0xa000 +#define TALITOS10_MDEU 0xc000 +#define TALITOS10_RNGU 0xe000 +#define TALITOS10_PKEU 0x10000 +#define TALITOS10_AESU 0x12000 + /* execution unit interrupt status registers */ -#define TALITOS_DEUISR 0x2030 /* DES unit */ -#define TALITOS_DEUISR_LO 0x2034 -#define TALITOS_AESUISR 0x4030 /* AES unit */ -#define TALITOS_AESUISR_LO 0x4034 -#define TALITOS_MDEUISR 0x6030 /* message digest unit */ -#define TALITOS_MDEUISR_LO 0x6034 -#define TALITOS_MDEUICR 0x6038 /* interrupt control */ -#define TALITOS_MDEUICR_LO 0x603c +#define TALITOS_EUDSR 0x10 /* data size */ +#define TALITOS_EUDSR_LO 0x14 +#define TALITOS_EURCR 0x18 /* reset control*/ +#define TALITOS_EURCR_LO 0x1c +#define TALITOS_EUSR 0x28 /* rng status */ +#define TALITOS_EUSR_LO 0x2c +#define TALITOS_EUISR 0x30 +#define TALITOS_EUISR_LO 0x34 +#define TALITOS_EUICR 0x38 /* int. control */ +#define TALITOS_EUICR_LO 0x3c +#define TALITOS_EU_FIFO 0x800 /* output FIFO */ +#define TALITOS_EU_FIFO_LO 0x804 /* output FIFO */ +/* DES unit */ +#define TALITOS1_DEUICR_KPE 0x00200000 /* Key Parity Error */ +/* message digest unit */ #define TALITOS_MDEUICR_LO_ICE 0x4000 /* integrity check IRQ enable */ -#define TALITOS_AFEUISR 0x8030 /* arc4 unit */ -#define TALITOS_AFEUISR_LO 0x8034 -#define TALITOS_RNGUISR 0xa030 /* random number unit */ -#define TALITOS_RNGUISR_LO 0xa034 -#define TALITOS_RNGUSR 0xa028 /* rng status */ -#define TALITOS_RNGUSR_LO 0xa02c +/* random number unit */ #define TALITOS_RNGUSR_LO_RD 0x1 /* reset done */ #define TALITOS_RNGUSR_LO_OFL 0xff0000/* output FIFO length */ -#define TALITOS_RNGUDSR 0xa010 /* data size */ -#define TALITOS_RNGUDSR_LO 0xa014 -#define TALITOS_RNGU_FIFO 0xa800 /* output FIFO */ -#define TALITOS_RNGU_FIFO_LO 0xa804 /* output FIFO */ -#define TALITOS_RNGURCR 0xa018 /* reset control */ -#define TALITOS_RNGURCR_LO 0xa01c #define TALITOS_RNGURCR_LO_SR 0x1 /* software reset */ -#define TALITOS_PKEUISR 0xc030 /* public key unit */ -#define TALITOS_PKEUISR_LO 0xc034 -#define TALITOS_KEUISR 0xe030 /* kasumi unit */ -#define TALITOS_KEUISR_LO 0xe034 -#define TALITOS_CRCUISR 0xf030 /* cyclic redundancy check unit*/ -#define TALITOS_CRCUISR_LO 0xf034 #define TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 0x28 #define TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 0x48 diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig index b35e5c4b025a..30796441b0a6 100644 --- a/drivers/crypto/ux500/Kconfig +++ b/drivers/crypto/ux500/Kconfig @@ -7,6 +7,8 @@ config CRYPTO_DEV_UX500_CRYP tristate "UX500 crypto driver for CRYP block" depends on CRYPTO_DEV_UX500 + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER select CRYPTO_DES help This selects the crypto driver for the UX500_CRYP 
hardware. It supports @@ -16,7 +18,6 @@ config CRYPTO_DEV_UX500_HASH tristate "UX500 crypto driver for HASH block" depends on CRYPTO_DEV_UX500 select CRYPTO_HASH - select CRYPTO_HMAC help This selects the hash driver for the UX500_HASH hardware. Depends on UX500/STM DMA if running in DMA mode. @@ -24,7 +25,6 @@ config CRYPTO_DEV_UX500_HASH config CRYPTO_DEV_UX500_DEBUG bool "Activate ux500 platform debug-mode for crypto and hash block" depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH - default n help Say Y if you want to add debug prints to ux500_hash and ux500_cryp devices. diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index 771babf16aa0..89d8208d9851 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -1,6 +1,6 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" - depends on PPC64 && CRYPTO_DEV_VMX + depends on CRYPTO_DEV_VMX default y help Support for VMX cryptographic acceleration instructions on Power8 CPU. diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index c699c6e6c82e..d28ab96a2475 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile @@ -4,7 +4,7 @@ vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o gha ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) TARGET := linux-ppc64le else -TARGET := linux-pcc64 +TARGET := linux-ppc64 endif quiet_cmd_perl = PERL $@ diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index ab300ea19434..e79e567e43aa 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -30,110 +30,118 @@ #include "aesp8-ppc.h" struct p8_aes_ctx { - struct crypto_cipher *fallback; - struct aes_key enc_key; - struct aes_key dec_key; + struct crypto_cipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; }; static int p8_aes_init(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_cipher *fallback; - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_cipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); - - crypto_cipher_set_flags(fallback, - crypto_cipher_get_flags((struct crypto_cipher *) tfm)); - ctx->fallback = fallback; - - return 0; + const char *alg; + struct crypto_cipher *fallback; + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_cipher_set_flags(fallback, + crypto_cipher_get_flags((struct + crypto_cipher *) + tfm)); + ctx->fallback = fallback; + + return 0; } static void p8_aes_exit(struct crypto_tfm *tfm) { - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_cipher(ctx->fallback); - ctx->fallback = NULL; - 
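/*
 * The recurring change in the vmx files below is wrapping every AltiVec
 * region in preempt_disable()/preempt_enable() on top of the existing
 * pagefault pair: enable_kernel_altivec() gives the vector unit to the
 * current task, and being preempted (or taking a sleeping fault) in the
 * middle could lose that state. A sketch of the pattern as a helper pair;
 * the p8_vmx_* names are hypothetical, not part of this diff:
 */
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>

static inline void p8_vmx_begin(void)
{
	preempt_disable();	/* stay on this CPU while vector state is live */
	pagefault_disable();	/* no sleeping page faults inside the region */
	enable_kernel_altivec();
}

static inline void p8_vmx_end(void)
{
	pagefault_enable();
	preempt_enable();
}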
} + if (ctx->fallback) { + crypto_free_cipher(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - int ret; - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - pagefault_disable(); - enable_kernel_altivec(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); - ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); - pagefault_enable(); - - ret += crypto_cipher_setkey(ctx->fallback, key, keylen); - return ret; + int ret; + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); + preempt_enable(); + + ret += crypto_cipher_setkey(ctx->fallback, key, keylen); + return ret; } static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - if (in_interrupt()) { - crypto_cipher_encrypt_one(ctx->fallback, dst, src); - } else { - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_encrypt(src, dst, &ctx->enc_key); - pagefault_enable(); - } + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (in_interrupt()) { + crypto_cipher_encrypt_one(ctx->fallback, dst, src); + } else { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_encrypt(src, dst, &ctx->enc_key); + pagefault_enable(); + preempt_enable(); + } } static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); - - if (in_interrupt()) { - crypto_cipher_decrypt_one(ctx->fallback, dst, src); - } else { - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_decrypt(src, dst, &ctx->dec_key); - pagefault_enable(); - } + struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); + + if (in_interrupt()) { + crypto_cipher_decrypt_one(ctx->fallback, dst, src); + } else { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_decrypt(src, dst, &ctx->dec_key); + pagefault_enable(); + preempt_enable(); + } } struct crypto_alg p8_aes_alg = { - .cra_name = "aes", - .cra_driver_name = "p8_aes", - .cra_module = THIS_MODULE, - .cra_priority = 1000, - .cra_type = NULL, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, - .cra_alignmask = 0, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct p8_aes_ctx), - .cra_init = p8_aes_init, - .cra_exit = p8_aes_exit, - .cra_cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = p8_aes_setkey, - .cia_encrypt = p8_aes_encrypt, - .cia_decrypt = p8_aes_decrypt, - }, + .cra_name = "aes", + .cra_driver_name = "p8_aes", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = NULL, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_ctx), + .cra_init = p8_aes_init, + .cra_exit = p8_aes_exit, + .cra_cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = p8_aes_setkey, + .cia_encrypt = p8_aes_encrypt, + .cia_decrypt = p8_aes_decrypt, + }, }; - diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index 1a559b7dddb5..7299995c78ec 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -31,154 +31,168 @@ #include "aesp8-ppc.h" struct 
p8_aes_cbc_ctx { - struct crypto_blkcipher *fallback; - struct aes_key enc_key; - struct aes_key dec_key; + struct crypto_blkcipher *fallback; + struct aes_key enc_key; + struct aes_key dec_key; }; static int p8_aes_cbc_init(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_blkcipher *fallback; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); - - crypto_blkcipher_set_flags(fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); - ctx->fallback = fallback; - - return 0; + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = + crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags( + fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + ctx->fallback = fallback; + + return 0; } static void p8_aes_cbc_exit(struct crypto_tfm *tfm) { - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - int ret; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); - - pagefault_disable(); - enable_kernel_altivec(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); - ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); - pagefault_enable(); - - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); - return ret; + int ret; + struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); + preempt_enable(); + + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; } static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - int ret; - struct blkcipher_walk walk; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( - crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; - - if (in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); - } else { - pagefault_disable(); - enable_kernel_altivec(); - - blkcipher_walk_init(&walk, dst, 
src, nbytes); - ret = blkcipher_walk_virt(desc, &walk); - while ((nbytes = walk.nbytes)) { - aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, - nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); + int ret; + struct blkcipher_walk walk; + struct p8_aes_cbc_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, + nbytes); + } else { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); + while ((nbytes = walk.nbytes)) { + aes_p8_cbc_encrypt(walk.src.virt.addr, + walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, + &ctx->enc_key, walk.iv, 1); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); - } + } - pagefault_enable(); - } + pagefault_enable(); + preempt_enable(); + } - return ret; + return ret; } static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - int ret; - struct blkcipher_walk walk; - struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( - crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; - - if (in_interrupt()) { - ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes); - } else { - pagefault_disable(); - enable_kernel_altivec(); - - blkcipher_walk_init(&walk, dst, src, nbytes); - ret = blkcipher_walk_virt(desc, &walk); - while ((nbytes = walk.nbytes)) { - aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, - nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); + int ret; + struct blkcipher_walk walk; + struct p8_aes_cbc_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, + nbytes); + } else { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); + while ((nbytes = walk.nbytes)) { + aes_p8_cbc_encrypt(walk.src.virt.addr, + walk.dst.virt.addr, + nbytes & AES_BLOCK_MASK, + &ctx->dec_key, walk.iv, 0); nbytes &= AES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, &walk, nbytes); } - pagefault_enable(); - } + pagefault_enable(); + preempt_enable(); + } - return ret; + return ret; } struct crypto_alg p8_aes_cbc_alg = { - .cra_name = "cbc(aes)", - .cra_driver_name = "p8_aes_cbc", - .cra_module = THIS_MODULE, - .cra_priority = 1000, - .cra_type = &crypto_blkcipher_type, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, - .cra_alignmask = 0, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), - .cra_init = p8_aes_cbc_init, - .cra_exit = p8_aes_cbc_exit, - .cra_blkcipher = { - .ivsize = 0, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = p8_aes_cbc_setkey, - .encrypt = p8_aes_cbc_encrypt, - .decrypt = p8_aes_cbc_decrypt, - }, + .cra_name = "cbc(aes)", + .cra_driver_name = "p8_aes_cbc", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = &crypto_blkcipher_type, + .cra_flags = 
CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), + .cra_init = p8_aes_cbc_init, + .cra_exit = p8_aes_cbc_exit, + .cra_blkcipher = { + .ivsize = 0, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = p8_aes_cbc_setkey, + .encrypt = p8_aes_cbc_encrypt, + .decrypt = p8_aes_cbc_decrypt, + }, }; - diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 96dbee4bf4a6..7adae42a7b79 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -30,138 +30,147 @@ #include "aesp8-ppc.h" struct p8_aes_ctr_ctx { - struct crypto_blkcipher *fallback; - struct aes_key enc_key; + struct crypto_blkcipher *fallback; + struct aes_key enc_key; }; static int p8_aes_ctr_init(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_blkcipher *fallback; - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); - - crypto_blkcipher_set_flags(fallback, - crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm)); - ctx->fallback = fallback; - - return 0; + const char *alg; + struct crypto_blkcipher *fallback; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = + crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); + + crypto_blkcipher_set_flags( + fallback, + crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); + ctx->fallback = fallback; + + return 0; } static void p8_aes_ctr_exit(struct crypto_tfm *tfm) { - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_blkcipher(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_blkcipher(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - int ret; - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - pagefault_disable(); - enable_kernel_altivec(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); - pagefault_enable(); + pagefault_disable(); + enable_kernel_altivec(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + pagefault_enable(); - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); - return ret; + ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); + return ret; } static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, - struct blkcipher_walk *walk) + struct blkcipher_walk *walk) { - u8 *ctrblk = walk->iv; - u8 keystream[AES_BLOCK_SIZE]; - u8 *src = 
walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - unsigned int nbytes = walk->nbytes; - - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); - pagefault_enable(); - - crypto_xor(keystream, src, nbytes); - memcpy(dst, keystream, nbytes); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + u8 *ctrblk = walk->iv; + u8 keystream[AES_BLOCK_SIZE]; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + unsigned int nbytes = walk->nbytes; + + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); + pagefault_enable(); + + crypto_xor(keystream, src, nbytes); + memcpy(dst, keystream, nbytes); + crypto_inc(ctrblk, AES_BLOCK_SIZE); } static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - int ret; - struct blkcipher_walk walk; - struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx( - crypto_blkcipher_tfm(desc->tfm)); - struct blkcipher_desc fallback_desc = { - .tfm = ctx->fallback, - .info = desc->info, - .flags = desc->flags - }; - - if (in_interrupt()) { - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes); - } else { - blkcipher_walk_init(&walk, dst, src, nbytes); - ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - pagefault_disable(); - enable_kernel_altivec(); - aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, - (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); - pagefault_enable(); - - crypto_inc(walk.iv, AES_BLOCK_SIZE); - nbytes &= AES_BLOCK_SIZE - 1; - ret = blkcipher_walk_done(desc, &walk, nbytes); - } - if (walk.nbytes) { - p8_aes_ctr_final(ctx, &walk); - ret = blkcipher_walk_done(desc, &walk, 0); - } - } - - return ret; + int ret; + struct blkcipher_walk walk; + struct p8_aes_ctr_ctx *ctx = + crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); + struct blkcipher_desc fallback_desc = { + .tfm = ctx->fallback, + .info = desc->info, + .flags = desc->flags + }; + + if (in_interrupt()) { + ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, + nbytes); + } else { + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + pagefault_disable(); + enable_kernel_altivec(); + aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, + walk.dst.virt.addr, + (nbytes & + AES_BLOCK_MASK) / + AES_BLOCK_SIZE, + &ctx->enc_key, + walk.iv); + pagefault_enable(); + + crypto_inc(walk.iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { + p8_aes_ctr_final(ctx, &walk); + ret = blkcipher_walk_done(desc, &walk, 0); + } + } + + return ret; } struct crypto_alg p8_aes_ctr_alg = { - .cra_name = "ctr(aes)", - .cra_driver_name = "p8_aes_ctr", - .cra_module = THIS_MODULE, - .cra_priority = 1000, - .cra_type = &crypto_blkcipher_type, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, - .cra_alignmask = 0, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), - .cra_init = p8_aes_ctr_init, - .cra_exit = p8_aes_ctr_exit, - .cra_blkcipher = { - .ivsize = 0, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = p8_aes_ctr_setkey, - .encrypt = p8_aes_ctr_crypt, - .decrypt = p8_aes_ctr_crypt, - }, + .cra_name = "ctr(aes)", + 
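/*
 * p8_aes_ctr_final() above is the standard CTR tail: encrypt one counter
 * block into a keystream, XOR only the nbytes that remain, then bump the
 * counter. A userspace model where toy_encrypt() stands in for
 * aes_p8_encrypt() (any 16-byte block transform works for the demo):
 */
#include <stdio.h>
#include <string.h>

#define BLK 16

static void toy_encrypt(const unsigned char in[BLK], unsigned char out[BLK])
{
	for (int i = 0; i < BLK; i++)
		out[i] = in[i] ^ 0x5a;	/* NOT a real cipher, demo only */
}

static void ctr_inc(unsigned char ctr[BLK])
{
	for (int i = BLK - 1; i >= 0; i--)	/* big-endian, as crypto_inc() */
		if (++ctr[i])
			break;
}

static void ctr_final(unsigned char ctr[BLK], const unsigned char *src,
		      unsigned char *dst, size_t nbytes)
{
	unsigned char ks[BLK];

	toy_encrypt(ctr, ks);			/* keystream from the counter */
	for (size_t i = 0; i < nbytes; i++)
		dst[i] = src[i] ^ ks[i];	/* XOR just the tail bytes */
	ctr_inc(ctr);
}

int main(void)
{
	unsigned char ctr[BLK] = { 0 }, out[5];

	ctr_final(ctr, (const unsigned char *)"tail!", out, 5);
	printf("counter now ends in %u\n", ctr[BLK - 1]);	/* 1 */
	return 0;
}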
.cra_driver_name = "p8_aes_ctr", + .cra_module = THIS_MODULE, + .cra_priority = 1000, + .cra_type = &crypto_blkcipher_type, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK, + .cra_alignmask = 0, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), + .cra_init = p8_aes_ctr_init, + .cra_exit = p8_aes_ctr_exit, + .cra_blkcipher = { + .ivsize = 0, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = p8_aes_ctr_setkey, + .encrypt = p8_aes_ctr_crypt, + .decrypt = p8_aes_ctr_crypt, + }, }; diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h index e963945a83e1..4cd34ee54a94 100644 --- a/drivers/crypto/vmx/aesp8-ppc.h +++ b/drivers/crypto/vmx/aesp8-ppc.h @@ -4,17 +4,18 @@ #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1)) struct aes_key { - u8 key[AES_MAX_KEYLENGTH]; - int rounds; + u8 key[AES_MAX_KEYLENGTH]; + int rounds; }; int aes_p8_set_encrypt_key(const u8 *userKey, const int bits, - struct aes_key *key); + struct aes_key *key); int aes_p8_set_decrypt_key(const u8 *userKey, const int bits, - struct aes_key *key); + struct aes_key *key); void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key); -void aes_p8_decrypt(const u8 *in, u8 *out,const struct aes_key *key); +void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key); void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len, - const struct aes_key *key, u8 *iv, const int enc); + const struct aes_key *key, u8 *iv, const int enc); void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out, - size_t len, const struct aes_key *key, const u8 *iv); + size_t len, const struct aes_key *key, + const u8 *iv); diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index d0ffe277af5c..b5e29002b666 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -39,176 +39,188 @@ void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], - const u8 *in,size_t len); + const u8 *in, size_t len); struct p8_ghash_ctx { - u128 htable[16]; - struct crypto_shash *fallback; + u128 htable[16]; + struct crypto_shash *fallback; }; struct p8_ghash_desc_ctx { - u64 shash[2]; - u8 buffer[GHASH_DIGEST_SIZE]; - int bytes; - struct shash_desc fallback_desc; + u64 shash[2]; + u8 buffer[GHASH_DIGEST_SIZE]; + int bytes; + struct shash_desc fallback_desc; }; static int p8_ghash_init_tfm(struct crypto_tfm *tfm) { - const char *alg; - struct crypto_shash *fallback; - struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!(alg = crypto_tfm_alg_name(tfm))) { - printk(KERN_ERR "Failed to get algorithm name.\n"); - return -ENOENT; - } - - fallback = crypto_alloc_shash(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(fallback)) { - printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", - alg, PTR_ERR(fallback)); - return PTR_ERR(fallback); - } - printk(KERN_INFO "Using '%s' as fallback implementation.\n", - crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); - - crypto_shash_set_flags(fallback, - crypto_shash_get_flags((struct crypto_shash *) tfm)); - ctx->fallback = fallback; - - shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) - + crypto_shash_descsize(fallback); - - return 0; + const char *alg; + struct crypto_shash *fallback; + struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!(alg = 
crypto_tfm_alg_name(tfm))) { + printk(KERN_ERR "Failed to get algorithm name.\n"); + return -ENOENT; + } + + fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback)) { + printk(KERN_ERR + "Failed to allocate transformation for '%s': %ld\n", + alg, PTR_ERR(fallback)); + return PTR_ERR(fallback); + } + printk(KERN_INFO "Using '%s' as fallback implementation.\n", + crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback))); + + crypto_shash_set_flags(fallback, + crypto_shash_get_flags((struct crypto_shash + *) tfm)); + ctx->fallback = fallback; + + shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx) + + crypto_shash_descsize(fallback); + + return 0; } static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_shash(ctx->fallback); - ctx->fallback = NULL; - } + if (ctx->fallback) { + crypto_free_shash(ctx->fallback); + ctx->fallback = NULL; + } } static int p8_ghash_init(struct shash_desc *desc) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); - struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->bytes = 0; - memset(dctx->shash, 0, GHASH_DIGEST_SIZE); - dctx->fallback_desc.tfm = ctx->fallback; - dctx->fallback_desc.flags = desc->flags; - return crypto_shash_init(&dctx->fallback_desc); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + dctx->bytes = 0; + memset(dctx->shash, 0, GHASH_DIGEST_SIZE); + dctx->fallback_desc.tfm = ctx->fallback; + dctx->fallback_desc.flags = desc->flags; + return crypto_shash_init(&dctx->fallback_desc); } static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) + unsigned int keylen) { - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); - - if (keylen != GHASH_KEY_LEN) - return -EINVAL; - - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_init_p8(ctx->htable, (const u64 *) key); - pagefault_enable(); - return crypto_shash_setkey(ctx->fallback, key, keylen); + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); + + if (keylen != GHASH_KEY_LEN) + return -EINVAL; + + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_init_p8(ctx->htable, (const u64 *) key); + pagefault_enable(); + preempt_enable(); + return crypto_shash_setkey(ctx->fallback, key, keylen); } static int p8_ghash_update(struct shash_desc *desc, - const u8 *src, unsigned int srclen) + const u8 *src, unsigned int srclen) { - unsigned int len; - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); - struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - if (IN_INTERRUPT) { - return crypto_shash_update(&dctx->fallback_desc, src, srclen); - } else { - if (dctx->bytes) { - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { - memcpy(dctx->buffer + dctx->bytes, src, srclen); - dctx->bytes += srclen; - return 0; - } - memcpy(dctx->buffer + dctx->bytes, src, - GHASH_DIGEST_SIZE - dctx->bytes); - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, - GHASH_DIGEST_SIZE); - pagefault_enable(); - src += GHASH_DIGEST_SIZE - dctx->bytes; - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; - dctx->bytes = 0; - } - len = srclen & ~(GHASH_DIGEST_SIZE - 1); - if (len) { - pagefault_disable(); - enable_kernel_altivec(); - 
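/*
 * The update path here is the classic block-buffering pattern: top up a
 * partial 16-byte buffer first, hash it when full, stream whole blocks
 * straight from the input, and stash any tail for the next call. A
 * standalone model where process_blocks() stands in for gcm_ghash_p8():
 */
#include <stdio.h>
#include <string.h>

#define BLK 16

struct hctx {
	unsigned char buf[BLK];
	unsigned int bytes;	/* valid bytes buffered */
	unsigned long blocks;	/* blocks consumed, for the demo */
};

static void process_blocks(struct hctx *c, const unsigned char *p, size_t len)
{
	(void)p;
	c->blocks += len / BLK;	/* the real code folds these into the digest */
}

static void hash_update(struct hctx *c, const unsigned char *src, size_t srclen)
{
	if (c->bytes) {				/* top up the partial block */
		size_t fill = BLK - c->bytes;

		if (srclen < fill) {
			memcpy(c->buf + c->bytes, src, srclen);
			c->bytes += srclen;
			return;
		}
		memcpy(c->buf + c->bytes, src, fill);
		process_blocks(c, c->buf, BLK);
		src += fill;
		srclen -= fill;
		c->bytes = 0;
	}

	size_t len = srclen & ~(size_t)(BLK - 1);
	if (len) {				/* whole blocks, no copy */
		process_blocks(c, src, len);
		src += len;
		srclen -= len;
	}
	if (srclen) {				/* keep the tail for next time */
		memcpy(c->buf, src, srclen);
		c->bytes = srclen;
	}
}

int main(void)
{
	struct hctx c = { { 0 }, 0, 0 };
	unsigned char data[40] = { 0 };

	hash_update(&c, data, 10);	/* all buffered */
	hash_update(&c, data, 40);	/* 50 total: 3 blocks, 2 left over */
	printf("blocks=%lu tail=%u\n", c.blocks, c.bytes);
	return 0;
}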
enable_kernel_fp(); - gcm_ghash_p8(dctx->shash, ctx->htable, src, len); - pagefault_enable(); - src += len; - srclen -= len; - } - if (srclen) { - memcpy(dctx->buffer, src, srclen); - dctx->bytes = srclen; - } - return 0; - } + unsigned int len; + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + if (IN_INTERRUPT) { + return crypto_shash_update(&dctx->fallback_desc, src, + srclen); + } else { + if (dctx->bytes) { + if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { + memcpy(dctx->buffer + dctx->bytes, src, + srclen); + dctx->bytes += srclen; + return 0; + } + memcpy(dctx->buffer + dctx->bytes, src, + GHASH_DIGEST_SIZE - dctx->bytes); + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, + dctx->buffer, GHASH_DIGEST_SIZE); + pagefault_enable(); + preempt_enable(); + src += GHASH_DIGEST_SIZE - dctx->bytes; + srclen -= GHASH_DIGEST_SIZE - dctx->bytes; + dctx->bytes = 0; + } + len = srclen & ~(GHASH_DIGEST_SIZE - 1); + if (len) { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, src, len); + pagefault_enable(); + preempt_enable(); + src += len; + srclen -= len; + } + if (srclen) { + memcpy(dctx->buffer, src, srclen); + dctx->bytes = srclen; + } + return 0; + } } static int p8_ghash_final(struct shash_desc *desc, u8 *out) { - int i; - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); - struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); - - if (IN_INTERRUPT) { - return crypto_shash_final(&dctx->fallback_desc, out); - } else { - if (dctx->bytes) { - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) - dctx->buffer[i] = 0; - pagefault_disable(); - enable_kernel_altivec(); - enable_kernel_fp(); - gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, - GHASH_DIGEST_SIZE); - pagefault_enable(); - dctx->bytes = 0; - } - memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); - return 0; - } + int i; + struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); + struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); + + if (IN_INTERRUPT) { + return crypto_shash_final(&dctx->fallback_desc, out); + } else { + if (dctx->bytes) { + for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) + dctx->buffer[i] = 0; + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, + dctx->buffer, GHASH_DIGEST_SIZE); + pagefault_enable(); + preempt_enable(); + dctx->bytes = 0; + } + memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); + return 0; + } } struct shash_alg p8_ghash_alg = { - .digestsize = GHASH_DIGEST_SIZE, - .init = p8_ghash_init, - .update = p8_ghash_update, - .final = p8_ghash_final, - .setkey = p8_ghash_setkey, - .descsize = sizeof(struct p8_ghash_desc_ctx), - .base = { - .cra_name = "ghash", - .cra_driver_name = "p8_ghash", - .cra_priority = 1000, - .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct p8_ghash_ctx), - .cra_module = THIS_MODULE, - .cra_init = p8_ghash_init_tfm, - .cra_exit = p8_ghash_exit_tfm, - }, + .digestsize = GHASH_DIGEST_SIZE, + .init = p8_ghash_init, + .update = p8_ghash_update, + .final = p8_ghash_final, + .setkey = p8_ghash_setkey, + .descsize = sizeof(struct p8_ghash_desc_ctx), + .base = { + .cra_name = "ghash", + .cra_driver_name = "p8_ghash", + .cra_priority = 1000, + 
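/*
 * p8_init() just below registers an array of algorithms and, when one
 * registration fails, walks back over the entries already registered before
 * propagating the error. The same unwind idiom in isolation (userspace
 * model; register_one()/unregister_one() are stand-ins that fail at a fixed
 * index):
 */
#include <stdio.h>

static int register_one(int i)
{
	return i == 2 ? -1 : 0;		/* pretend the third entry fails */
}

static void unregister_one(int i)
{
	printf("rolled back %d\n", i);
}

int main(void)
{
	int n = 4, ret = 0;

	for (int i = 0; i < n; i++) {
		ret = register_one(i);
		if (ret) {
			while (--i >= 0)	/* unwind in reverse order */
				unregister_one(i);
			break;
		}
	}
	printf("ret=%d\n", ret);
	return ret ? 1 : 0;
}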
.cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = GHASH_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct p8_ghash_ctx), + .cra_module = THIS_MODULE, + .cra_init = p8_ghash_init_tfm, + .cra_exit = p8_ghash_exit_tfm, + }, }; diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index 44d8d5cfe40d..e163d5770438 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -32,57 +32,57 @@ extern struct crypto_alg p8_aes_alg; extern struct crypto_alg p8_aes_cbc_alg; extern struct crypto_alg p8_aes_ctr_alg; static struct crypto_alg *algs[] = { - &p8_aes_alg, - &p8_aes_cbc_alg, - &p8_aes_ctr_alg, - NULL, + &p8_aes_alg, + &p8_aes_cbc_alg, + &p8_aes_ctr_alg, + NULL, }; int __init p8_init(void) { - int ret = 0; - struct crypto_alg **alg_it; + int ret = 0; + struct crypto_alg **alg_it; - if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) - return -ENODEV; + if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + return -ENODEV; - for (alg_it = algs; *alg_it; alg_it++) { - ret = crypto_register_alg(*alg_it); - printk(KERN_INFO "crypto_register_alg '%s' = %d\n", - (*alg_it)->cra_name, ret); - if (ret) { - for (alg_it--; alg_it >= algs; alg_it--) - crypto_unregister_alg(*alg_it); - break; - } - } - if (ret) - return ret; + for (alg_it = algs; *alg_it; alg_it++) { + ret = crypto_register_alg(*alg_it); + printk(KERN_INFO "crypto_register_alg '%s' = %d\n", + (*alg_it)->cra_name, ret); + if (ret) { + for (alg_it--; alg_it >= algs; alg_it--) + crypto_unregister_alg(*alg_it); + break; + } + } + if (ret) + return ret; - ret = crypto_register_shash(&p8_ghash_alg); - if (ret) { - for (alg_it = algs; *alg_it; alg_it++) - crypto_unregister_alg(*alg_it); - } - return ret; + ret = crypto_register_shash(&p8_ghash_alg); + if (ret) { + for (alg_it = algs; *alg_it; alg_it++) + crypto_unregister_alg(*alg_it); + } + return ret; } void __exit p8_exit(void) { - struct crypto_alg **alg_it; + struct crypto_alg **alg_it; - for (alg_it = algs; *alg_it; alg_it++) { - printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); - crypto_unregister_alg(*alg_it); - } - crypto_unregister_shash(&p8_ghash_alg); + for (alg_it = algs; *alg_it; alg_it++) { + printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name); + crypto_unregister_alg(*alg_it); + } + crypto_unregister_shash(&p8_ghash_alg); } module_init(p8_init); module_exit(p8_exit); MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>"); -MODULE_DESCRIPTION("IBM VMX cryptogaphic acceleration instructions support on Power 8"); +MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions " + "support on Power 8"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); - diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 6de2e677be04..74d9db05a5ad 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/io.h> #include <linux/seq_file.h> +#include <linux/vmalloc.h> #include "mic_x100_dma.h" diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index c22606fe3d44..6bac03999fd4 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1611,7 +1611,6 @@ static struct scsi_host_template scsi_driver_template = { .this_id = -1, .sg_tablesize = SG_ALL, .use_clustering = ENABLE_CLUSTERING, - .cmd_per_lun = 1, .can_queue = 1, .sdev_attrs = sbp2_scsi_sysfs_attrs, }; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 8de4da5c9ab6..54071c148340 100644 --- 
a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -18,6 +18,11 @@ config EFI_VARS Subsequent efibootmgr releases may be found at: <http://github.com/vathpela/efibootmgr> +config EFI_ESRT + bool + depends on EFI && !IA64 + default y + config EFI_VARS_PSTORE tristate "Register efivars backend for pstore" depends on EFI_VARS && PSTORE diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index d8be608a9f3b..6fd3da938717 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -3,6 +3,7 @@ # obj-$(CONFIG_EFI) += efi.o vars.o reboot.o obj-$(CONFIG_EFI_VARS) += efivars.o +obj-$(CONFIG_EFI_ESRT) += esrt.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 3061bb8629dc..ca617f40574a 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -39,6 +39,7 @@ struct efi __read_mostly efi = { .fw_vendor = EFI_INVALID_TABLE_ADDR, .runtime = EFI_INVALID_TABLE_ADDR, .config_table = EFI_INVALID_TABLE_ADDR, + .esrt = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -64,7 +65,7 @@ static int __init parse_efi_cmdline(char *str) } early_param("efi", parse_efi_cmdline); -static struct kobject *efi_kobj; +struct kobject *efi_kobj; static struct kobject *efivars_kobj; /* @@ -85,10 +86,15 @@ static ssize_t systab_show(struct kobject *kobj, str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); if (efi.acpi != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); - if (efi.smbios != EFI_INVALID_TABLE_ADDR) - str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); + /* + * If both SMBIOS and SMBIOS3 entry points are implemented, the + * SMBIOS3 entry point shall be preferred, so we list it first to + * let applications stop parsing after the first match. + */ if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3); + if (efi.smbios != EFI_INVALID_TABLE_ADDR) + str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); if (efi.hcdp != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); if (efi.boot_info != EFI_INVALID_TABLE_ADDR) @@ -232,6 +238,84 @@ err_put: subsys_initcall(efisubsys_init); +/* + * Find the efi memory descriptor for a given physical address. Given a + * physical address, determine if it exists within an EFI Memory Map entry, + * and if so, populate the supplied memory descriptor with the appropriate + * data. + */ +int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) +{ + struct efi_memory_map *map = efi.memmap; + void *p, *e; + + if (!efi_enabled(EFI_MEMMAP)) { + pr_err_once("EFI_MEMMAP is not enabled.\n"); + return -EINVAL; + } + + if (!map) { + pr_err_once("efi.memmap is not set.\n"); + return -EINVAL; + } + if (!out_md) { + pr_err_once("out_md is null.\n"); + return -EINVAL; + } + if (WARN_ON_ONCE(!map->phys_map)) + return -EINVAL; + if (WARN_ON_ONCE(map->nr_map == 0) || WARN_ON_ONCE(map->desc_size == 0)) + return -EINVAL; + + e = map->phys_map + map->nr_map * map->desc_size; + for (p = map->phys_map; p < e; p += map->desc_size) { + efi_memory_desc_t *md; + u64 size; + u64 end; + + /* + * If a driver calls this after efi_free_boot_services, + * ->map will be NULL, and the target may also not be mapped. + * So just always get our own virtual map on the CPU.
+ * + */ + md = early_memremap((phys_addr_t)p, sizeof (*md)); + if (!md) { + pr_err_once("early_memremap(%p, %zu) failed.\n", + p, sizeof (*md)); + return -ENOMEM; + } + + if (!(md->attribute & EFI_MEMORY_RUNTIME) && + md->type != EFI_BOOT_SERVICES_DATA && + md->type != EFI_RUNTIME_SERVICES_DATA) { + early_memunmap(md, sizeof (*md)); + continue; + } + + size = md->num_pages << EFI_PAGE_SHIFT; + end = md->phys_addr + size; + if (phys_addr >= md->phys_addr && phys_addr < end) { + memcpy(out_md, md, sizeof(*out_md)); + early_memunmap(md, sizeof (*md)); + return 0; + } + + early_memunmap(md, sizeof (*md)); + } + pr_err_once("requested map not found.\n"); + return -ENOENT; +} + +/* + * Calculate the highest address of an efi memory descriptor. + */ +u64 __init efi_mem_desc_end(efi_memory_desc_t *md) +{ + u64 size = md->num_pages << EFI_PAGE_SHIFT; + u64 end = md->phys_addr + size; + return end; +} /* * We can't ioremap data in EFI boot services RAM, because we've already mapped @@ -274,6 +358,7 @@ static __initdata efi_config_table_type_t common_tables[] = { {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios}, {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3}, {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga}, + {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt}, {NULL_GUID, NULL, NULL}, }; diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 7b2e0496e0c0..756eca8c4cf8 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -535,7 +535,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * efivar_create_sysfs_entry - create a new entry in sysfs * @new_var: efivar entry to create * - * Returns 1 on failure, 0 on success + * Returns 0 on success, negative error code on failure */ static int efivar_create_sysfs_entry(struct efivar_entry *new_var) @@ -544,6 +544,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) char *short_name; unsigned long variable_name_size; efi_char16_t *variable_name; + int ret; variable_name = new_var->var.VariableName; variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); @@ -558,7 +559,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) short_name = kzalloc(short_name_size, GFP_KERNEL); if (!short_name) - return 1; + return -ENOMEM; /* Convert Unicode to normal chars (assume top bits are 0), ala UTF-8 */ @@ -574,11 +575,11 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) new_var->kobj.kset = efivars_kset; - i = kobject_init_and_add(&new_var->kobj, &efivar_ktype, + ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, NULL, "%s", short_name); kfree(short_name); - if (i) - return 1; + if (ret) + return ret; kobject_uevent(&new_var->kobj, KOBJ_ADD); efivar_entry_add(new_var, &efivar_sysfs_list); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c new file mode 100644 index 000000000000..a5b95d61ae71 --- /dev/null +++ b/drivers/firmware/efi/esrt.c @@ -0,0 +1,471 @@ +/* + * esrt.c + * + * This module exports EFI System Resource Table (ESRT) entries into userspace + * through the sysfs file system. The ESRT provides a read-only catalog of + * system components for which the system accepts firmware upgrades via UEFI's + * "Capsule Update" feature. This module allows userland utilities to evaluate + * what firmware updates can be applied to this system, and potentially arrange + * for those updates to occur. + * + * Data is currently found below /sys/firmware/efi/esrt/... 
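The lookup above has one subtlety worth calling out: the walk strides by the firmware-reported desc_size rather than by sizeof(efi_memory_desc_t), because firmware may place extra data after each descriptor. A freestanding sketch of the same range check, assuming the whole map is already addressable (the kernel version must early_memremap() each descriptor) and omitting the attribute/type filtering; the struct layout here is illustrative, not the real efi_memory_desc_t:

```c
#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT 12   /* EFI pages are 4 KiB */

struct desc {
	uint32_t type;
	uint64_t phys_addr;
	uint64_t num_pages;
	uint64_t attribute;
};

/* returns 0 and fills *out on success, -1 if no entry covers phys */
int desc_lookup(const void *map, size_t nr, size_t desc_size,
		uint64_t phys, struct desc *out)
{
	const char *p = map;
	const char *e = p + nr * desc_size;

	/* stride by desc_size, which may be larger than sizeof(struct desc) */
	for (; p < e; p += desc_size) {
		const struct desc *d = (const struct desc *)p;
		uint64_t end = d->phys_addr + (d->num_pages << PAGE_SHIFT);

		if (phys >= d->phys_addr && phys < end) {
			memcpy(out, d, sizeof(*out));
			return 0;
		}
	}
	return -1;
}
```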
+ */ +#define pr_fmt(fmt) "esrt: " fmt + +#include <linux/capability.h> +#include <linux/device.h> +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include <asm/io.h> +#include <asm/early_ioremap.h> + +struct efi_system_resource_entry_v1 { + efi_guid_t fw_class; + u32 fw_type; + u32 fw_version; + u32 lowest_supported_fw_version; + u32 capsule_flags; + u32 last_attempt_version; + u32 last_attempt_status; +}; + +/* + * _count and _version are what they seem like. _max is actually just + * accounting info for the firmware when creating the table; it should never + * have been exposed to us. To wit, the spec says: + * The maximum number of resource array entries that can be within the + * table without reallocating the table, must not be zero. + * Since there's no guidance about what that means in terms of memory layout, + * it means nothing to us. + */ +struct efi_system_resource_table { + u32 fw_resource_count; + u32 fw_resource_count_max; + u64 fw_resource_version; + u8 entries[]; +}; + +static phys_addr_t esrt_data; +static size_t esrt_data_size; + +static struct efi_system_resource_table *esrt; + +struct esre_entry { + union { + struct efi_system_resource_entry_v1 *esre1; + } esre; + + struct kobject kobj; + struct list_head list; +}; + +/* global list of esre_entry. */ +static LIST_HEAD(entry_list); + +/* entry attribute */ +struct esre_attribute { + struct attribute attr; + ssize_t (*show)(struct esre_entry *entry, char *buf); + ssize_t (*store)(struct esre_entry *entry, + const char *buf, size_t count); +}; + +static struct esre_entry *to_entry(struct kobject *kobj) +{ + return container_of(kobj, struct esre_entry, kobj); +} + +static struct esre_attribute *to_attr(struct attribute *attr) +{ + return container_of(attr, struct esre_attribute, attr); +} + +static ssize_t esre_attr_show(struct kobject *kobj, + struct attribute *_attr, char *buf) +{ + struct esre_entry *entry = to_entry(kobj); + struct esre_attribute *attr = to_attr(_attr); + + /* Don't tell normal users what firmware versions we've got... */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return attr->show(entry, buf); +} + +static const struct sysfs_ops esre_attr_ops = { + .show = esre_attr_show, +}; + +/* Generic ESRT Entry ("ESRE") support. 
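The to_entry()/to_attr() helpers above rely on the usual container_of() pattern: sysfs calls back with pointers to members embedded in a driver structure, and the driver recovers the enclosing object by subtracting the member's offset. A self-contained illustration (the names here are made up for the example, not kernel API):

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct entry {
	int value;
	struct attribute attr;   /* embedded member handed to the callback */
};

static void show(struct attribute *a)
{
	/* recover the wrapper object from the embedded member */
	struct entry *e = container_of(a, struct entry, attr);

	printf("%s = %d\n", a->name, e->value);
}

int main(void)
{
	struct entry e = { .value = 42, .attr = { .name = "fw_version" } };

	show(&e.attr);   /* prints "fw_version = 42" */
	return 0;
}
```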
*/ +static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf) +{ + char *str = buf; + + efi_guid_to_str(&entry->esre.esre1->fw_class, str); + str += strlen(str); + str += sprintf(str, "\n"); + + return str - buf; +} + +static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400, + esre_fw_class_show, NULL); + +#define esre_attr_decl(name, size, fmt) \ +static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \ +{ \ + return sprintf(buf, fmt "\n", \ + le##size##_to_cpu(entry->esre.esre1->name)); \ +} \ +\ +static struct esre_attribute esre_##name = __ATTR(name, 0400, \ + esre_##name##_show, NULL) + +esre_attr_decl(fw_type, 32, "%u"); +esre_attr_decl(fw_version, 32, "%u"); +esre_attr_decl(lowest_supported_fw_version, 32, "%u"); +esre_attr_decl(capsule_flags, 32, "0x%x"); +esre_attr_decl(last_attempt_version, 32, "%u"); +esre_attr_decl(last_attempt_status, 32, "%u"); + +static struct attribute *esre1_attrs[] = { + &esre_fw_class.attr, + &esre_fw_type.attr, + &esre_fw_version.attr, + &esre_lowest_supported_fw_version.attr, + &esre_capsule_flags.attr, + &esre_last_attempt_version.attr, + &esre_last_attempt_status.attr, + NULL +}; +static void esre_release(struct kobject *kobj) +{ + struct esre_entry *entry = to_entry(kobj); + + list_del(&entry->list); + kfree(entry); +} + +static struct kobj_type esre1_ktype = { + .release = esre_release, + .sysfs_ops = &esre_attr_ops, + .default_attrs = esre1_attrs, +}; + + +static struct kobject *esrt_kobj; +static struct kset *esrt_kset; + +static int esre_create_sysfs_entry(void *esre, int entry_num) +{ + struct esre_entry *entry; + char name[20]; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + sprintf(name, "entry%d", entry_num); + + entry->kobj.kset = esrt_kset; + + if (esrt->fw_resource_version == 1) { + int rc = 0; + + entry->esre.esre1 = esre; + rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, + "%s", name); + if (rc) { + kfree(entry); + return rc; + } + } + + list_add_tail(&entry->list, &entry_list); + return 0; +} + +/* support for displaying ESRT fields at the top level */ +#define esrt_attr_decl(name, size, fmt) \ +static ssize_t esrt_##name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf)\ +{ \ + return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \ +} \ +\ +static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \ + esrt_##name##_show, NULL) + +esrt_attr_decl(fw_resource_count, 32, "%u"); +esrt_attr_decl(fw_resource_count_max, 32, "%u"); +esrt_attr_decl(fw_resource_version, 64, "%llu"); + +static struct attribute *esrt_attrs[] = { + &esrt_fw_resource_count.attr, + &esrt_fw_resource_count_max.attr, + &esrt_fw_resource_version.attr, + NULL, +}; + +static inline int esrt_table_exists(void) +{ + if (!efi_enabled(EFI_CONFIG_TABLES)) + return 0; + if (efi.esrt == EFI_INVALID_TABLE_ADDR) + return 0; + return 1; +} + +static umode_t esrt_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + if (!esrt_table_exists()) + return 0; + return attr->mode; +} + +static struct attribute_group esrt_attr_group = { + .attrs = esrt_attrs, + .is_visible = esrt_attr_is_visible, +}; + +/* + * remap the table, copy it to kmalloced pages, and unmap it. 
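The esre_attr_decl() macro above stamps out one show() routine plus one matching attribute object per ESRE field, so adding a field costs a single line. The token-pasting technique reduced to standalone C; attr_decl() and struct record are invented for the example:

```c
#include <stdio.h>

struct record {
	unsigned int fw_type;
	unsigned int fw_version;
};

static struct record current_rec = { .fw_type = 1, .fw_version = 7 };

/* generates one formatter per named field via token pasting */
#define attr_decl(name, fmt)                                        \
static int name##_show(char *buf, size_t len)                       \
{                                                                   \
	return snprintf(buf, len, fmt "\n", current_rec.name);      \
}

attr_decl(fw_type, "%u")
attr_decl(fw_version, "%u")

int main(void)
{
	char buf[32];

	fw_type_show(buf, sizeof(buf));
	fputs(buf, stdout);          /* prints "1" */
	fw_version_show(buf, sizeof(buf));
	fputs(buf, stdout);          /* prints "7" */
	return 0;
}
```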
+ */ +void __init efi_esrt_init(void) +{ + void *va; + struct efi_system_resource_table tmpesrt; + struct efi_system_resource_entry_v1 *v1_entries; + size_t size, max, entry_size, entries_size; + efi_memory_desc_t md; + int rc; + phys_addr_t end; + + pr_debug("esrt-init: loading.\n"); + if (!esrt_table_exists()) + return; + + rc = efi_mem_desc_lookup(efi.esrt, &md); + if (rc < 0) { + pr_err("ESRT header is not in the memory map.\n"); + return; + } + + max = efi_mem_desc_end(&md); + if (max < efi.esrt) { + pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n", + (void *)efi.esrt, (void *)max); + return; + } + + size = sizeof(*esrt); + max -= efi.esrt; + + if (max < size) { + pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n", + size, max); + return; + } + + va = early_memremap(efi.esrt, size); + if (!va) { + pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt, + size); + return; + } + + memcpy(&tmpesrt, va, sizeof(tmpesrt)); + + if (tmpesrt.fw_resource_version == 1) { + entry_size = sizeof (*v1_entries); + } else { + pr_err("Unsupported ESRT version %lld.\n", + tmpesrt.fw_resource_version); + goto err_memunmap; + } + + if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) { + pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n", + max - size, entry_size); + goto err_memunmap; + } + + /* + * The format doesn't really give us any boundary to test here, + * so I'm making up 128 as the max number of individually updatable + * components we support. + * 128 should be pretty excessive, but there's still some chance + * somebody will do that someday and we'll need to raise this. + */ + if (tmpesrt.fw_resource_count > 128) { + pr_err("ESRT says fw_resource_count has very large value %d.\n", + tmpesrt.fw_resource_count); + goto err_memunmap; + } + + /* + * We know it can't be larger than N * sizeof() here, and N is limited + * by the previous test to a small number, so there's no overflow.
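The size checks above follow a common pattern for validating a variable-length table against the region that holds it: verify the fixed header fits, cap the advertised entry count at a sane bound so the multiplication cannot overflow, then verify count * entry_size fits in the remaining space. The same logic in isolation, with names chosen for the example:

```c
#include <stdint.h>
#include <stddef.h>

#define MAX_ENTRIES 128   /* same plausibility cap as the driver above */

/* returns 0 if `count` entries of `entry_size` bytes fit in `avail` bytes
 * of space remaining after the fixed header, -1 otherwise */
int check_table_fits(uint32_t count, size_t entry_size, size_t avail)
{
	size_t entries_size;

	if (count > MAX_ENTRIES)
		return -1;   /* implausible count, refuse outright */
	if (count > 0 && avail < entry_size)
		return -1;   /* region only has room for the header */

	/* cannot overflow: count <= 128 and entry_size is a small struct */
	entries_size = (size_t)count * entry_size;
	if (avail < entries_size)
		return -1;   /* table would spill past the region */
	return 0;
}
```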
+ */ + entries_size = tmpesrt.fw_resource_count * entry_size; + if (max < size + entries_size) { + pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n", + size, max); + goto err_memunmap; + } + + /* remap it with our (plausible) new pages */ + early_memunmap(va, size); + size += entries_size; + va = early_memremap(efi.esrt, size); + if (!va) { + pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt, + size); + return; + } + + esrt_data = (phys_addr_t)efi.esrt; + esrt_data_size = size; + + end = esrt_data + size; + pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end); + memblock_reserve(esrt_data, esrt_data_size); + + pr_debug("esrt-init: loaded.\n"); +err_memunmap: + early_memunmap(va, size); +} + +static int __init register_entries(void) +{ + struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries; + int i, rc; + + if (!esrt_table_exists()) + return 0; + + for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) { + void *esre = NULL; + if (esrt->fw_resource_version == 1) { + esre = &v1_entries[i]; + } else { + pr_err("Unsupported ESRT version %lld.\n", + esrt->fw_resource_version); + return -EINVAL; + } + + rc = esre_create_sysfs_entry(esre, i); + if (rc < 0) { + pr_err("ESRT entry creation failed with error %d.\n", + rc); + return rc; + } + } + return 0; +} + +static void cleanup_entry_list(void) +{ + struct esre_entry *entry, *next; + + list_for_each_entry_safe(entry, next, &entry_list, list) { + kobject_put(&entry->kobj); + } +} + +static int __init esrt_sysfs_init(void) +{ + int error; + struct efi_system_resource_table __iomem *ioesrt; + + pr_debug("esrt-sysfs: loading.\n"); + if (!esrt_data || !esrt_data_size) + return -ENOSYS; + + ioesrt = ioremap(esrt_data, esrt_data_size); + if (!ioesrt) { + pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data, + esrt_data_size); + return -ENOMEM; + } + + esrt = kmalloc(esrt_data_size, GFP_KERNEL); + if (!esrt) { + pr_err("kmalloc failed. 
(wanted %zu bytes)\n", esrt_data_size); + iounmap(ioesrt); + return -ENOMEM; + } + + memcpy_fromio(esrt, ioesrt, esrt_data_size); + + esrt_kobj = kobject_create_and_add("esrt", efi_kobj); + if (!esrt_kobj) { + pr_err("Firmware table registration failed.\n"); + error = -ENOMEM; + goto err; + } + + error = sysfs_create_group(esrt_kobj, &esrt_attr_group); + if (error) { + pr_err("Sysfs attribute export failed with error %d.\n", + error); + goto err_remove_esrt; + } + + esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj); + if (!esrt_kset) { + pr_err("kset creation failed.\n"); + error = -ENOMEM; + goto err_remove_group; + } + + error = register_entries(); + if (error) + goto err_cleanup_list; + + memblock_remove(esrt_data, esrt_data_size); + + pr_debug("esrt-sysfs: loaded.\n"); + + return 0; +err_cleanup_list: + cleanup_entry_list(); + kset_unregister(esrt_kset); +err_remove_group: + sysfs_remove_group(esrt_kobj, &esrt_attr_group); +err_remove_esrt: + kobject_put(esrt_kobj); +err: + kfree(esrt); + esrt = NULL; + return error; +} + +static void __exit esrt_sysfs_exit(void) +{ + pr_debug("esrt-sysfs: unloading.\n"); + cleanup_entry_list(); + kset_unregister(esrt_kset); + sysfs_remove_group(esrt_kobj, &esrt_attr_group); + kfree(esrt); + esrt = NULL; + kobject_del(esrt_kobj); + kobject_put(esrt_kobj); +} + +module_init(esrt_sysfs_init); +module_exit(esrt_sysfs_exit); + +MODULE_AUTHOR("Peter Jones <pjones@redhat.com>"); +MODULE_DESCRIPTION("EFI System Resource Table support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index caefe806db5e..8f1fe739c985 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -126,6 +126,14 @@ config GPIO_BCM_KONA help Turn on GPIO support for Broadcom "Kona" chips. +config GPIO_BRCMSTB + tristate "BRCMSTB GPIO support" + default y if ARCH_BRCMSTB + depends on OF_GPIO && (ARCH_BRCMSTB || COMPILE_TEST) + select GPIO_GENERIC + help + Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs. + config GPIO_CLPS711X tristate "CLPS711X GPIO support" depends on ARCH_CLPS711X || COMPILE_TEST @@ -159,6 +167,14 @@ config GPIO_EP93XX depends on ARCH_EP93XX select GPIO_GENERIC +config GPIO_ETRAXFS + bool "Axis ETRAX FS General I/O" + depends on CRIS || COMPILE_TEST + depends on OF + select GPIO_GENERIC + help + Say yes here to support the GPIO controller on Axis ETRAX FS SoCs. + config GPIO_F7188X tristate "F71869, F71869A, F71882FG and F71889F GPIO support" depends on X86 @@ -230,6 +246,14 @@ config GPIO_LOONGSON help driver for GPIO functionality on Loongson-2F/3A/3B processors. +config GPIO_LPC18XX + bool "NXP LPC18XX/43XX GPIO support" + default y if ARCH_LPC18XX + depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST) + help + Select this option to enable GPIO driver for + NXP LPC18XX/43XX devices. + config GPIO_LYNXPOINT tristate "Intel Lynxpoint GPIO support" depends on ACPI && X86 @@ -308,7 +332,7 @@ config GPIO_OCTEON family of SOCs. config GPIO_OMAP - bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS + tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST default y if ARCH_OMAP depends on ARM select GENERIC_IRQ_CHIP @@ -488,6 +512,17 @@ config GPIO_XILINX help Say yes here to support the Xilinx FPGA GPIO device +config GPIO_XLP + tristate "Netlogic XLP GPIO support" + depends on CPU_XLP + select GPIOLIB_IRQCHIP + help + This driver provides support for GPIO interface on Netlogic XLP MIPS64 + SoCs. Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, + XLP9XX and XLP5XX. 
+ + If unsure, say N. + config GPIO_XTENSA bool "Xtensa GPIO32 support" depends on XTENSA @@ -505,7 +540,7 @@ config GPIO_ZEVIO config GPIO_ZYNQ tristate "Xilinx Zynq GPIO support" - depends on ARCH_ZYNQ + depends on ARCH_ZYNQ || ARCH_ZYNQMP select GPIOLIB_IRQCHIP help Say yes here to support Xilinx Zynq GPIO controller. diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index f71bb971329c..f82cd678ce08 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o +obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o @@ -32,6 +33,7 @@ obj-$(CONFIG_GPIO_DLN2) += gpio-dln2.o obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o obj-$(CONFIG_GPIO_EM) += gpio-em.o obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o +obj-$(CONFIG_GPIO_ETRAXFS) += gpio-etraxfs.o obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o @@ -44,6 +46,7 @@ obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o obj-$(CONFIG_GPIO_INTEL_MID) += gpio-intel-mid.o obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o +obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o @@ -109,6 +112,7 @@ obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o +obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 449fb46cb8a0..0f3d336d6303 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -107,7 +107,8 @@ static int altera_gpio_irq_set_type(struct irq_data *d, return -EINVAL; } -static unsigned int altera_gpio_irq_startup(struct irq_data *d) { +static unsigned int altera_gpio_irq_startup(struct irq_data *d) +{ altera_gpio_irq_unmask(d); return 0; diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index b164ce837b43..a6e79225886d 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -122,6 +122,16 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio, spin_unlock_irqrestore(&kona_gpio->lock, flags); } +static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio) +{ + struct bcm_kona_gpio *kona_gpio = to_kona_gpio(chip); + void __iomem *reg_base = kona_gpio->reg_base; + u32 val; + + val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK; + return val ? 
GPIOF_DIR_IN : GPIOF_DIR_OUT; +} + static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value) { struct bcm_kona_gpio *kona_gpio; @@ -135,12 +145,8 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value) reg_base = kona_gpio->reg_base; spin_lock_irqsave(&kona_gpio->lock, flags); - /* determine the GPIO pin direction */ - val = readl(reg_base + GPIO_CONTROL(gpio)); - val &= GPIO_GPCTR0_IOTR_MASK; - /* this function only applies to output pin */ - if (GPIO_GPCTR0_IOTR_CMD_INPUT == val) + if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN) goto out; reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id); @@ -166,13 +172,12 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio) reg_base = kona_gpio->reg_base; spin_lock_irqsave(&kona_gpio->lock, flags); - /* determine the GPIO pin direction */ - val = readl(reg_base + GPIO_CONTROL(gpio)); - val &= GPIO_GPCTR0_IOTR_MASK; + if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN) + reg_offset = GPIO_IN_STATUS(bank_id); + else + reg_offset = GPIO_OUT_STATUS(bank_id); /* read the GPIO bank status */ - reg_offset = (GPIO_GPCTR0_IOTR_CMD_INPUT == val) ? - GPIO_IN_STATUS(bank_id) : GPIO_OUT_STATUS(bank_id); val = readl(reg_base + reg_offset); spin_unlock_irqrestore(&kona_gpio->lock, flags); @@ -310,6 +315,7 @@ static struct gpio_chip template_chip = { .owner = THIS_MODULE, .request = bcm_kona_gpio_request, .free = bcm_kona_gpio_free, + .get_direction = bcm_kona_gpio_get_dir, .direction_input = bcm_kona_gpio_direction_input, .get = bcm_kona_gpio_get, .direction_output = bcm_kona_gpio_direction_output, diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c new file mode 100644 index 000000000000..7a3cb1fa0a76 --- /dev/null +++ b/drivers/gpio/gpio-brcmstb.c @@ -0,0 +1,252 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
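The bcm-kona change above is a small refactor: the direction-register decode that was duplicated in the set and get paths becomes one helper that both reuse, and which the gpio_chip can now also export as .get_direction. A compact sketch of that shape; the register names and bit layout are placeholders, not the Kona layout:

```c
#include <stdint.h>

#define DIR_IN    1
#define DIR_OUT   0
#define IOTR_MASK 0x7u

static uint32_t control_reg;    /* stand-in for the per-pin control register */
static uint32_t out_reg;        /* stand-in for the output data register */
static uint32_t in_status_reg;  /* stand-in for the input status register */

/* the single helper that the set path, get path, and .get_direction share */
static int gpio_get_dir(unsigned int gpio)
{
	(void)gpio;  /* a real driver would index a per-pin register here */
	return (control_reg & IOTR_MASK) ? DIR_IN : DIR_OUT;
}

static void gpio_set(unsigned int gpio, int value)
{
	if (gpio_get_dir(gpio) == DIR_IN)
		return;  /* writing the latch only makes sense for outputs */
	if (value)
		out_reg |= 1u << gpio;
	else
		out_reg &= ~(1u << gpio);
}

static int gpio_get(unsigned int gpio)
{
	/* pick the status register that matches the pin's direction */
	uint32_t reg = (gpio_get_dir(gpio) == DIR_IN) ? in_status_reg : out_reg;

	return (reg >> gpio) & 1u;
}

int main(void)
{
	gpio_set(3, 1);          /* control_reg == 0, so pin 3 is an output */
	return gpio_get(3) - 1;  /* exits 0: the latch reads back as 1 */
}
```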
+ */ + +#include <linux/bitops.h> +#include <linux/gpio/driver.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/module.h> +#include <linux/basic_mmio_gpio.h> + +#define GIO_BANK_SIZE 0x20 +#define GIO_ODEN(bank) (((bank) * GIO_BANK_SIZE) + 0x00) +#define GIO_DATA(bank) (((bank) * GIO_BANK_SIZE) + 0x04) +#define GIO_IODIR(bank) (((bank) * GIO_BANK_SIZE) + 0x08) +#define GIO_EC(bank) (((bank) * GIO_BANK_SIZE) + 0x0c) +#define GIO_EI(bank) (((bank) * GIO_BANK_SIZE) + 0x10) +#define GIO_MASK(bank) (((bank) * GIO_BANK_SIZE) + 0x14) +#define GIO_LEVEL(bank) (((bank) * GIO_BANK_SIZE) + 0x18) +#define GIO_STAT(bank) (((bank) * GIO_BANK_SIZE) + 0x1c) + +struct brcmstb_gpio_bank { + struct list_head node; + int id; + struct bgpio_chip bgc; + struct brcmstb_gpio_priv *parent_priv; + u32 width; +}; + +struct brcmstb_gpio_priv { + struct list_head bank_list; + void __iomem *reg_base; + int num_banks; + struct platform_device *pdev; + int gpio_base; +}; + +#define MAX_GPIO_PER_BANK 32 +#define GPIO_BANK(gpio) ((gpio) >> 5) +/* assumes MAX_GPIO_PER_BANK is a power of 2 */ +#define GPIO_BIT(gpio) ((gpio) & (MAX_GPIO_PER_BANK - 1)) + +static inline struct brcmstb_gpio_bank * +brcmstb_gpio_gc_to_bank(struct gpio_chip *gc) +{ + struct bgpio_chip *bgc = to_bgpio_chip(gc); + return container_of(bgc, struct brcmstb_gpio_bank, bgc); +} + +static inline struct brcmstb_gpio_priv * +brcmstb_gpio_gc_to_priv(struct gpio_chip *gc) +{ + struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc); + return bank->parent_priv; +} + +/* Make sure that the number of banks matches up between properties */ +static int brcmstb_gpio_sanity_check_banks(struct device *dev, + struct device_node *np, struct resource *res) +{ + int res_num_banks = resource_size(res) / GIO_BANK_SIZE; + int num_banks = + of_property_count_u32_elems(np, "brcm,gpio-bank-widths"); + + if (res_num_banks != num_banks) { + dev_err(dev, "Mismatch in banks: res had %d, bank-widths had %d\n", + res_num_banks, num_banks); + return -EINVAL; + } else { + return 0; + } +} + +static int brcmstb_gpio_remove(struct platform_device *pdev) +{ + struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev); + struct list_head *pos; + struct brcmstb_gpio_bank *bank; + int ret = 0; + + list_for_each(pos, &priv->bank_list) { + bank = list_entry(pos, struct brcmstb_gpio_bank, node); + ret = bgpio_remove(&bank->bgc); + if (ret) + dev_err(&pdev->dev, "gpiochip_remove fail in cleanup"); + } + return ret; +} + +static int brcmstb_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, u32 *flags) +{ + struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc); + struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc); + int offset; + + if (gc->of_gpio_n_cells != 2) { + WARN_ON(1); + return -EINVAL; + } + + if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells)) + return -EINVAL; + + offset = gpiospec->args[0] - (gc->base - priv->gpio_base); + if (offset >= gc->ngpio) + return -EINVAL; + + if (unlikely(offset >= bank->width)) { + dev_warn_ratelimited(&priv->pdev->dev, + "Received request for invalid GPIO offset %d\n", + gpiospec->args[0]); + } + + if (flags) + *flags = gpiospec->args[1]; + + return offset; +} + +static int brcmstb_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + void __iomem *reg_base; + struct brcmstb_gpio_priv *priv; + struct resource *res; + struct property *prop; + const __be32 *p; + u32 bank_width; + int err; + static int 
gpio_base; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(reg_base)) + return PTR_ERR(reg_base); + + priv->gpio_base = gpio_base; + priv->reg_base = reg_base; + priv->pdev = pdev; + + INIT_LIST_HEAD(&priv->bank_list); + if (brcmstb_gpio_sanity_check_banks(dev, np, res)) + return -EINVAL; + + of_property_for_each_u32(np, "brcm,gpio-bank-widths", prop, p, + bank_width) { + struct brcmstb_gpio_bank *bank; + struct bgpio_chip *bgc; + struct gpio_chip *gc; + + bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL); + if (!bank) { + err = -ENOMEM; + goto fail; + } + + bank->parent_priv = priv; + bank->id = priv->num_banks; + if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) { + dev_err(dev, "Invalid bank width %d\n", bank_width); + goto fail; + } else { + bank->width = bank_width; + } + + /* + * Regs are 4 bytes wide, have data reg, no set/clear regs, + * and direction bits have 0 = output and 1 = input + */ + bgc = &bank->bgc; + err = bgpio_init(bgc, dev, 4, + reg_base + GIO_DATA(bank->id), + NULL, NULL, NULL, + reg_base + GIO_IODIR(bank->id), 0); + if (err) { + dev_err(dev, "bgpio_init() failed\n"); + goto fail; + } + + gc = &bgc->gc; + gc->of_node = np; + gc->owner = THIS_MODULE; + gc->label = np->full_name; + gc->base = gpio_base; + gc->of_gpio_n_cells = 2; + gc->of_xlate = brcmstb_gpio_of_xlate; + /* not all ngpio lines are valid, will use bank width later */ + gc->ngpio = MAX_GPIO_PER_BANK; + + err = gpiochip_add(gc); + if (err) { + dev_err(dev, "Could not add gpiochip for bank %d\n", + bank->id); + goto fail; + } + gpio_base += gc->ngpio; + dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id, + gc->base, gc->ngpio, bank->width); + + /* Everything looks good, so add bank to list */ + list_add(&bank->node, &priv->bank_list); + + priv->num_banks++; + } + + dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", + priv->num_banks, priv->gpio_base, gpio_base - 1); + + platform_set_drvdata(pdev, priv); + + return 0; + +fail: + (void) brcmstb_gpio_remove(pdev); + return err; +} + +static const struct of_device_id brcmstb_gpio_of_match[] = { + { .compatible = "brcm,brcmstb-gpio" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, brcmstb_gpio_of_match); + +static struct platform_driver brcmstb_gpio_driver = { + .driver = { + .name = "brcmstb-gpio", + .of_match_table = brcmstb_gpio_of_match, + }, + .probe = brcmstb_gpio_probe, + .remove = brcmstb_gpio_remove, +}; +module_platform_driver(brcmstb_gpio_driver); + +MODULE_AUTHOR("Gregory Fong"); +MODULE_DESCRIPTION("Driver for Broadcom BRCMSTB SoC UPG GPIO"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 91a7ffe83135..fddd204dc9b6 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -16,6 +16,7 @@ */ #include <linux/interrupt.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/seq_file.h> @@ -94,9 +95,8 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type) { int reg; - if (gpio == 94) { + if (gpio == 94) return GPIOPANELCTL; - } if (reg_type == CTRL_IN) { if (gpio < 8) @@ -255,6 +255,7 @@ static struct irq_chip crystalcove_irqchip = { .irq_set_type = crystalcove_irq_type, .irq_bus_lock = crystalcove_bus_lock, .irq_bus_sync_unlock = crystalcove_bus_sync_unlock, + .flags = IRQCHIP_SKIP_SET_WAKE, }; static irqreturn_t 
crystalcove_gpio_irq_handler(int irq, void *data) diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c index dbdb4de82c6d..6685712c15cf 100644 --- a/drivers/gpio/gpio-dln2.c +++ b/drivers/gpio/gpio-dln2.c @@ -466,7 +466,6 @@ static int dln2_gpio_probe(struct platform_device *pdev) dln2->gpio.owner = THIS_MODULE; dln2->gpio.base = -1; dln2->gpio.ngpio = pins; - dln2->gpio.exported = true; dln2->gpio.can_sleep = true; dln2->gpio.irq_not_threaded = true; dln2->gpio.set = dln2_gpio_set; diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c new file mode 100644 index 000000000000..28071f4a5672 --- /dev/null +++ b/drivers/gpio/gpio-etraxfs.c @@ -0,0 +1,176 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/gpio.h> +#include <linux/of_gpio.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/basic_mmio_gpio.h> + +#define ETRAX_FS_rw_pa_dout 0 +#define ETRAX_FS_r_pa_din 4 +#define ETRAX_FS_rw_pa_oe 8 +#define ETRAX_FS_rw_intr_cfg 12 +#define ETRAX_FS_rw_intr_mask 16 +#define ETRAX_FS_rw_ack_intr 20 +#define ETRAX_FS_r_intr 24 +#define ETRAX_FS_rw_pb_dout 32 +#define ETRAX_FS_r_pb_din 36 +#define ETRAX_FS_rw_pb_oe 40 +#define ETRAX_FS_rw_pc_dout 48 +#define ETRAX_FS_r_pc_din 52 +#define ETRAX_FS_rw_pc_oe 56 +#define ETRAX_FS_rw_pd_dout 64 +#define ETRAX_FS_r_pd_din 68 +#define ETRAX_FS_rw_pd_oe 72 +#define ETRAX_FS_rw_pe_dout 80 +#define ETRAX_FS_r_pe_din 84 +#define ETRAX_FS_rw_pe_oe 88 + +struct etraxfs_gpio_port { + const char *label; + unsigned int oe; + unsigned int dout; + unsigned int din; + unsigned int ngpio; +}; + +struct etraxfs_gpio_info { + unsigned int num_ports; + const struct etraxfs_gpio_port *ports; +}; + +static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = { + { + .label = "A", + .ngpio = 8, + .oe = ETRAX_FS_rw_pa_oe, + .dout = ETRAX_FS_rw_pa_dout, + .din = ETRAX_FS_r_pa_din, + }, + { + .label = "B", + .ngpio = 18, + .oe = ETRAX_FS_rw_pb_oe, + .dout = ETRAX_FS_rw_pb_dout, + .din = ETRAX_FS_r_pb_din, + }, + { + .label = "C", + .ngpio = 18, + .oe = ETRAX_FS_rw_pc_oe, + .dout = ETRAX_FS_rw_pc_dout, + .din = ETRAX_FS_r_pc_din, + }, + { + .label = "D", + .ngpio = 18, + .oe = ETRAX_FS_rw_pd_oe, + .dout = ETRAX_FS_rw_pd_dout, + .din = ETRAX_FS_r_pd_din, + }, + { + .label = "E", + .ngpio = 18, + .oe = ETRAX_FS_rw_pe_oe, + .dout = ETRAX_FS_rw_pe_dout, + .din = ETRAX_FS_r_pe_din, + }, +}; + +static const struct etraxfs_gpio_info etraxfs_gpio_etraxfs = { + .num_ports = ARRAY_SIZE(etraxfs_gpio_etraxfs_ports), + .ports = etraxfs_gpio_etraxfs_ports, +}; + +static int etraxfs_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + /* + * Port numbers are A to E, and the properties are integers, so we + * specify them as 0xA - 0xE. 
+ */ + if (gc->label[0] - 'A' + 0xA != gpiospec->args[2]) + return -EINVAL; + + return of_gpio_simple_xlate(gc, gpiospec, flags); +} + +static const struct of_device_id etraxfs_gpio_of_table[] = { + { + .compatible = "axis,etraxfs-gio", + .data = &etraxfs_gpio_etraxfs, + }, + {}, +}; + +static int etraxfs_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct etraxfs_gpio_info *info; + const struct of_device_id *match; + struct bgpio_chip *chips; + struct resource *res; + void __iomem *regs; + int ret; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + match = of_match_node(etraxfs_gpio_of_table, dev->of_node); + if (!match) + return -EINVAL; + + info = match->data; + + chips = devm_kzalloc(dev, sizeof(*chips) * info->num_ports, GFP_KERNEL); + if (!chips) + return -ENOMEM; + + for (i = 0; i < info->num_ports; i++) { + struct bgpio_chip *bgc = &chips[i]; + const struct etraxfs_gpio_port *port = &info->ports[i]; + + ret = bgpio_init(bgc, dev, 4, + regs + port->din, /* dat */ + regs + port->dout, /* set */ + NULL, /* clr */ + regs + port->oe, /* dirout */ + NULL, /* dirin */ + BGPIOF_UNREADABLE_REG_SET); + if (ret) + return ret; + + bgc->gc.ngpio = port->ngpio; + bgc->gc.label = port->label; + + bgc->gc.of_node = dev->of_node; + bgc->gc.of_gpio_n_cells = 3; + bgc->gc.of_xlate = etraxfs_gpio_of_xlate; + + ret = gpiochip_add(&bgc->gc); + if (ret) + dev_err(dev, "Unable to register port %s\n", + bgc->gc.label); + } + + return 0; +} + +static struct platform_driver etraxfs_gpio_driver = { + .driver = { + .name = "etraxfs-gpio", + .of_match_table = of_match_ptr(etraxfs_gpio_of_table), + }, + .probe = etraxfs_gpio_probe, +}; + +static int __init etraxfs_gpio_init(void) +{ + return platform_driver_register(&etraxfs_gpio_driver); +} + +device_initcall(etraxfs_gpio_init); diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c index dbda8433c4f7..5e3c4fa67d82 100644 --- a/drivers/gpio/gpio-f7188x.c +++ b/drivers/gpio/gpio-f7188x.c @@ -172,7 +172,7 @@ static struct f7188x_gpio_bank f71869a_gpio_bank[] = { }; static struct f7188x_gpio_bank f71882_gpio_bank[] = { - F7188X_GPIO_BANK(0 , 8, 0xF0), + F7188X_GPIO_BANK(0, 8, 0xF0), F7188X_GPIO_BANK(10, 8, 0xE0), F7188X_GPIO_BANK(20, 8, 0xD0), F7188X_GPIO_BANK(30, 4, 0xC0), @@ -180,7 +180,7 @@ static struct f7188x_gpio_bank f71882_gpio_bank[] = { }; static struct f7188x_gpio_bank f71889_gpio_bank[] = { - F7188X_GPIO_BANK(0 , 7, 0xF0), + F7188X_GPIO_BANK(0, 7, 0xF0), F7188X_GPIO_BANK(10, 7, 0xE0), F7188X_GPIO_BANK(20, 8, 0xD0), F7188X_GPIO_BANK(30, 8, 0xC0), diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index b92a690f5765..9bda3727fac1 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -135,6 +135,17 @@ static unsigned long bgpio_pin2mask_be(struct bgpio_chip *bgc, return 1 << (bgc->bits - 1 - pin). } +static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio) +{ + struct bgpio_chip *bgc = to_bgpio_chip(gc); + unsigned long pinmask = bgc->pin2mask(bgc, gpio); + + if (bgc->dir & pinmask) + return bgc->read_reg(bgc->reg_set) & pinmask; + else + return bgc->read_reg(bgc->reg_dat) & pinmask; +} + static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) { struct bgpio_chip *bgc = to_bgpio_chip(gc); @@ -416,7 +427,8 @@ static int bgpio_setup_io(struct bgpio_chip *bgc, void __iomem *dat, void __iomem *set, - 
void __iomem *clr) + void __iomem *clr, + unsigned long flags) { bgc->reg_dat = dat; @@ -437,7 +449,11 @@ static int bgpio_setup_io(struct bgpio_chip *bgc, bgc->gc.set_multiple = bgpio_set_multiple; } - bgc->gc.get = bgpio_get; + if (!(flags & BGPIOF_UNREADABLE_REG_SET) && + (flags & BGPIOF_READ_OUTPUT_REG_SET)) + bgc->gc.get = bgpio_get_set; + else + bgc->gc.get = bgpio_get; return 0; } @@ -500,7 +516,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev, bgc->gc.ngpio = bgc->bits; bgc->gc.request = bgpio_request; - ret = bgpio_setup_io(bgc, dat, set, clr); + ret = bgpio_setup_io(bgc, dat, set, clr, flags); if (ret) return ret; diff --git a/drivers/gpio/gpio-it8761e.c b/drivers/gpio/gpio-it8761e.c index dadfc245cf09..30a8f24c92c5 100644 --- a/drivers/gpio/gpio-it8761e.c +++ b/drivers/gpio/gpio-it8761e.c @@ -123,7 +123,7 @@ static void it8761e_gpio_set(struct gpio_chip *gc, curr_vals = inb(reg); if (val) - outb(curr_vals | (1 << bit) , reg); + outb(curr_vals | (1 << bit), reg); else outb(curr_vals & ~(1 << bit), reg); diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c new file mode 100644 index 000000000000..eb68603136b0 --- /dev/null +++ b/drivers/gpio/gpio-lpc18xx.c @@ -0,0 +1,180 @@ +/* + * GPIO driver for NXP LPC18xx/43xx. + * + * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/clk.h> +#include <linux/gpio/driver.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> + +/* LPC18xx GPIO register offsets */ +#define LPC18XX_REG_DIR(n) (0x2000 + n * sizeof(u32)) + +#define LPC18XX_MAX_PORTS 8 +#define LPC18XX_PINS_PER_PORT 32 + +struct lpc18xx_gpio_chip { + struct gpio_chip gpio; + void __iomem *base; + struct clk *clk; + spinlock_t lock; +}; + +static inline struct lpc18xx_gpio_chip *to_lpc18xx_gpio(struct gpio_chip *chip) +{ + return container_of(chip, struct lpc18xx_gpio_chip, gpio); +} + +static int lpc18xx_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + return pinctrl_request_gpio(offset); +} + +static void lpc18xx_gpio_free(struct gpio_chip *chip, unsigned offset) +{ + pinctrl_free_gpio(offset); +} + +static void lpc18xx_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct lpc18xx_gpio_chip *gc = to_lpc18xx_gpio(chip); + writeb(value ? 
1 : 0, gc->base + offset); +} + +static int lpc18xx_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct lpc18xx_gpio_chip *gc = to_lpc18xx_gpio(chip); + return !!readb(gc->base + offset); +} + +static int lpc18xx_gpio_direction(struct gpio_chip *chip, unsigned offset, + bool out) +{ + struct lpc18xx_gpio_chip *gc = to_lpc18xx_gpio(chip); + unsigned long flags; + u32 port, pin, dir; + + port = offset / LPC18XX_PINS_PER_PORT; + pin = offset % LPC18XX_PINS_PER_PORT; + + spin_lock_irqsave(&gc->lock, flags); + dir = readl(gc->base + LPC18XX_REG_DIR(port)); + if (out) + dir |= BIT(pin); + else + dir &= ~BIT(pin); + writel(dir, gc->base + LPC18XX_REG_DIR(port)); + spin_unlock_irqrestore(&gc->lock, flags); + + return 0; +} + +static int lpc18xx_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + return lpc18xx_gpio_direction(chip, offset, false); +} + +static int lpc18xx_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + lpc18xx_gpio_set(chip, offset, value); + return lpc18xx_gpio_direction(chip, offset, true); +} + +static struct gpio_chip lpc18xx_chip = { + .label = "lpc18xx/43xx-gpio", + .request = lpc18xx_gpio_request, + .free = lpc18xx_gpio_free, + .direction_input = lpc18xx_gpio_direction_input, + .direction_output = lpc18xx_gpio_direction_output, + .set = lpc18xx_gpio_set, + .get = lpc18xx_gpio_get, + .ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT, + .owner = THIS_MODULE, +}; + +static int lpc18xx_gpio_probe(struct platform_device *pdev) +{ + struct lpc18xx_gpio_chip *gc; + struct resource *res; + int ret; + + gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL); + if (!gc) + return -ENOMEM; + + gc->gpio = lpc18xx_chip; + platform_set_drvdata(pdev, gc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gc->base)) + return PTR_ERR(gc->base); + + gc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(gc->clk)) { + dev_err(&pdev->dev, "input clock not found\n"); + return PTR_ERR(gc->clk); + } + + ret = clk_prepare_enable(gc->clk); + if (ret) { + dev_err(&pdev->dev, "unable to enable clock\n"); + return ret; + } + + spin_lock_init(&gc->lock); + + gc->gpio.dev = &pdev->dev; + + ret = gpiochip_add(&gc->gpio); + if (ret) { + dev_err(&pdev->dev, "failed to add gpio chip\n"); + clk_disable_unprepare(gc->clk); + return ret; + } + + return 0; +} + +static int lpc18xx_gpio_remove(struct platform_device *pdev) +{ + struct lpc18xx_gpio_chip *gc = platform_get_drvdata(pdev); + + gpiochip_remove(&gc->gpio); + clk_disable_unprepare(gc->clk); + + return 0; +} + +static const struct of_device_id lpc18xx_gpio_match[] = { + { .compatible = "nxp,lpc1850-gpio" }, + { } +}; +MODULE_DEVICE_TABLE(of, lpc18xx_gpio_match); + +static struct platform_driver lpc18xx_gpio_driver = { + .probe = lpc18xx_gpio_probe, + .remove = lpc18xx_gpio_remove, + .driver = { + .name = "lpc18xx-gpio", + .of_match_table = lpc18xx_gpio_match, + }, +}; +module_platform_driver(lpc18xx_gpio_driver); + +MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>"); +MODULE_DESCRIPTION("GPIO driver for LPC18xx/43xx"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 127c755b38dc..153af464c7a7 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -72,7 +72,7 @@ struct lp_gpio { * * per gpio specific registers consist of two 32bit registers per gpio * (LP_CONFIG1 and LP_CONFIG2), with 94 gpios there's a total of - * 188 config 
registes. + * 188 config registers. * * A simplified view of the register layout look like this: * diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c index 0fa4543c5e02..aed4ca9338bc 100644 --- a/drivers/gpio/gpio-max732x.c +++ b/drivers/gpio/gpio-max732x.c @@ -429,6 +429,14 @@ static int max732x_irq_set_type(struct irq_data *d, unsigned int type) return 0; } +static int max732x_irq_set_wake(struct irq_data *data, unsigned int on) +{ + struct max732x_chip *chip = irq_data_get_irq_chip_data(data); + + irq_set_irq_wake(chip->client->irq, on); + return 0; +} + static struct irq_chip max732x_irq_chip = { .name = "max732x", .irq_mask = max732x_irq_mask, @@ -436,6 +444,7 @@ static struct irq_chip max732x_irq_chip = { .irq_bus_lock = max732x_irq_bus_lock, .irq_bus_sync_unlock = max732x_irq_bus_sync_unlock, .irq_set_type = max732x_irq_set_type, + .irq_set_wake = max732x_irq_set_wake, }; static uint8_t max732x_irq_pending(struct max732x_chip *chip) @@ -507,12 +516,10 @@ static int max732x_irq_setup(struct max732x_chip *chip, chip->irq_features = has_irq; mutex_init(&chip->irq_lock); - ret = devm_request_threaded_irq(&client->dev, - client->irq, - NULL, - max732x_irq_handler, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - dev_name(&client->dev), chip); + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, max732x_irq_handler, IRQF_ONESHOT | + IRQF_TRIGGER_FALLING | IRQF_SHARED, + dev_name(&client->dev), chip); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", client->irq); @@ -521,7 +528,7 @@ static int max732x_irq_setup(struct max732x_chip *chip, ret = gpiochip_irqchip_add(&chip->gpio_chip, &max732x_irq_chip, irq_base, - handle_edge_irq, + handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(&client->dev, diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c index c3ab46e595da..abd8676ce2b6 100644 --- a/drivers/gpio/gpio-moxart.c +++ b/drivers/gpio/gpio-moxart.c @@ -39,17 +39,6 @@ static void moxart_gpio_free(struct gpio_chip *chip, unsigned offset) pinctrl_free_gpio(offset); } -static int moxart_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct bgpio_chip *bgc = to_bgpio_chip(chip); - u32 ret = bgc->read_reg(bgc->reg_dir); - - if (ret & BIT(offset)) - return !!(bgc->read_reg(bgc->reg_set) & BIT(offset)); - else - return !!(bgc->read_reg(bgc->reg_dat) & BIT(offset)); -} - static int moxart_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -68,8 +57,9 @@ static int moxart_gpio_probe(struct platform_device *pdev) return PTR_ERR(base); ret = bgpio_init(bgc, dev, 4, base + GPIO_DATA_IN, - base + GPIO_DATA_OUT, NULL, - base + GPIO_PIN_DIRECTION, NULL, 0); + base + GPIO_DATA_OUT, NULL, + base + GPIO_PIN_DIRECTION, NULL, + BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(&pdev->dev, "bgpio_init failed\n"); return ret; @@ -78,7 +68,6 @@ static int moxart_gpio_probe(struct platform_device *pdev) bgc->gc.label = "moxart-gpio"; bgc->gc.request = moxart_gpio_request; bgc->gc.free = moxart_gpio_free; - bgc->gc.get = moxart_gpio_get; bgc->data = bgc->read_reg(bgc->reg_set); bgc->gc.base = 0; bgc->gc.ngpio = 32; diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 9f7446a7ac64..ec1eb1b7250f 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -131,7 +131,7 @@ static struct mxc_gpio_hwdata *mxc_gpio_hwdata; #define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge) #define GPIO_INT_BOTH_EDGES 0x4 -static struct platform_device_id mxc_gpio_devtype[] = { +static const struct 
platform_device_id mxc_gpio_devtype[] = { { .name = "imx1-gpio", .driver_data = IMX1_GPIO, @@ -437,20 +437,20 @@ static int mxc_gpio_probe(struct platform_device *pdev) irq_set_chained_handler(port->irq, mx2_gpio_irq_handler); } else { /* setup one handler for each entry */ - irq_set_chained_handler(port->irq, mx3_gpio_irq_handler); - irq_set_handler_data(port->irq, port); - if (port->irq_high > 0) { + irq_set_chained_handler_and_data(port->irq, + mx3_gpio_irq_handler, port); + if (port->irq_high > 0) /* setup handler for GPIO 16 to 31 */ - irq_set_chained_handler(port->irq_high, - mx3_gpio_irq_handler); - irq_set_handler_data(port->irq_high, port); - } + irq_set_chained_handler_and_data(port->irq_high, + mx3_gpio_irq_handler, + port); } err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + GPIO_PSR, port->base + GPIO_DR, NULL, - port->base + GPIO_GDIR, NULL, 0); + port->base + GPIO_GDIR, NULL, + BGPIOF_READ_OUTPUT_REG_SET); if (err) goto out_bgio; diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index 84cbda6acdda..551d15d7c369 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -239,7 +239,7 @@ static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset) return !(dir & mask); } -static struct platform_device_id mxs_gpio_ids[] = { +static const struct platform_device_id mxs_gpio_ids[] = { { .name = "imx23-gpio", .driver_data = IMX23_GPIO, @@ -320,8 +320,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) mxs_gpio_init_gc(port, irq_base); /* setup one handler for each entry */ - irq_set_chained_handler(port->irq, mxs_gpio_irq_handler); - irq_set_handler_data(port->irq, port); + irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler, + port); err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + PINCTRL_DIN(port), diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index b232397ad7ec..b0c57d505be7 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -488,9 +488,6 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) unsigned long flags; unsigned offset = d->hwirq; - if (!BANK_USED(bank)) - pm_runtime_get_sync(bank->dev); - if (type & ~IRQ_TYPE_SENSE_MASK) return -EINVAL; @@ -498,12 +495,18 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) return -EINVAL; + if (!BANK_USED(bank)) + pm_runtime_get_sync(bank->dev); + spin_lock_irqsave(&bank->lock, flags); retval = omap_set_gpio_triggering(bank, offset, type); + if (retval) + goto error; omap_gpio_init_irq(bank, offset); if (!omap_gpio_is_input(bank, offset)) { spin_unlock_irqrestore(&bank->lock, flags); - return -EINVAL; + retval = -EINVAL; + goto error; } spin_unlock_irqrestore(&bank->lock, flags); @@ -512,6 +515,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) __irq_set_handler_locked(d->irq, handle_edge_irq); + return 0; + +error: + if (!BANK_USED(bank)) + pm_runtime_put(bank->dev); return retval; } @@ -638,15 +646,6 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset, return 0; } -static void omap_reset_gpio(struct gpio_bank *bank, unsigned offset) -{ - omap_set_gpio_direction(bank, offset, 1); - omap_set_gpio_irqenable(bank, offset, 0); - omap_clear_gpio_irqstatus(bank, offset); - omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); - omap_clear_gpio_debounce(bank, offset); -} - /* Use disable_irq_wake() and enable_irq_wake() functions from drivers 
*/ static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable) { @@ -669,14 +668,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) pm_runtime_get_sync(bank->dev); spin_lock_irqsave(&bank->lock, flags); - /* Set trigger to none. You need to enable the desired trigger with - * request_irq() or set_irq_type(). Only do this if the IRQ line has - * not already been requested. - */ - if (!LINE_USED(bank->irq_usage, offset)) { - omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); - omap_enable_gpio_module(bank, offset); - } + omap_enable_gpio_module(bank, offset); bank->mod_usage |= BIT(offset); spin_unlock_irqrestore(&bank->lock, flags); @@ -690,8 +682,11 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) spin_lock_irqsave(&bank->lock, flags); bank->mod_usage &= ~(BIT(offset)); + if (!LINE_USED(bank->irq_usage, offset)) { + omap_set_gpio_direction(bank, offset, 1); + omap_clear_gpio_debounce(bank, offset); + } omap_disable_gpio_module(bank, offset); - omap_reset_gpio(bank, offset); spin_unlock_irqrestore(&bank->lock, flags); /* @@ -795,11 +790,23 @@ static unsigned int omap_gpio_irq_startup(struct irq_data *d) pm_runtime_get_sync(bank->dev); spin_lock_irqsave(&bank->lock, flags); - omap_gpio_init_irq(bank, offset); + + if (!LINE_USED(bank->mod_usage, offset)) + omap_set_gpio_direction(bank, offset, 1); + else if (!omap_gpio_is_input(bank, offset)) + goto err; + omap_enable_gpio_module(bank, offset); + bank->irq_usage |= BIT(offset); + spin_unlock_irqrestore(&bank->lock, flags); omap_gpio_unmask_irq(d); return 0; +err: + spin_unlock_irqrestore(&bank->lock, flags); + if (!BANK_USED(bank)) + pm_runtime_put(bank->dev); + return -EINVAL; } static void omap_gpio_irq_shutdown(struct irq_data *d) @@ -810,8 +817,12 @@ static void omap_gpio_irq_shutdown(struct irq_data *d) spin_lock_irqsave(&bank->lock, flags); bank->irq_usage &= ~(BIT(offset)); + omap_set_gpio_irqenable(bank, offset, 0); + omap_clear_gpio_irqstatus(bank, offset); + omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); + if (!LINE_USED(bank->mod_usage, offset)) + omap_clear_gpio_debounce(bank, offset); omap_disable_gpio_module(bank, offset); - omap_reset_gpio(bank, offset); spin_unlock_irqrestore(&bank->lock, flags); /* @@ -1233,6 +1244,17 @@ static int omap_gpio_probe(struct platform_device *pdev) return 0; } +static int omap_gpio_remove(struct platform_device *pdev) +{ + struct gpio_bank *bank = platform_get_drvdata(pdev); + + list_del(&bank->node); + gpiochip_remove(&bank->chip); + pm_runtime_disable(bank->dev); + + return 0; +} + #ifdef CONFIG_ARCH_OMAP2PLUS #if defined(CONFIG_PM) @@ -1418,6 +1440,7 @@ static int omap_gpio_runtime_resume(struct device *dev) } #endif /* CONFIG_PM */ +#if IS_BUILTIN(CONFIG_GPIO_OMAP) void omap2_gpio_prepare_for_idle(int pwr_mode) { struct gpio_bank *bank; @@ -1443,6 +1466,7 @@ void omap2_gpio_resume_after_idle(void) pm_runtime_get_sync(bank->dev); } } +#endif #if defined(CONFIG_PM) static void omap_gpio_init_context(struct gpio_bank *p) @@ -1598,6 +1622,7 @@ MODULE_DEVICE_TABLE(of, omap_gpio_match); static struct platform_driver omap_gpio_driver = { .probe = omap_gpio_probe, + .remove = omap_gpio_remove, .driver = { .name = "omap_gpio", .pm = &gpio_pm_ops, @@ -1615,3 +1640,13 @@ static int __init omap_gpio_drv_reg(void) return platform_driver_register(&omap_gpio_driver); } postcore_initcall(omap_gpio_drv_reg); + +static void __exit omap_gpio_exit(void) +{ + platform_driver_unregister(&omap_gpio_driver); +} +module_exit(omap_gpio_exit); + 
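The OMAP changes above turn a builtin-only driver into one that can also build as a module: probe gains a matching remove, and the early postcore_initcall() registration gains an explicit unregister path for module unload. The resulting skeleton, with the driver name and empty ops as placeholders:

```c
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* undo everything probe set up: remove the gpiochip, disable PM, ... */
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name = "example-gpio",
	},
};

/* registered early so GPIO consumers at postcore time can find the chip */
static int __init example_drv_reg(void)
{
	return platform_driver_register(&example_driver);
}
postcore_initcall(example_drv_reg);

/* the piece a builtin-only driver lacks: a way to unregister on unload */
static void __exit example_drv_unreg(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_drv_unreg);

MODULE_LICENSE("GPL v2");
```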
+MODULE_DESCRIPTION("omap gpio driver"); +MODULE_ALIAS("platform:gpio-omap"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index e2da64abbccd..d233eb3b8132 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -443,12 +443,13 @@ static struct irq_chip pca953x_irq_chip = { .irq_set_type = pca953x_irq_set_type, }; -static u8 pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) +static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) { u8 cur_stat[MAX_BANK]; u8 old_stat[MAX_BANK]; - u8 pendings = 0; - u8 trigger[MAX_BANK], triggers = 0; + bool pending_seen = false; + bool trigger_seen = false; + u8 trigger[MAX_BANK]; int ret, i, offset = 0; switch (chip->chip_type) { @@ -461,7 +462,7 @@ static u8 pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) } ret = pca953x_read_regs(chip, offset, cur_stat); if (ret) - return 0; + return false; /* Remove output pins from the equation */ for (i = 0; i < NBANK(chip); i++) @@ -471,11 +472,12 @@ static u8 pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) for (i = 0; i < NBANK(chip); i++) { trigger[i] = (cur_stat[i] ^ old_stat[i]) & chip->irq_mask[i]; - triggers += trigger[i]; + if (trigger[i]) + trigger_seen = true; } - if (!triggers) - return 0; + if (!trigger_seen) + return false; memcpy(chip->irq_stat, cur_stat, NBANK(chip)); @@ -483,10 +485,11 @@ static u8 pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) pending[i] = (old_stat[i] & chip->irq_trig_fall[i]) | (cur_stat[i] & chip->irq_trig_raise[i]); pending[i] &= trigger[i]; - pendings += pending[i]; + if (pending[i]) + pending_seen = true; } - return pendings; + return pending_seen; } static irqreturn_t pca953x_irq_handler(int irq, void *devid) @@ -630,7 +633,7 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert) memset(val, 0, NBANK(chip)); pca953x_write_regs(chip, PCA957X_INVRT, val); - /* To enable register 6, 7 to controll pull up and pull down */ + /* To enable register 6, 7 to control pull up and pull down */ memset(val, 0x02, NBANK(chip)); pca953x_write_regs(chip, PCA957X_BKEN, val); diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 945f0cda8529..404f3c61ef9b 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -91,6 +91,8 @@ struct pcf857x { spinlock_t slock; /* protect irq demux */ unsigned out; /* software latch */ unsigned status; /* current status */ + unsigned int irq_parent; + unsigned irq_enabled; /* enabled irqs */ int (*write)(struct i2c_client *client, unsigned data); int (*read)(struct i2c_client *client); @@ -194,7 +196,7 @@ static irqreturn_t pcf857x_irq(int irq, void *data) * interrupt source, just to avoid bad irqs */ - change = (gpio->status ^ status); + change = (gpio->status ^ status) & gpio->irq_enabled; for_each_set_bit(i, &change, gpio->chip.ngpio) handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i)); gpio->status = status; @@ -209,29 +211,62 @@ static irqreturn_t pcf857x_irq(int irq, void *data) */ static void noop(struct irq_data *data) { } -static unsigned int noop_ret(struct irq_data *data) +static int pcf857x_irq_set_wake(struct irq_data *data, unsigned int on) { - return 0; + struct pcf857x *gpio = irq_data_get_irq_chip_data(data); + + int error = 0; + + if (gpio->irq_parent) { + error = irq_set_irq_wake(gpio->irq_parent, on); + if (error) { + dev_dbg(&gpio->client->dev, + "irq %u doesn't support irq_set_wake\n", + gpio->irq_parent); + gpio->irq_parent = 0; + } + } 
+ return error; } -static int pcf857x_irq_set_wake(struct irq_data *data, unsigned int on) +static void pcf857x_irq_enable(struct irq_data *data) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); - irq_set_irq_wake(gpio->client->irq, on); - return 0; + gpio->irq_enabled |= (1 << data->hwirq); +} + +static void pcf857x_irq_disable(struct irq_data *data) +{ + struct pcf857x *gpio = irq_data_get_irq_chip_data(data); + + gpio->irq_enabled &= ~(1 << data->hwirq); +} + +static void pcf857x_irq_bus_lock(struct irq_data *data) +{ + struct pcf857x *gpio = irq_data_get_irq_chip_data(data); + + mutex_lock(&gpio->lock); +} + +static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) +{ + struct pcf857x *gpio = irq_data_get_irq_chip_data(data); + + mutex_unlock(&gpio->lock); } static struct irq_chip pcf857x_irq_chip = { .name = "pcf857x", - .irq_startup = noop_ret, - .irq_shutdown = noop, - .irq_enable = noop, - .irq_disable = noop, + .irq_enable = pcf857x_irq_enable, + .irq_disable = pcf857x_irq_disable, .irq_ack = noop, .irq_mask = noop, .irq_unmask = noop, .irq_set_wake = pcf857x_irq_set_wake, + .irq_bus_lock = pcf857x_irq_bus_lock, + .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, }; /*-------------------------------------------------------------------------*/ @@ -364,6 +399,7 @@ static int pcf857x_probe(struct i2c_client *client, gpiochip_set_chained_irqchip(&gpio->chip, &pcf857x_irq_chip, client->irq, NULL); + gpio->irq_parent = client->irq; } /* Let platform code set up the GPIOs and their users. diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index fd3977465948..1e14a6c74ed1 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -177,8 +177,17 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on) struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv, gpio_chip); - - irq_set_irq_wake(p->irq_parent, on); + int error; + + if (p->irq_parent) { + error = irq_set_irq_wake(p->irq_parent, on); + if (error) { + dev_dbg(&p->pdev->dev, + "irq %u doesn't support irq_set_wake\n", + p->irq_parent); + p->irq_parent = 0; + } + } if (!p->clk) return 0; diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c index 202361eb7279..81bdbe7ba2a4 100644 --- a/drivers/gpio/gpio-stp-xway.c +++ b/drivers/gpio/gpio-stp-xway.c @@ -58,7 +58,7 @@ #define XWAY_STP_ADSL_MASK 0x3 /* 2 groups of 3 bits can be driven by the phys */ -#define XWAY_STP_PHY_MASK 0x3 +#define XWAY_STP_PHY_MASK 0x7 #define XWAY_STP_PHY1_SHIFT 27 #define XWAY_STP_PHY2_SHIFT 15 @@ -200,7 +200,7 @@ static int xway_stp_hw_init(struct xway_stp *chip) static int xway_stp_probe(struct platform_device *pdev) { struct resource *res; - const __be32 *shadow, *groups, *dsl, *phy; + u32 shadow, groups, dsl, phy; struct xway_stp *chip; struct clk *clk; int ret = 0; @@ -223,33 +223,28 @@ static int xway_stp_probe(struct platform_device *pdev) chip->gc.owner = THIS_MODULE; /* store the shadow value if one was passed by the devicetree */ - shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL); - if (shadow) - chip->shadow = be32_to_cpu(*shadow); + if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow)) + chip->shadow = shadow; /* find out which gpio groups should be enabled */ - groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL); - if (groups) - chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK; + if (!of_property_read_u32(pdev->dev.of_node, "lantiq,groups", 
&groups)) + chip->groups = groups & XWAY_STP_GROUP_MASK; else chip->groups = XWAY_STP_GROUP0; chip->gc.ngpio = fls(chip->groups) * 8; /* find out which gpios are controlled by the dsl core */ - dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL); - if (dsl) - chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK; + if (!of_property_read_u32(pdev->dev.of_node, "lantiq,dsl", &dsl)) + chip->dsl = dsl & XWAY_STP_ADSL_MASK; /* find out which gpios are controlled by the phys */ if (of_machine_is_compatible("lantiq,ar9") || of_machine_is_compatible("lantiq,gr9") || of_machine_is_compatible("lantiq,vr9")) { - phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL); - if (phy) - chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK; - phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL); - if (phy) - chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK; + if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy1", &phy)) + chip->phy1 = phy & XWAY_STP_PHY_MASK; + if (!of_property_read_u32(pdev->dev.of_node, "lantiq,phy2", &phy)) + chip->phy2 = phy & XWAY_STP_PHY_MASK; } /* check which edge trigger we should use, default to a falling edge */ diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c index 46b89614aa91..12c99d969b98 100644 --- a/drivers/gpio/gpio-tb10x.c +++ b/drivers/gpio/gpio-tb10x.c @@ -292,7 +292,6 @@ static int tb10x_gpio_remove(struct platform_device *pdev) BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0); kfree(tb10x_gpio->domain->gc); irq_domain_remove(tb10x_gpio->domain); - free_irq(tb10x_gpio->irq, tb10x_gpio); } gpiochip_remove(&tb10x_gpio->gc); diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 1741981d53c8..9b25c90f725c 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -288,7 +288,7 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) tegra_gpio_writel(1 << pin, GPIO_INT_CLR(gpio)); /* if gpio is edge triggered, clear condition - * before executing the hander so that we don't + * before executing the handler so that we don't * miss edges */ if (lvl & (0x100 << pin)) { @@ -515,8 +515,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) for (i = 0; i < tegra_gpio_bank_count; i++) { bank = &tegra_gpio_banks[i]; - irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler); - irq_set_handler_data(bank->irq, bank); + irq_set_chained_handler_and_data(bank->irq, + tegra_gpio_irq_handler, bank); for (j = 0; j < 4; j++) spin_lock_init(&bank->lvl_lock[j]); diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c index 92fbabd82879..b29a102d136b 100644 --- a/drivers/gpio/gpio-ts5500.c +++ b/drivers/gpio/gpio-ts5500.c @@ -440,7 +440,7 @@ static int ts5500_dio_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id ts5500_dio_ids[] = { +static const struct platform_device_id ts5500_dio_ids[] = { { "ts5500-dio1", TS5500_DIO1 }, { "ts5500-dio2", TS5500_DIO2 }, { "ts5500-dio-lcd", TS5500_LCD }, diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c index fb9d29a5d584..d57068b9083e 100644 --- a/drivers/gpio/gpio-xgene-sb.c +++ b/drivers/gpio/gpio-xgene-sb.c @@ -25,8 +25,11 @@ #include <linux/of_gpio.h> #include <linux/gpio.h> #include <linux/gpio/driver.h> +#include <linux/acpi.h> #include <linux/basic_mmio_gpio.h> +#include "gpiolib.h" + #define XGENE_MAX_GPIO_DS 22 #define XGENE_MAX_GPIO_DS_IRQ 6 @@ -112,7 +115,6 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev) GFP_KERNEL); if (!priv->irq) return -ENOMEM; - memset(priv->irq, 
0, sizeof(u32) * XGENE_MAX_GPIO_DS); for (i = 0; i < priv->nirq; i++) { priv->irq[default_lines[i]] = platform_get_irq(pdev, i); @@ -129,6 +131,11 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev) else dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n"); + if (priv->nirq > 0) { + /* Register interrupt handlers for gpio signaled acpi events */ + acpi_gpiochip_request_interrupts(&priv->bgc.gc); + } + return ret; } @@ -136,6 +143,10 @@ static int xgene_gpio_sb_remove(struct platform_device *pdev) { struct xgene_gpio_sb *priv = platform_get_drvdata(pdev); + if (priv->nirq > 0) { + acpi_gpiochip_free_interrupts(&priv->bgc.gc); + } + return bgpio_remove(&priv->bgc); } @@ -145,10 +156,19 @@ static const struct of_device_id xgene_gpio_sb_of_match[] = { }; MODULE_DEVICE_TABLE(of, xgene_gpio_sb_of_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_gpio_sb_acpi_match[] = { + {"APMC0D15", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, xgene_gpio_sb_acpi_match); +#endif + static struct platform_driver xgene_gpio_sb_driver = { .driver = { .name = "xgene-gpio-sb", .of_match_table = xgene_gpio_sb_of_match, + .acpi_match_table = ACPI_PTR(xgene_gpio_sb_acpi_match), }, .probe = xgene_gpio_sb_probe, .remove = xgene_gpio_sb_remove, diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 61243d177740..77fe5d3cb105 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -41,10 +41,10 @@ /** * struct xgpio_instance - Stores information about GPIO device * @mmchip: OF GPIO chip for memory mapped banks + * @gpio_width: GPIO width for every channel * @gpio_state: GPIO state shadow register * @gpio_dir: GPIO direction shadow register * @gpio_lock: Lock used for synchronization - * @inited: True if the port has been inited */ struct xgpio_instance { struct of_mm_gpio_chip mmchip; @@ -231,6 +231,8 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc) * @pdev: pointer to the platform device * * This function removes gpiochips and frees all the allocated resources. + * + * Return: 0 always */ static int xgpio_remove(struct platform_device *pdev) { diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c new file mode 100644 index 000000000000..9bdab7203d65 --- /dev/null +++ b/drivers/gpio/gpio-xlp.c @@ -0,0 +1,427 @@ +/* + * Copyright (C) 2003-2015 Broadcom Corporation + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/gpio.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/module.h> +#include <linux/irq.h> +#include <linux/interrupt.h> + +/* + * XLP GPIO has multiple 32-bit registers for each feature where each register + * controls 32 pins. So, pins up to 64 require 2 32-bit registers and up to 96 + * require 3 32-bit registers for each feature. + * Here we only define the offset of the first register for each feature.
The offset of + the registers for pins greater than 32 can be calculated as follows (using + GPIO_INT_STAT as an example): + * + * offset = (gpio / XLP_GPIO_REGSZ) * 4; + * reg_addr = addr + offset; + * + * where addr is the base address of that feature's first register and gpio + * is the pin number. + */ +#define GPIO_OUTPUT_EN 0x00 +#define GPIO_PADDRV 0x08 +#define GPIO_INT_EN00 0x18 +#define GPIO_INT_EN10 0x20 +#define GPIO_INT_EN20 0x28 +#define GPIO_INT_EN30 0x30 +#define GPIO_INT_POL 0x38 +#define GPIO_INT_TYPE 0x40 +#define GPIO_INT_STAT 0x48 + +#define GPIO_9XX_BYTESWAP 0x00 +#define GPIO_9XX_CTRL 0x04 +#define GPIO_9XX_OUTPUT_EN 0x14 +#define GPIO_9XX_PADDRV 0x24 +/* + * Only 4 of the interrupt enable registers are defined for now; + * 12 registers are available in total. + */ +#define GPIO_9XX_INT_EN00 0x44 +#define GPIO_9XX_INT_EN10 0x54 +#define GPIO_9XX_INT_EN20 0x64 +#define GPIO_9XX_INT_EN30 0x74 +#define GPIO_9XX_INT_POL 0x104 +#define GPIO_9XX_INT_TYPE 0x114 +#define GPIO_9XX_INT_STAT 0x124 + +#define GPIO_3XX_INT_EN00 0x18 +#define GPIO_3XX_INT_EN10 0x20 +#define GPIO_3XX_INT_EN20 0x28 +#define GPIO_3XX_INT_EN30 0x30 +#define GPIO_3XX_INT_POL 0x78 +#define GPIO_3XX_INT_TYPE 0x80 +#define GPIO_3XX_INT_STAT 0x88 + +/* Interrupt type register mask */ +#define XLP_GPIO_IRQ_TYPE_LVL 0x0 +#define XLP_GPIO_IRQ_TYPE_EDGE 0x1 + +/* Interrupt polarity register mask */ +#define XLP_GPIO_IRQ_POL_HIGH 0x0 +#define XLP_GPIO_IRQ_POL_LOW 0x1 + +#define XLP_GPIO_REGSZ 32 +#define XLP_GPIO_IRQ_BASE 768 +#define XLP_MAX_NR_GPIO 96 + +/* XLP variants supported by this driver */ +enum { + XLP_GPIO_VARIANT_XLP832 = 1, + XLP_GPIO_VARIANT_XLP316, + XLP_GPIO_VARIANT_XLP208, + XLP_GPIO_VARIANT_XLP980, + XLP_GPIO_VARIANT_XLP532 +}; + +struct xlp_gpio_priv { + struct gpio_chip chip; + DECLARE_BITMAP(gpio_enabled_mask, XLP_MAX_NR_GPIO); + void __iomem *gpio_intr_en; /* pointer to first intr enable reg */ + void __iomem *gpio_intr_stat; /* pointer to first intr status reg */ + void __iomem *gpio_intr_type; /* pointer to first intr type reg */ + void __iomem *gpio_intr_pol; /* pointer to first intr polarity reg */ + void __iomem *gpio_out_en; /* pointer to first output enable reg */ + void __iomem *gpio_paddrv; /* pointer to first pad drive reg */ + spinlock_t lock; +}; + +static struct xlp_gpio_priv *gpio_chip_to_xlp_priv(struct gpio_chip *gc) +{ + return container_of(gc, struct xlp_gpio_priv, chip); +} + +static int xlp_gpio_get_reg(void __iomem *addr, unsigned gpio) +{ + u32 pos, regset; + + pos = gpio % XLP_GPIO_REGSZ; + regset = (gpio / XLP_GPIO_REGSZ) * 4; + return !!(readl(addr + regset) & BIT(pos)); +} + +static void xlp_gpio_set_reg(void __iomem *addr, unsigned gpio, int state) +{ + u32 value, pos, regset; + + pos = gpio % XLP_GPIO_REGSZ; + regset = (gpio / XLP_GPIO_REGSZ) * 4; + value = readl(addr + regset); + + if (state) + value |= BIT(pos); + else + value &= ~BIT(pos); + + writel(value, addr + regset); +} + +static void xlp_gpio_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x0); + __clear_bit(d->hwirq, priv->gpio_enabled_mask); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void xlp_gpio_irq_mask_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); +
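/* + * Mask first, then ack: both writes below use the offset scheme described + * at the top of this file. For hwirq 40, for example, pos = 40 % 32 = 8 and + * regset = (40 / 32) * 4 = 4, i.e. bit 8 of the second 32-bit register. + */ +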
xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x0); + xlp_gpio_set_reg(priv->gpio_intr_stat, d->hwirq, 0x1); + __clear_bit(d->hwirq, priv->gpio_enabled_mask); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void xlp_gpio_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + xlp_gpio_set_reg(priv->gpio_intr_en, d->hwirq, 0x1); + __set_bit(d->hwirq, priv->gpio_enabled_mask); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int xlp_gpio_set_irq_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + int pol, irq_type; + + switch (type) { + case IRQ_TYPE_EDGE_RISING: + irq_type = XLP_GPIO_IRQ_TYPE_EDGE; + pol = XLP_GPIO_IRQ_POL_HIGH; + break; + case IRQ_TYPE_EDGE_FALLING: + irq_type = XLP_GPIO_IRQ_TYPE_EDGE; + pol = XLP_GPIO_IRQ_POL_LOW; + break; + case IRQ_TYPE_LEVEL_HIGH: + irq_type = XLP_GPIO_IRQ_TYPE_LVL; + pol = XLP_GPIO_IRQ_POL_HIGH; + break; + case IRQ_TYPE_LEVEL_LOW: + irq_type = XLP_GPIO_IRQ_TYPE_LVL; + pol = XLP_GPIO_IRQ_POL_LOW; + break; + default: + return -EINVAL; + } + + xlp_gpio_set_reg(priv->gpio_intr_type, d->hwirq, irq_type); + xlp_gpio_set_reg(priv->gpio_intr_pol, d->hwirq, pol); + + return 0; +} + +static struct irq_chip xlp_gpio_irq_chip = { + .name = "XLP-GPIO", + .irq_mask_ack = xlp_gpio_irq_mask_ack, + .irq_disable = xlp_gpio_irq_disable, + .irq_set_type = xlp_gpio_set_irq_type, + .irq_unmask = xlp_gpio_irq_unmask, + .flags = IRQCHIP_ONESHOT_SAFE, +}; + +static irqreturn_t xlp_gpio_generic_handler(int irq, void *data) +{ + struct xlp_gpio_priv *priv = data; + int gpio, regoff; + u32 gpio_stat; + + regoff = -1; + gpio_stat = 0; + for_each_set_bit(gpio, priv->gpio_enabled_mask, XLP_MAX_NR_GPIO) { + if (regoff != gpio / XLP_GPIO_REGSZ) { + regoff = gpio / XLP_GPIO_REGSZ; + gpio_stat = readl(priv->gpio_intr_stat + regoff * 4); + } + if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ)) + generic_handle_irq(irq_find_mapping( + priv->chip.irqdomain, gpio)); + } + + return IRQ_HANDLED; +} + +static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state) +{ + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + + BUG_ON(gpio >= gc->ngpio); + xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x1); + + return 0; +} + +static int xlp_gpio_dir_input(struct gpio_chip *gc, unsigned gpio) +{ + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + + BUG_ON(gpio >= gc->ngpio); + xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x0); + + return 0; +} + +static int xlp_gpio_get(struct gpio_chip *gc, unsigned gpio) +{ + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + + BUG_ON(gpio >= gc->ngpio); + return xlp_gpio_get_reg(priv->gpio_paddrv, gpio); +} + +static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state) +{ + struct xlp_gpio_priv *priv = gpio_chip_to_xlp_priv(gc); + + BUG_ON(gpio >= gc->ngpio); + xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state); +} + +static const struct of_device_id xlp_gpio_of_ids[] = { + { + .compatible = "netlogic,xlp832-gpio", + .data = (void *)XLP_GPIO_VARIANT_XLP832, + }, + { + .compatible = "netlogic,xlp316-gpio", + .data = (void *)XLP_GPIO_VARIANT_XLP316, + }, + { + .compatible = "netlogic,xlp208-gpio", + .data = (void *)XLP_GPIO_VARIANT_XLP208, + }, + { + .compatible = "netlogic,xlp980-gpio", + .data = (void *)XLP_GPIO_VARIANT_XLP980, + }, + { + 
.compatible = "netlogic,xlp532-gpio", + .data = (void *)XLP_GPIO_VARIANT_XLP532, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids); + +static int xlp_gpio_probe(struct platform_device *pdev) +{ + struct gpio_chip *gc; + struct resource *iores; + struct xlp_gpio_priv *priv; + const struct of_device_id *of_id; + void __iomem *gpio_base; + int irq_base, irq, err; + int ngpio; + u32 soc_type; + + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iores) + return -ENODEV; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + gpio_base = devm_ioremap_resource(&pdev->dev, iores); + if (IS_ERR(gpio_base)) + return PTR_ERR(gpio_base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + of_id = of_match_device(xlp_gpio_of_ids, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "Failed to get soc type!\n"); + return -ENODEV; + } + + soc_type = (uintptr_t) of_id->data; + + switch (soc_type) { + case XLP_GPIO_VARIANT_XLP832: + priv->gpio_out_en = gpio_base + GPIO_OUTPUT_EN; + priv->gpio_paddrv = gpio_base + GPIO_PADDRV; + priv->gpio_intr_stat = gpio_base + GPIO_INT_STAT; + priv->gpio_intr_type = gpio_base + GPIO_INT_TYPE; + priv->gpio_intr_pol = gpio_base + GPIO_INT_POL; + priv->gpio_intr_en = gpio_base + GPIO_INT_EN00; + ngpio = 41; + break; + case XLP_GPIO_VARIANT_XLP208: + case XLP_GPIO_VARIANT_XLP316: + priv->gpio_out_en = gpio_base + GPIO_OUTPUT_EN; + priv->gpio_paddrv = gpio_base + GPIO_PADDRV; + priv->gpio_intr_stat = gpio_base + GPIO_3XX_INT_STAT; + priv->gpio_intr_type = gpio_base + GPIO_3XX_INT_TYPE; + priv->gpio_intr_pol = gpio_base + GPIO_3XX_INT_POL; + priv->gpio_intr_en = gpio_base + GPIO_3XX_INT_EN00; + + ngpio = (soc_type == XLP_GPIO_VARIANT_XLP208) ? 42 : 57; + break; + case XLP_GPIO_VARIANT_XLP980: + case XLP_GPIO_VARIANT_XLP532: + priv->gpio_out_en = gpio_base + GPIO_9XX_OUTPUT_EN; + priv->gpio_paddrv = gpio_base + GPIO_9XX_PADDRV; + priv->gpio_intr_stat = gpio_base + GPIO_9XX_INT_STAT; + priv->gpio_intr_type = gpio_base + GPIO_9XX_INT_TYPE; + priv->gpio_intr_pol = gpio_base + GPIO_9XX_INT_POL; + priv->gpio_intr_en = gpio_base + GPIO_9XX_INT_EN00; + + ngpio = (soc_type == XLP_GPIO_VARIANT_XLP980) ? 
66 : 67; + break; + default: + dev_err(&pdev->dev, "Unknown Processor type!\n"); + return -ENODEV; + } + + bitmap_zero(priv->gpio_enabled_mask, XLP_MAX_NR_GPIO); + + gc = &priv->chip; + + gc->owner = THIS_MODULE; + gc->label = dev_name(&pdev->dev); + gc->base = 0; + gc->dev = &pdev->dev; + gc->ngpio = ngpio; + gc->of_node = pdev->dev.of_node; + gc->direction_output = xlp_gpio_dir_output; + gc->direction_input = xlp_gpio_dir_input; + gc->set = xlp_gpio_set; + gc->get = xlp_gpio_get; + + spin_lock_init(&priv->lock); + + err = devm_request_irq(&pdev->dev, irq, xlp_gpio_generic_handler, + IRQ_TYPE_NONE, pdev->name, priv); + if (err) + return err; + + irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0); + if (irq_base < 0) { + dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n"); + return irq_base; + } + + err = gpiochip_add(gc); + if (err < 0) + goto out_free_desc; + + err = gpiochip_irqchip_add(gc, &xlp_gpio_irq_chip, irq_base, + handle_level_irq, IRQ_TYPE_NONE); + if (err) { + dev_err(&pdev->dev, "Could not connect irqchip to gpiochip!\n"); + goto out_gpio_remove; + } + + dev_info(&pdev->dev, "registered %d GPIOs\n", gc->ngpio); + + return 0; + +out_gpio_remove: + gpiochip_remove(gc); +out_free_desc: + irq_free_descs(irq_base, gc->ngpio); + return err; +} + +static struct platform_driver xlp_gpio_driver = { + .driver = { + .name = "xlp-gpio", + .of_match_table = xlp_gpio_of_ids, + }, + .probe = xlp_gpio_probe, +}; +module_platform_driver(xlp_gpio_driver); + +MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>"); +MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>"); +MODULE_DESCRIPTION("Netlogic XLP GPIO Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 184c4b1b2558..2e87c4b8da26 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -18,34 +18,47 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/of.h> #define DRIVER_NAME "zynq-gpio" /* Maximum banks */ #define ZYNQ_GPIO_MAX_BANK 4 +#define ZYNQMP_GPIO_MAX_BANK 6 #define ZYNQ_GPIO_BANK0_NGPIO 32 #define ZYNQ_GPIO_BANK1_NGPIO 22 #define ZYNQ_GPIO_BANK2_NGPIO 32 #define ZYNQ_GPIO_BANK3_NGPIO 32 -#define ZYNQ_GPIO_NR_GPIOS (ZYNQ_GPIO_BANK0_NGPIO + \ - ZYNQ_GPIO_BANK1_NGPIO + \ - ZYNQ_GPIO_BANK2_NGPIO + \ - ZYNQ_GPIO_BANK3_NGPIO) - -#define ZYNQ_GPIO_BANK0_PIN_MIN 0 -#define ZYNQ_GPIO_BANK0_PIN_MAX (ZYNQ_GPIO_BANK0_PIN_MIN + \ - ZYNQ_GPIO_BANK0_NGPIO - 1) -#define ZYNQ_GPIO_BANK1_PIN_MIN (ZYNQ_GPIO_BANK0_PIN_MAX + 1) -#define ZYNQ_GPIO_BANK1_PIN_MAX (ZYNQ_GPIO_BANK1_PIN_MIN + \ - ZYNQ_GPIO_BANK1_NGPIO - 1) -#define ZYNQ_GPIO_BANK2_PIN_MIN (ZYNQ_GPIO_BANK1_PIN_MAX + 1) -#define ZYNQ_GPIO_BANK2_PIN_MAX (ZYNQ_GPIO_BANK2_PIN_MIN + \ - ZYNQ_GPIO_BANK2_NGPIO - 1) -#define ZYNQ_GPIO_BANK3_PIN_MIN (ZYNQ_GPIO_BANK2_PIN_MAX + 1) -#define ZYNQ_GPIO_BANK3_PIN_MAX (ZYNQ_GPIO_BANK3_PIN_MIN + \ - ZYNQ_GPIO_BANK3_NGPIO - 1) +#define ZYNQMP_GPIO_BANK0_NGPIO 26 +#define ZYNQMP_GPIO_BANK1_NGPIO 26 +#define ZYNQMP_GPIO_BANK2_NGPIO 26 +#define ZYNQMP_GPIO_BANK3_NGPIO 32 +#define ZYNQMP_GPIO_BANK4_NGPIO 32 +#define ZYNQMP_GPIO_BANK5_NGPIO 32 + +#define ZYNQ_GPIO_NR_GPIOS 118 +#define ZYNQMP_GPIO_NR_GPIOS 174 + +#define ZYNQ_GPIO_BANK0_PIN_MIN(str) 0 +#define ZYNQ_GPIO_BANK0_PIN_MAX(str) (ZYNQ_GPIO_BANK0_PIN_MIN(str) + \ + ZYNQ##str##_GPIO_BANK0_NGPIO - 1) +#define ZYNQ_GPIO_BANK1_PIN_MIN(str) (ZYNQ_GPIO_BANK0_PIN_MAX(str) + 1) +#define ZYNQ_GPIO_BANK1_PIN_MAX(str) (ZYNQ_GPIO_BANK1_PIN_MIN(str) + \ +
ZYNQ##str##_GPIO_BANK1_NGPIO - 1) +#define ZYNQ_GPIO_BANK2_PIN_MIN(str) (ZYNQ_GPIO_BANK1_PIN_MAX(str) + 1) +#define ZYNQ_GPIO_BANK2_PIN_MAX(str) (ZYNQ_GPIO_BANK2_PIN_MIN(str) + \ + ZYNQ##str##_GPIO_BANK2_NGPIO - 1) +#define ZYNQ_GPIO_BANK3_PIN_MIN(str) (ZYNQ_GPIO_BANK2_PIN_MAX(str) + 1) +#define ZYNQ_GPIO_BANK3_PIN_MAX(str) (ZYNQ_GPIO_BANK3_PIN_MIN(str) + \ + ZYNQ##str##_GPIO_BANK3_NGPIO - 1) +#define ZYNQ_GPIO_BANK4_PIN_MIN(str) (ZYNQ_GPIO_BANK3_PIN_MAX(str) + 1) +#define ZYNQ_GPIO_BANK4_PIN_MAX(str) (ZYNQ_GPIO_BANK4_PIN_MIN(str) + \ + ZYNQ##str##_GPIO_BANK4_NGPIO - 1) +#define ZYNQ_GPIO_BANK5_PIN_MIN(str) (ZYNQ_GPIO_BANK4_PIN_MAX(str) + 1) +#define ZYNQ_GPIO_BANK5_PIN_MAX(str) (ZYNQ_GPIO_BANK5_PIN_MIN(str) + \ + ZYNQ##str##_GPIO_BANK5_NGPIO - 1) /* Register offsets for the GPIO device */ @@ -89,12 +102,30 @@ * @base_addr: base address of the GPIO device * @clk: clock resource for this controller * @irq: interrupt for the GPIO device + * @p_data: pointer to platform data */ struct zynq_gpio { struct gpio_chip chip; void __iomem *base_addr; struct clk *clk; int irq; + const struct zynq_platform_data *p_data; +}; + +/** + * struct zynq_platform_data - zynq gpio platform data structure + * @label: string to store in gpio->label + * @ngpio: max number of gpio pins + * @max_bank: maximum number of gpio banks + * @bank_min: this array represents bank's min pin + * @bank_max: this array represents bank's max pin +*/ +struct zynq_platform_data { + const char *label; + u16 ngpio; + int max_bank; + int bank_min[ZYNQMP_GPIO_MAX_BANK]; + int bank_max[ZYNQMP_GPIO_MAX_BANK]; }; static struct irq_chip zynq_gpio_level_irqchip; @@ -112,39 +143,26 @@ static struct irq_chip zynq_gpio_edge_irqchip; */ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num, unsigned int *bank_num, - unsigned int *bank_pin_num) + unsigned int *bank_pin_num, + struct zynq_gpio *gpio) { - switch (pin_num) { - case ZYNQ_GPIO_BANK0_PIN_MIN ... ZYNQ_GPIO_BANK0_PIN_MAX: - *bank_num = 0; - *bank_pin_num = pin_num; - break; - case ZYNQ_GPIO_BANK1_PIN_MIN ... ZYNQ_GPIO_BANK1_PIN_MAX: - *bank_num = 1; - *bank_pin_num = pin_num - ZYNQ_GPIO_BANK1_PIN_MIN; - break; - case ZYNQ_GPIO_BANK2_PIN_MIN ... ZYNQ_GPIO_BANK2_PIN_MAX: - *bank_num = 2; - *bank_pin_num = pin_num - ZYNQ_GPIO_BANK2_PIN_MIN; - break; - case ZYNQ_GPIO_BANK3_PIN_MIN ... 
ZYNQ_GPIO_BANK3_PIN_MAX: - *bank_num = 3; - *bank_pin_num = pin_num - ZYNQ_GPIO_BANK3_PIN_MIN; - break; - default: - WARN(true, "invalid GPIO pin number: %u", pin_num); - *bank_num = 0; - *bank_pin_num = 0; - break; + int bank; + + for (bank = 0; bank < gpio->p_data->max_bank; bank++) { + if ((pin_num >= gpio->p_data->bank_min[bank]) && + (pin_num <= gpio->p_data->bank_max[bank])) { + *bank_num = bank; + *bank_pin_num = pin_num - + gpio->p_data->bank_min[bank]; + return; + } } -} -static const unsigned int zynq_gpio_bank_offset[] = { - ZYNQ_GPIO_BANK0_PIN_MIN, - ZYNQ_GPIO_BANK1_PIN_MIN, - ZYNQ_GPIO_BANK2_PIN_MIN, - ZYNQ_GPIO_BANK3_PIN_MIN, -}; + /* default */ + WARN(true, "invalid GPIO pin number: %u", pin_num); + *bank_num = 0; + *bank_pin_num = 0; +} /** * zynq_gpio_get_value - Get the state of the specified pin of GPIO device @@ -161,7 +179,7 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin) unsigned int bank_num, bank_pin_num; struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); - zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); data = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_RO_OFFSET(bank_num)); @@ -185,7 +203,7 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin, unsigned int reg_offset, bank_num, bank_pin_num; struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); - zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); if (bank_pin_num >= ZYNQ_GPIO_MID_PIN_NUM) { /* only 16 data bits in bit maskable reg */ @@ -222,7 +240,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin) unsigned int bank_num, bank_pin_num; struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); - zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); /* bank 0 pins 7 and 8 are special and cannot be used as inputs */ if (bank_num == 0 && (bank_pin_num == 7 || bank_pin_num == 8)) @@ -255,7 +273,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, unsigned int bank_num, bank_pin_num; struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip); - zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); /* set the GPIO pin as output */ reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); @@ -286,7 +304,7 @@ static void zynq_gpio_irq_mask(struct irq_data *irq_data) struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); device_pin_num = irq_data->hwirq; - zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); writel_relaxed(BIT(bank_pin_num), gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); } @@ -306,7 +324,7 @@ static void zynq_gpio_irq_unmask(struct irq_data *irq_data) struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); device_pin_num = irq_data->hwirq; - zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); writel_relaxed(BIT(bank_pin_num), gpio->base_addr + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); } @@ -325,7 +343,7 @@ static void zynq_gpio_irq_ack(struct irq_data *irq_data) struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); device_pin_num = irq_data->hwirq; - zynq_gpio_get_bank_pin(device_pin_num, 
&bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); writel_relaxed(BIT(bank_pin_num), gpio->base_addr + ZYNQ_GPIO_INTSTS_OFFSET(bank_num)); } @@ -335,7 +353,7 @@ static void zynq_gpio_irq_ack(struct irq_data *irq_data) * @irq_data: irq data containing irq number of gpio pin for the interrupt * to enable * - * Clears the INTSTS bit and unmasks the given interrrupt. + * Clears the INTSTS bit and unmasks the given interrupt. */ static void zynq_gpio_irq_enable(struct irq_data *irq_data) { @@ -375,7 +393,7 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type) struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data); device_pin_num = irq_data->hwirq; - zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num); + zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); int_type = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); @@ -470,7 +488,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, unsigned int bank_num, unsigned long pending) { - unsigned int bank_offset = zynq_gpio_bank_offset[bank_num]; + unsigned int bank_offset = gpio->p_data->bank_min[bank_num]; struct irq_domain *irqdomain = gpio->chip.irqdomain; int offset; @@ -505,7 +523,7 @@ static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc) chained_irq_enter(irqchip, desc); - for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) { + for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { int_sts = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTSTS_OFFSET(bank_num)); int_enb = readl_relaxed(gpio->base_addr + @@ -582,6 +600,46 @@ static const struct dev_pm_ops zynq_gpio_dev_pm_ops = { zynq_gpio_runtime_resume, NULL) }; +static const struct zynq_platform_data zynqmp_gpio_def = { + .label = "zynqmp_gpio", + .ngpio = ZYNQMP_GPIO_NR_GPIOS, + .max_bank = ZYNQMP_GPIO_MAX_BANK, + .bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(MP), + .bank_max[0] = ZYNQ_GPIO_BANK0_PIN_MAX(MP), + .bank_min[1] = ZYNQ_GPIO_BANK1_PIN_MIN(MP), + .bank_max[1] = ZYNQ_GPIO_BANK1_PIN_MAX(MP), + .bank_min[2] = ZYNQ_GPIO_BANK2_PIN_MIN(MP), + .bank_max[2] = ZYNQ_GPIO_BANK2_PIN_MAX(MP), + .bank_min[3] = ZYNQ_GPIO_BANK3_PIN_MIN(MP), + .bank_max[3] = ZYNQ_GPIO_BANK3_PIN_MAX(MP), + .bank_min[4] = ZYNQ_GPIO_BANK4_PIN_MIN(MP), + .bank_max[4] = ZYNQ_GPIO_BANK4_PIN_MAX(MP), + .bank_min[5] = ZYNQ_GPIO_BANK5_PIN_MIN(MP), + .bank_max[5] = ZYNQ_GPIO_BANK5_PIN_MAX(MP), +}; + +static const struct zynq_platform_data zynq_gpio_def = { + .label = "zynq_gpio", + .ngpio = ZYNQ_GPIO_NR_GPIOS, + .max_bank = ZYNQ_GPIO_MAX_BANK, + .bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(), + .bank_max[0] = ZYNQ_GPIO_BANK0_PIN_MAX(), + .bank_min[1] = ZYNQ_GPIO_BANK1_PIN_MIN(), + .bank_max[1] = ZYNQ_GPIO_BANK1_PIN_MAX(), + .bank_min[2] = ZYNQ_GPIO_BANK2_PIN_MIN(), + .bank_max[2] = ZYNQ_GPIO_BANK2_PIN_MAX(), + .bank_min[3] = ZYNQ_GPIO_BANK3_PIN_MIN(), + .bank_max[3] = ZYNQ_GPIO_BANK3_PIN_MAX(), +}; + +static const struct of_device_id zynq_gpio_of_match[] = { + { .compatible = "xlnx,zynq-gpio-1.0", .data = (void *)&zynq_gpio_def }, + { .compatible = "xlnx,zynqmp-gpio-1.0", + .data = (void *)&zynqmp_gpio_def }, + { /* end of table */ } +}; +MODULE_DEVICE_TABLE(of, zynq_gpio_of_match); + /** * zynq_gpio_probe - Initialization method for a zynq_gpio device * @pdev: platform device instance @@ -599,11 +657,18 @@ static int zynq_gpio_probe(struct platform_device *pdev) struct zynq_gpio *gpio; struct gpio_chip *chip; struct resource *res; + const struct 
of_device_id *match; gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; + match = of_match_node(zynq_gpio_of_match, pdev->dev.of_node); + if (!match) { + dev_err(&pdev->dev, "of_match_node() failed\n"); + return -EINVAL; + } + gpio->p_data = match->data; platform_set_drvdata(pdev, gpio); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -619,7 +684,7 @@ static int zynq_gpio_probe(struct platform_device *pdev) /* configure the gpio chip */ chip = &gpio->chip; - chip->label = "zynq_gpio"; + chip->label = gpio->p_data->label; chip->owner = THIS_MODULE; chip->dev = &pdev->dev; chip->get = zynq_gpio_get_value; @@ -629,7 +694,7 @@ static int zynq_gpio_probe(struct platform_device *pdev) chip->direction_input = zynq_gpio_dir_in; chip->direction_output = zynq_gpio_dir_out; chip->base = -1; - chip->ngpio = ZYNQ_GPIO_NR_GPIOS; + chip->ngpio = gpio->p_data->ngpio; /* Enable GPIO clock */ gpio->clk = devm_clk_get(&pdev->dev, NULL); @@ -651,7 +716,7 @@ static int zynq_gpio_probe(struct platform_device *pdev) } /* disable interrupts for all banks */ - for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) + for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); @@ -695,12 +760,6 @@ static int zynq_gpio_remove(struct platform_device *pdev) return 0; } -static struct of_device_id zynq_gpio_of_match[] = { - { .compatible = "xlnx,zynq-gpio-1.0", }, - { /* end of table */ } -}; -MODULE_DEVICE_TABLE(of, zynq_gpio_of_match); - static struct platform_driver zynq_gpio_driver = { .driver = { .name = DRIVER_NAME, diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 725d16138b74..533fe5dbe6f8 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -114,10 +114,11 @@ static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, * @path: ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1") * @pin: ACPI GPIO pin number (0-based, controller-relative) * - * Returns GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR - * error value + * Return: GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR + * error value. Specifically returns %-EPROBE_DEFER if the referenced GPIO + * controller does not have gpiochip registered at the moment. This is to + * support probe deferral. */ - static struct gpio_desc *acpi_get_gpiod(char *path, int pin) { struct gpio_chip *chip; @@ -131,7 +132,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin) chip = gpiochip_find(handle, acpi_gpiochip_find); if (!chip) - return ERR_PTR(-ENODEV); + return ERR_PTR(-EPROBE_DEFER); offset = acpi_gpiochip_pin_to_gpio_offset(chip, pin); if (offset < 0) @@ -307,6 +308,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) acpi_walk_resources(handle, "_AEI", acpi_gpiochip_request_interrupt, acpi_gpio); } +EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts); /** * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts. @@ -346,6 +348,7 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) kfree(event); } } +EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts); int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios) @@ -514,6 +517,35 @@ struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev, return lookup.desc ? 
lookup.desc : ERR_PTR(-ENOENT); } +/** + * acpi_dev_gpio_irq_get() - Find GpioInt and translate it to Linux IRQ number + * @adev: pointer to a ACPI device to get IRQ from + * @index: index of GpioInt resource (starting from %0) + * + * If the device has one or more GpioInt resources, this function can be + * used to translate from the GPIO offset in the resource to the Linux IRQ + * number. + * + * Return: Linux IRQ number (>%0) on success, negative errno on failure. + */ +int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) +{ + int idx, i; + + for (i = 0, idx = 0; idx <= index; i++) { + struct acpi_gpio_info info; + struct gpio_desc *desc; + + desc = acpi_get_gpiod_by_index(adev, NULL, i, &info); + if (IS_ERR(desc)) + break; + if (info.gpioint && idx++ == index) + return gpiod_to_irq(desc); + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get); + static acpi_status acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value, void *handler_context, diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a6c67c6b4680..9a0ec48a4737 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -242,7 +242,7 @@ int of_gpio_simple_xlate(struct gpio_chip *gc, { /* * We're discouraging gpio_cells < 2, since that way you'll have to - * write your own xlate function (that will have to retrive the GPIO + * write your own xlate function (that will have to retrieve the GPIO * number and the flags from a single gpio cell -- this is possible, * but not recommended). */ diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index af3bc7a8033b..b57ed8e55ab5 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -6,14 +6,29 @@ #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/kdev_t.h> +#include <linux/slab.h> #include "gpiolib.h" -static DEFINE_IDR(dirent_idr); +#define GPIO_IRQF_TRIGGER_FALLING BIT(0) +#define GPIO_IRQF_TRIGGER_RISING BIT(1) +#define GPIO_IRQF_TRIGGER_BOTH (GPIO_IRQF_TRIGGER_FALLING | \ + GPIO_IRQF_TRIGGER_RISING) +struct gpiod_data { + struct gpio_desc *desc; + + struct mutex mutex; + struct kernfs_node *value_kn; + int irq; + unsigned char irq_flags; -/* lock protects against unexport_gpio() being called while - * sysfs files are active. + bool direction_can_change; +}; + +/* + * Lock to serialise gpiod export and unexport, and prevent re-export of + * gpiod whose chip is being unregistered. */ static DEFINE_MUTEX(sysfs_lock); @@ -38,38 +53,35 @@ static DEFINE_MUTEX(sysfs_lock); * /edge configuration */ -static ssize_t gpio_direction_show(struct device *dev, +static ssize_t direction_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; ssize_t status; - mutex_lock(&sysfs_lock); + mutex_lock(&data->mutex); - if (!test_bit(FLAG_EXPORT, &desc->flags)) { - status = -EIO; - } else { - gpiod_get_direction(desc); - status = sprintf(buf, "%s\n", + gpiod_get_direction(desc); + status = sprintf(buf, "%s\n", test_bit(FLAG_IS_OUT, &desc->flags) ? 
"out" : "in"); - } - mutex_unlock(&sysfs_lock); + mutex_unlock(&data->mutex); + return status; } -static ssize_t gpio_direction_store(struct device *dev, +static ssize_t direction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; ssize_t status; - mutex_lock(&sysfs_lock); + mutex_lock(&data->mutex); - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else if (sysfs_streq(buf, "high")) + if (sysfs_streq(buf, "high")) status = gpiod_direction_output_raw(desc, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) status = gpiod_direction_output_raw(desc, 0); @@ -78,43 +90,40 @@ static ssize_t gpio_direction_store(struct device *dev, else status = -EINVAL; - mutex_unlock(&sysfs_lock); + mutex_unlock(&data->mutex); + return status ? : size; } +static DEVICE_ATTR_RW(direction); -static /* const */ DEVICE_ATTR(direction, 0644, - gpio_direction_show, gpio_direction_store); - -static ssize_t gpio_value_show(struct device *dev, +static ssize_t value_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; ssize_t status; - mutex_lock(&sysfs_lock); + mutex_lock(&data->mutex); - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else - status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); + status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); + + mutex_unlock(&data->mutex); - mutex_unlock(&sysfs_lock); return status; } -static ssize_t gpio_value_store(struct device *dev, +static ssize_t value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; ssize_t status; - mutex_lock(&sysfs_lock); + mutex_lock(&data->mutex); - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else if (!test_bit(FLAG_IS_OUT, &desc->flags)) + if (!test_bit(FLAG_IS_OUT, &desc->flags)) { status = -EPERM; - else { + } else { long value; status = kstrtol(buf, 0, &value); @@ -124,172 +133,168 @@ static ssize_t gpio_value_store(struct device *dev, } } - mutex_unlock(&sysfs_lock); + mutex_unlock(&data->mutex); + return status; } - -static DEVICE_ATTR(value, 0644, - gpio_value_show, gpio_value_store); +static DEVICE_ATTR_RW(value); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { - struct kernfs_node *value_sd = priv; + struct gpiod_data *data = priv; + + sysfs_notify_dirent(data->value_kn); - sysfs_notify_dirent(value_sd); return IRQ_HANDLED; } -static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, - unsigned long gpio_flags) +/* Caller holds gpiod-data mutex. 
*/ +static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags) { - struct kernfs_node *value_sd; + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; unsigned long irq_flags; - int ret, irq, id; + int ret; - if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) - return 0; - - irq = gpiod_to_irq(desc); - if (irq < 0) + data->irq = gpiod_to_irq(desc); + if (data->irq < 0) return -EIO; - id = desc->flags >> ID_SHIFT; - value_sd = idr_find(&dirent_idr, id); - if (value_sd) - free_irq(irq, value_sd); - - desc->flags &= ~GPIO_TRIGGER_MASK; - - if (!gpio_flags) { - gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); - ret = 0; - goto free_id; - } + data->value_kn = sysfs_get_dirent(dev->kobj.sd, "value"); + if (!data->value_kn) + return -ENODEV; irq_flags = IRQF_SHARED; - if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) + if (flags & GPIO_IRQF_TRIGGER_FALLING) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; - if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) + if (flags & GPIO_IRQF_TRIGGER_RISING) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; - if (!value_sd) { - value_sd = sysfs_get_dirent(dev->kobj.sd, "value"); - if (!value_sd) { - ret = -ENODEV; - goto err_out; - } - - ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL); - if (ret < 0) - goto free_sd; - id = ret; - - desc->flags &= GPIO_FLAGS_MASK; - desc->flags |= (unsigned long)id << ID_SHIFT; - - if (desc->flags >> ID_SHIFT != id) { - ret = -ERANGE; - goto free_id; - } - } + /* + * FIXME: This should be done in the irq_request_resources callback + * when the irq is requested, but a few drivers currently fail + * to do so. + * + * Remove this redundant call (along with the corresponding + * unlock) when those drivers have been fixed. + */ + ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); + if (ret < 0) + goto err_put_kn; - ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags, - "gpiolib", value_sd); + ret = request_any_context_irq(data->irq, gpio_sysfs_irq, irq_flags, + "gpiolib", data); if (ret < 0) - goto free_id; + goto err_unlock; - ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); - if (ret < 0) { - gpiod_warn(desc, "failed to flag the GPIO for IRQ\n"); - goto free_id; - } + data->irq_flags = flags; - desc->flags |= gpio_flags; return 0; -free_id: - idr_remove(&dirent_idr, id); - desc->flags &= GPIO_FLAGS_MASK; -free_sd: - if (value_sd) - sysfs_put(value_sd); -err_out: +err_unlock: + gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); +err_put_kn: + sysfs_put(data->value_kn); + return ret; } +/* + * Caller holds gpiod-data mutex (unless called after class-device + * deregistration). 
+ */ +static void gpio_sysfs_free_irq(struct device *dev) +{ + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; + + data->irq_flags = 0; + free_irq(data->irq, data); + gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); + sysfs_put(data->value_kn); +} + static const struct { const char *name; - unsigned long flags; + unsigned char flags; } trigger_types[] = { { "none", 0 }, - { "falling", BIT(FLAG_TRIG_FALL) }, - { "rising", BIT(FLAG_TRIG_RISE) }, - { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, + { "falling", GPIO_IRQF_TRIGGER_FALLING }, + { "rising", GPIO_IRQF_TRIGGER_RISING }, + { "both", GPIO_IRQF_TRIGGER_BOTH }, }; -static ssize_t gpio_edge_show(struct device *dev, +static ssize_t edge_show(struct device *dev, struct device_attribute *attr, char *buf) { - const struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; + struct gpiod_data *data = dev_get_drvdata(dev); + ssize_t status = 0; + int i; - mutex_lock(&sysfs_lock); - - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else { - int i; + mutex_lock(&data->mutex); - status = 0; - for (i = 0; i < ARRAY_SIZE(trigger_types); i++) - if ((desc->flags & GPIO_TRIGGER_MASK) - == trigger_types[i].flags) { - status = sprintf(buf, "%s\n", - trigger_types[i].name); - break; - } + for (i = 0; i < ARRAY_SIZE(trigger_types); i++) { + if (data->irq_flags == trigger_types[i].flags) { + status = sprintf(buf, "%s\n", trigger_types[i].name); + break; + } } - mutex_unlock(&sysfs_lock); + mutex_unlock(&data->mutex); + return status; } -static ssize_t gpio_edge_store(struct device *dev, +static ssize_t edge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - struct gpio_desc *desc = dev_get_drvdata(dev); - ssize_t status; - int i; + struct gpiod_data *data = dev_get_drvdata(dev); + unsigned char flags; + ssize_t status = size; + int i; - for (i = 0; i < ARRAY_SIZE(trigger_types); i++) + for (i = 0; i < ARRAY_SIZE(trigger_types); i++) { if (sysfs_streq(trigger_types[i].name, buf)) - goto found; - return -EINVAL; + break; + } -found: - mutex_lock(&sysfs_lock); + if (i == ARRAY_SIZE(trigger_types)) + return -EINVAL; - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else { - status = gpio_setup_irq(desc, dev, trigger_types[i].flags); + flags = trigger_types[i].flags; + + mutex_lock(&data->mutex); + + if (flags == data->irq_flags) { + status = size; + goto out_unlock; + } + + if (data->irq_flags) + gpio_sysfs_free_irq(dev); + + if (flags) { + status = gpio_sysfs_request_irq(dev, flags); if (!status) status = size; } - mutex_unlock(&sysfs_lock); +out_unlock: + mutex_unlock(&data->mutex); return status; } +static DEVICE_ATTR_RW(edge); -static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); - -static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, - int value) +/* Caller holds gpiod-data mutex. 
*/ +static int gpio_sysfs_set_active_low(struct device *dev, int value) { + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; int status = 0; + unsigned int flags = data->irq_flags; if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) return 0; @@ -300,69 +305,59 @@ static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, clear_bit(FLAG_ACTIVE_LOW, &desc->flags); /* reconfigure poll(2) support if enabled on one edge only */ - if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ - !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { - unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; - - gpio_setup_irq(desc, dev, 0); - status = gpio_setup_irq(desc, dev, trigger_flags); + if (flags == GPIO_IRQF_TRIGGER_FALLING || + flags == GPIO_IRQF_TRIGGER_RISING) { + gpio_sysfs_free_irq(dev); + status = gpio_sysfs_request_irq(dev, flags); } return status; } -static ssize_t gpio_active_low_show(struct device *dev, +static ssize_t active_low_show(struct device *dev, struct device_attribute *attr, char *buf) { - const struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; ssize_t status; - mutex_lock(&sysfs_lock); + mutex_lock(&data->mutex); - if (!test_bit(FLAG_EXPORT, &desc->flags)) - status = -EIO; - else - status = sprintf(buf, "%d\n", + status = sprintf(buf, "%d\n", !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); - mutex_unlock(&sysfs_lock); + mutex_unlock(&data->mutex); return status; } -static ssize_t gpio_active_low_store(struct device *dev, +static ssize_t active_low_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); ssize_t status; + long value; - mutex_lock(&sysfs_lock); + mutex_lock(&data->mutex); - if (!test_bit(FLAG_EXPORT, &desc->flags)) { - status = -EIO; - } else { - long value; + status = kstrtol(buf, 0, &value); + if (status == 0) + status = gpio_sysfs_set_active_low(dev, value); - status = kstrtol(buf, 0, &value); - if (status == 0) - status = sysfs_set_active_low(desc, dev, value != 0); - } - - mutex_unlock(&sysfs_lock); + mutex_unlock(&data->mutex); return status ? : size; } - -static DEVICE_ATTR(active_low, 0644, - gpio_active_low_show, gpio_active_low_store); +static DEVICE_ATTR_RW(active_low); static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); - struct gpio_desc *desc = dev_get_drvdata(dev); + struct gpiod_data *data = dev_get_drvdata(dev); + struct gpio_desc *desc = data->desc; umode_t mode = attr->mode; - bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags); + bool show_direction = data->direction_can_change; if (attr == &dev_attr_direction.attr) { if (!show_direction) @@ -402,32 +397,32 @@ static const struct attribute_group *gpio_groups[] = { * /ngpio ... 
matching gpio_chip.ngpio */ -static ssize_t chip_base_show(struct device *dev, +static ssize_t base_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%d\n", chip->base); } -static DEVICE_ATTR(base, 0444, chip_base_show, NULL); +static DEVICE_ATTR_RO(base); -static ssize_t chip_label_show(struct device *dev, +static ssize_t label_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%s\n", chip->label ? : ""); } -static DEVICE_ATTR(label, 0444, chip_label_show, NULL); +static DEVICE_ATTR_RO(label); -static ssize_t chip_ngpio_show(struct device *dev, +static ssize_t ngpio_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%u\n", chip->ngpio); } -static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); +static DEVICE_ATTR_RO(ngpio); static struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, @@ -552,6 +547,7 @@ static struct class gpio_class = { int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { struct gpio_chip *chip; + struct gpiod_data *data; unsigned long flags; int status; const char *ioname = NULL; @@ -574,9 +570,9 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) mutex_lock(&sysfs_lock); /* check if chip is being removed */ - if (!chip || !chip->exported) { + if (!chip || !chip->cdev) { status = -ENODEV; - goto fail_unlock; + goto err_unlock; } spin_lock_irqsave(&gpio_lock, flags); @@ -588,43 +584,54 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) test_bit(FLAG_REQUESTED, &desc->flags), test_bit(FLAG_EXPORT, &desc->flags)); status = -EPERM; - goto fail_unlock; + goto err_unlock; } + spin_unlock_irqrestore(&gpio_lock, flags); - if (desc->chip->direction_input && desc->chip->direction_output && - direction_may_change) { - set_bit(FLAG_SYSFS_DIR, &desc->flags); + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto err_unlock; } - spin_unlock_irqrestore(&gpio_lock, flags); + data->desc = desc; + mutex_init(&data->mutex); + if (chip->direction_input && chip->direction_output) + data->direction_can_change = direction_may_change; + else + data->direction_can_change = false; offset = gpio_chip_hwgpio(desc); - if (desc->chip->names && desc->chip->names[offset]) - ioname = desc->chip->names[offset]; + if (chip->names && chip->names[offset]) + ioname = chip->names[offset]; - dev = device_create_with_groups(&gpio_class, desc->chip->dev, - MKDEV(0, 0), desc, gpio_groups, + dev = device_create_with_groups(&gpio_class, chip->dev, + MKDEV(0, 0), data, gpio_groups, ioname ? 
ioname : "gpio%u", desc_to_gpio(desc)); if (IS_ERR(dev)) { status = PTR_ERR(dev); - goto fail_unlock; + goto err_free_data; } set_bit(FLAG_EXPORT, &desc->flags); mutex_unlock(&sysfs_lock); return 0; -fail_unlock: +err_free_data: + kfree(data); +err_unlock: mutex_unlock(&sysfs_lock); gpiod_dbg(desc, "%s: status %d\n", __func__, status); return status; } EXPORT_SYMBOL_GPL(gpiod_export); -static int match_export(struct device *dev, const void *data) +static int match_export(struct device *dev, const void *desc) { - return dev_get_drvdata(dev) == data; + struct gpiod_data *data = dev_get_drvdata(dev); + + return data->desc == desc; } /** @@ -641,82 +648,26 @@ static int match_export(struct device *dev, const void *data) int gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) { - int status = -EINVAL; + struct device *cdev; + int ret; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); return -EINVAL; } - mutex_lock(&sysfs_lock); - - if (test_bit(FLAG_EXPORT, &desc->flags)) { - struct device *tdev; - - tdev = class_find_device(&gpio_class, NULL, desc, match_export); - if (tdev != NULL) { - status = sysfs_create_link(&dev->kobj, &tdev->kobj, - name); - put_device(tdev); - } else { - status = -ENODEV; - } - } - - mutex_unlock(&sysfs_lock); + cdev = class_find_device(&gpio_class, NULL, desc, match_export); + if (!cdev) + return -ENODEV; - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); + ret = sysfs_create_link(&dev->kobj, &cdev->kobj, name); + put_device(cdev); - return status; + return ret; } EXPORT_SYMBOL_GPL(gpiod_export_link); /** - * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value - * @gpio: gpio to change - * @value: non-zero to use active low, i.e. inverted values - * - * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute. - * The GPIO does not have to be exported yet. If poll(2) support has - * been enabled for either rising or falling edge, it will be - * reconfigured to follow the new polarity. - * - * Returns zero on success, else an error. 
- */ -int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) -{ - struct device *dev = NULL; - int status = -EINVAL; - - if (!desc) { - pr_warn("%s: invalid GPIO\n", __func__); - return -EINVAL; - } - - mutex_lock(&sysfs_lock); - - if (test_bit(FLAG_EXPORT, &desc->flags)) { - dev = class_find_device(&gpio_class, NULL, desc, match_export); - if (dev == NULL) { - status = -ENODEV; - goto unlock; - } - } - - status = sysfs_set_active_low(desc, dev, value); - put_device(dev); -unlock: - mutex_unlock(&sysfs_lock); - - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); - - return status; -} -EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low); - -/** * gpiod_unexport - reverse effect of gpio_export() * @gpio: gpio to make unavailable * @@ -724,8 +675,8 @@ EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low); */ void gpiod_unexport(struct gpio_desc *desc) { - int status = 0; - struct device *dev = NULL; + struct gpiod_data *data; + struct device *dev; if (!desc) { pr_warn("%s: invalid GPIO\n", __func__); @@ -734,82 +685,79 @@ void gpiod_unexport(struct gpio_desc *desc) mutex_lock(&sysfs_lock); - if (test_bit(FLAG_EXPORT, &desc->flags)) { + if (!test_bit(FLAG_EXPORT, &desc->flags)) + goto err_unlock; - dev = class_find_device(&gpio_class, NULL, desc, match_export); - if (dev) { - gpio_setup_irq(desc, dev, 0); - clear_bit(FLAG_SYSFS_DIR, &desc->flags); - clear_bit(FLAG_EXPORT, &desc->flags); - } else - status = -ENODEV; - } + dev = class_find_device(&gpio_class, NULL, desc, match_export); + if (!dev) + goto err_unlock; + + data = dev_get_drvdata(dev); + + clear_bit(FLAG_EXPORT, &desc->flags); + + device_unregister(dev); + + /* + * Release irq after deregistration to prevent race with edge_store. + */ + if (data->irq_flags) + gpio_sysfs_free_irq(dev); mutex_unlock(&sysfs_lock); - if (dev) { - device_unregister(dev); - put_device(dev); - } + put_device(dev); + kfree(data); - if (status) - gpiod_dbg(desc, "%s: status %d\n", __func__, status); + return; + +err_unlock: + mutex_unlock(&sysfs_lock); } EXPORT_SYMBOL_GPL(gpiod_unexport); -int gpiochip_export(struct gpio_chip *chip) +int gpiochip_sysfs_register(struct gpio_chip *chip) { - int status; struct device *dev; - /* Many systems register gpio chips for SOC support very early, + /* + * Many systems add gpio chips for SOC support very early, * before driver model support is available. In those cases we - * export this later, in gpiolib_sysfs_init() ... here we just + * register later, in gpiolib_sysfs_init() ... here we just * verify that _some_ field of gpio_class got initialized. 
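+ * (gpio_class.p is assigned by class_register(), so checking it doubles + * as an "is the GPIO class registered yet" test.)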
*/ if (!gpio_class.p) return 0; /* use chip->base for the ID; it's already known to be unique */ - mutex_lock(&sysfs_lock); dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), chip, gpiochip_groups, "gpiochip%d", chip->base); if (IS_ERR(dev)) - status = PTR_ERR(dev); - else - status = 0; - chip->exported = (status == 0); - mutex_unlock(&sysfs_lock); + return PTR_ERR(dev); - if (status) - chip_dbg(chip, "%s: status %d\n", __func__, status); + mutex_lock(&sysfs_lock); + chip->cdev = dev; + mutex_unlock(&sysfs_lock); - return status; + return 0; } -void gpiochip_unexport(struct gpio_chip *chip) +void gpiochip_sysfs_unregister(struct gpio_chip *chip) { - int status; - struct device *dev; struct gpio_desc *desc; unsigned int i; + if (!chip->cdev) + return; + + device_unregister(chip->cdev); + + /* prevent further gpiod exports */ mutex_lock(&sysfs_lock); - dev = class_find_device(&gpio_class, NULL, chip, match_export); - if (dev) { - put_device(dev); - device_unregister(dev); - /* prevent further gpiod exports */ - chip->exported = false; - status = 0; - } else - status = -ENODEV; + chip->cdev = NULL; mutex_unlock(&sysfs_lock); - if (status) - chip_dbg(chip, "%s: status %d\n", __func__, status); - /* unregister gpiod class devices owned by sysfs */ for (i = 0; i < chip->ngpio; i++) { desc = &chip->desc[i]; @@ -836,19 +784,20 @@ static int __init gpiolib_sysfs_init(void) */ spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(chip, &gpio_chips, list) { - if (chip->exported) + if (chip->cdev) continue; /* - * TODO we yield gpio_lock here because gpiochip_export() - * acquires a mutex. This is unsafe and needs to be fixed. + * TODO we yield gpio_lock here because + * gpiochip_sysfs_register() acquires a mutex. This is unsafe + * and needs to be fixed. * * Also it would be nice to use gpiochip_find() here so we * can keep gpio_chips local to gpiolib.c, but the yield of * gpio_lock prevents us from doing this. 
*/ spin_unlock_irqrestore(&gpio_lock, flags); - status = gpiochip_export(chip); + status = gpiochip_sysfs_register(chip); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 6bc612b8a49f..be42ab368a80 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -290,7 +290,7 @@ int gpiochip_add(struct gpio_chip *chip) of_gpiochip_add(chip); acpi_gpiochip_add(chip); - status = gpiochip_export(chip); + status = gpiochip_sysfs_register(chip); if (status) goto err_remove_chip; @@ -327,10 +327,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add); */ void gpiochip_remove(struct gpio_chip *chip) { + struct gpio_desc *desc; unsigned long flags; unsigned id; + bool requested = false; - gpiochip_unexport(chip); + gpiochip_sysfs_unregister(chip); gpiochip_irqchip_remove(chip); @@ -341,15 +343,17 @@ void gpiochip_remove(struct gpio_chip *chip) spin_lock_irqsave(&gpio_lock, flags); for (id = 0; id < chip->ngpio; id++) { - if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) - dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); + desc = &chip->desc[id]; + desc->chip = NULL; + if (test_bit(FLAG_REQUESTED, &desc->flags)) + requested = true; } - for (id = 0; id < chip->ngpio; id++) - chip->desc[id].chip = NULL; - list_del(&chip->list); spin_unlock_irqrestore(&gpio_lock, flags); + if (requested) + dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); + kfree(chip->desc); chip->desc = NULL; } @@ -441,6 +445,8 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, */ irq_set_handler_data(parent_irq, gpiochip); irq_set_chained_handler(parent_irq, parent_handler); + + gpiochip->irq_parent = parent_irq; } /* Set the parent IRQ for all affected IRQs */ @@ -549,6 +555,11 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) acpi_gpiochip_free_interrupts(gpiochip); + if (gpiochip->irq_parent) { + irq_set_chained_handler(gpiochip->irq_parent, NULL); + irq_set_handler_data(gpiochip->irq_parent, NULL); + } + /* Remove all IRQ mappings and delete the domain */ if (gpiochip->irqdomain) { for (offset = 0; offset < gpiochip->ngpio; offset++) @@ -608,7 +619,7 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip, of_node = gpiochip->dev->of_node; #ifdef CONFIG_OF_GPIO /* - * If the gpiochip has an assigned OF node this takes precendence + * If the gpiochip has an assigned OF node this takes precedence * FIXME: get rid of this and use gpiochip->dev->of_node everywhere */ if (gpiochip->of_node) @@ -1211,7 +1222,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value); /* * _gpio_set_open_drain_value() - Set the open drain gpio's value. * @desc: gpio descriptor whose state need to be set. - * @value: Non-zero for setting it HIGH otherise it will set to LOW. + * @value: Non-zero for setting it HIGH otherwise it will set to LOW. */ static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value) { @@ -1238,7 +1249,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value) /* * _gpio_set_open_source_value() - Set the open source gpio's value. * @desc: gpio descriptor whose state need to be set. - * @value: Non-zero for setting it HIGH otherise it will set to LOW. + * @value: Non-zero for setting it HIGH otherwise it will set to LOW. 
*/ static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value) { @@ -1300,17 +1311,16 @@ static void gpio_chip_set_multiple(struct gpio_chip *chip, continue; } /* set outputs if the corresponding mask bit is set */ - if (__test_and_clear_bit(i, mask)) { + if (__test_and_clear_bit(i, mask)) chip->set(chip, i, test_bit(i, bits)); - } } } } -static void gpiod_set_array_priv(bool raw, bool can_sleep, - unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array) +static void gpiod_set_array_value_priv(bool raw, bool can_sleep, + unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) { int i = 0; @@ -1320,9 +1330,9 @@ static void gpiod_set_array_priv(bool raw, bool can_sleep, unsigned long bits[BITS_TO_LONGS(chip->ngpio)]; int count = 0; - if (!can_sleep) { + if (!can_sleep) WARN_ON(chip->can_sleep); - } + memset(mask, 0, sizeof(mask)); do { struct gpio_desc *desc = desc_array[i]; @@ -1337,24 +1347,22 @@ static void gpiod_set_array_priv(bool raw, bool can_sleep, * open drain and open source outputs are set individually */ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { - _gpio_set_open_drain_value(desc,value); + _gpio_set_open_drain_value(desc, value); } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { _gpio_set_open_source_value(desc, value); } else { __set_bit(hwgpio, mask); - if (value) { + if (value) __set_bit(hwgpio, bits); - } else { + else __clear_bit(hwgpio, bits); - } count++; } i++; } while ((i < array_size) && (desc_array[i]->chip == chip)); /* push collected bits to outputs */ - if (count != 0) { + if (count != 0) gpio_chip_set_multiple(chip, mask, bits); - } } } @@ -1403,7 +1411,7 @@ void gpiod_set_value(struct gpio_desc *desc, int value) EXPORT_SYMBOL_GPL(gpiod_set_value); /** - * gpiod_set_raw_array() - assign values to an array of GPIOs + * gpiod_set_raw_array_value() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor / value arrays * @desc_array: array of GPIO descriptors whose values will be assigned * @value_array: array of values to assign @@ -1414,17 +1422,18 @@ EXPORT_SYMBOL_GPL(gpiod_set_value); * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. */ -void gpiod_set_raw_array(unsigned int array_size, +void gpiod_set_raw_array_value(unsigned int array_size, struct gpio_desc **desc_array, int *value_array) { if (!desc_array) return; - gpiod_set_array_priv(true, false, array_size, desc_array, value_array); + gpiod_set_array_value_priv(true, false, array_size, desc_array, + value_array); } -EXPORT_SYMBOL_GPL(gpiod_set_raw_array); +EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value); /** - * gpiod_set_array() - assign values to an array of GPIOs + * gpiod_set_array_value() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor / value arrays * @desc_array: array of GPIO descriptors whose values will be assigned * @value_array: array of values to assign @@ -1435,14 +1444,15 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_array); * This function should be called from contexts where we cannot sleep, and will * complain if the GPIO chip functions potentially sleep. 
*/ -void gpiod_set_array(unsigned int array_size, - struct gpio_desc **desc_array, int *value_array) +void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array) { if (!desc_array) return; - gpiod_set_array_priv(false, false, array_size, desc_array, value_array); + gpiod_set_array_value_priv(false, false, array_size, desc_array, + value_array); } -EXPORT_SYMBOL_GPL(gpiod_set_array); +EXPORT_SYMBOL_GPL(gpiod_set_array_value); /** * gpiod_cansleep() - report whether gpio value access may sleep @@ -1604,7 +1614,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); /** - * gpiod_set_raw_array_cansleep() - assign values to an array of GPIOs + * gpiod_set_raw_array_value_cansleep() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor / value arrays * @desc_array: array of GPIO descriptors whose values will be assigned * @value_array: array of values to assign @@ -1614,19 +1624,20 @@ EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep); * * This function is to be called from contexts that can sleep. */ -void gpiod_set_raw_array_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array) +void gpiod_set_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) { might_sleep_if(extra_checks); if (!desc_array) return; - gpiod_set_array_priv(true, true, array_size, desc_array, value_array); + gpiod_set_array_value_priv(true, true, array_size, desc_array, + value_array); } -EXPORT_SYMBOL_GPL(gpiod_set_raw_array_cansleep); +EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep); /** - * gpiod_set_array_cansleep() - assign values to an array of GPIOs + * gpiod_set_array_value_cansleep() - assign values to an array of GPIOs * @array_size: number of elements in the descriptor / value arrays * @desc_array: array of GPIO descriptors whose values will be assigned * @value_array: array of values to assign @@ -1636,16 +1647,17 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_array_cansleep); * * This function is to be called from contexts that can sleep. */ -void gpiod_set_array_cansleep(unsigned int array_size, - struct gpio_desc **desc_array, - int *value_array) +void gpiod_set_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) { might_sleep_if(extra_checks); if (!desc_array) return; - gpiod_set_array_priv(false, true, array_size, desc_array, value_array); + gpiod_set_array_value_priv(false, true, array_size, desc_array, + value_array); } -EXPORT_SYMBOL_GPL(gpiod_set_array_cansleep); +EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep); /** * gpiod_add_lookup_table() - register GPIO device consumers @@ -1880,7 +1892,7 @@ EXPORT_SYMBOL_GPL(gpiod_count); * * Return the GPIO descriptor corresponding to the function con_id of device * dev, -ENOENT if no GPIO has been assigned to the requested function, or - * another IS_ERR() code if an error occured while trying to acquire the GPIO. + * another IS_ERR() code if an error occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id, enum gpiod_flags flags) @@ -1960,7 +1972,7 @@ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, * * Return a valid GPIO descriptor, -ENOENT if no GPIO has been assigned to the * requested function and/or index, or another IS_ERR() code if an error - * occured while trying to acquire the GPIO. 
+ * occurred while trying to acquire the GPIO. */ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, const char *con_id, @@ -2118,13 +2130,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, local_desc = gpiochip_request_own_desc(chip, hwnum, name); if (IS_ERR(local_desc)) { - pr_debug("requesting own GPIO %s failed\n", name); + pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n", + name, chip->label, hwnum); return PTR_ERR(local_desc); } status = gpiod_configure_flags(desc, name, lflags, dflags); if (status < 0) { - pr_debug("setup of GPIO %s failed\n", name); + pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n", + name, chip->label, hwnum); gpiochip_free_own_desc(desc); return status; } diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 594b1798c0e7..bf343004b008 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -83,20 +83,12 @@ struct gpio_desc { #define FLAG_IS_OUT 1 #define FLAG_EXPORT 2 /* protected by sysfs_lock */ #define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */ -#define FLAG_TRIG_FALL 4 /* trigger on falling edge */ -#define FLAG_TRIG_RISE 5 /* trigger on rising edge */ #define FLAG_ACTIVE_LOW 6 /* value has active low */ #define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ #define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ #define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ -#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */ #define FLAG_IS_HOGGED 11 /* GPIO is hogged */ -#define ID_SHIFT 16 /* add new flags before this one */ - -#define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1) -#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) - const char *label; }; @@ -151,17 +143,17 @@ static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc) #ifdef CONFIG_GPIO_SYSFS -int gpiochip_export(struct gpio_chip *chip); -void gpiochip_unexport(struct gpio_chip *chip); +int gpiochip_sysfs_register(struct gpio_chip *chip); +void gpiochip_sysfs_unregister(struct gpio_chip *chip); #else -static inline int gpiochip_export(struct gpio_chip *chip) +static inline int gpiochip_sysfs_register(struct gpio_chip *chip) { return 0; } -static inline void gpiochip_unexport(struct gpio_chip *chip) +static inline void gpiochip_sysfs_unregister(struct gpio_chip *chip) { } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 266dcd6cdf3b..0a957828b3bd 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -36,9 +36,6 @@ #include <linux/pci.h> #include <linux/export.h> -#ifdef CONFIG_X86 -#include <asm/mtrr.h> -#endif static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -197,16 +194,7 @@ static int drm_getmap(struct drm_device *dev, void *data, map->type = r_list->map->type; map->flags = r_list->map->flags; map->handle = (void *)(unsigned long) r_list->user_token; - -#ifdef CONFIG_X86 - /* - * There appears to be exactly one user of the mtrr index: dritest. - * It's easy enough to keep it working on non-PAT systems. 
- */ - map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr); -#else - map->mtrr = -1; -#endif + map->mtrr = arch_phys_wc_index(r_list->map->mtrr); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a3190e793ed4..cc552a4c1f3b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -32,6 +32,7 @@ #include "i915_trace.h" #include "intel_drv.h" #include <linux/dma_remapping.h> +#include <linux/uaccess.h> #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) @@ -465,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, } /* We can't wait for rendering with pagefaults disabled */ - if (obj->active && in_atomic()) + if (obj->active && pagefault_disabled()) return -EFAULT; if (use_cpu_reloc(obj)) diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 71e87abdcae7..481337436f72 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -396,16 +396,6 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) return -EINVAL; } -/* - * If the vendor backlight interface is not in use and ACPI backlight interface - * is broken, do not bother processing backlight change requests from firmware. - */ -static bool should_ignore_backlight_request(void) -{ - return acpi_video_backlight_support() && - !acpi_video_verify_backlight_support(); -} - static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -414,7 +404,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); - if (should_ignore_backlight_request()) { + if (acpi_video_get_backlight_type() == acpi_backlight_native) { DRM_DEBUG_KMS("opregion backlight request ignored\n"); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 92be50c39ffd..ab89eed9ddd9 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -7944,8 +7944,8 @@ typedef struct { typedef struct { AMD_ACPI_DESCRIPTION_HEADER SHeader; UCHAR TableUUID[16]; //0x24 - ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture. - ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture. + ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure. + ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure. 
ULONG Reserved[4]; //0x3C }UEFI_ACPI_VFCT; diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 67bab5c36056..6d2f39d36e44 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1119,10 +1119,9 @@ static int ipu_irq_init(struct ipu_soc *ipu) ct->regs.mask = IPU_INT_CTRL(i / 32); } - irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler); - irq_set_handler_data(ipu->irq_sync, ipu); - irq_set_chained_handler(ipu->irq_err, ipu_err_irq_handler); - irq_set_handler_data(ipu->irq_err, ipu); + irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu); + irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler, + ipu); return 0; } @@ -1131,10 +1130,8 @@ static void ipu_irq_exit(struct ipu_soc *ipu) { int i, irq; - irq_set_chained_handler(ipu->irq_err, NULL); - irq_set_handler_data(ipu->irq_err, NULL); - irq_set_chained_handler(ipu->irq_sync, NULL); - irq_set_handler_data(ipu->irq_sync, NULL); + irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL); + irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL); /* TODO: remove irq_domain_generic_chips */ diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 15338afdf7f9..cc4c6649d195 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -634,7 +634,12 @@ config HID_PLANTRONICS tristate "Plantronics USB HID Driver" depends on HID ---help--- - Provides HID support for Plantronics telephony devices. + Provides HID support for Plantronics USB audio devices. + Correctly maps vendor unique volume up/down HID usages to + KEY_VOLUMEUP and KEY_VOLUMEDOWN events and prevents core mapping + of other vendor unique HID usages to random mouse events. + + Say M here if you may ever plug in a Plantronics USB audio device. 
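An aside on the ipu-v3 hunk above: irq_set_chained_handler_and_data() installs a chained flow handler and its handler data in a single call, closing the window the old two-call sequence left in which the handler could run before its data was set (or, on teardown, after it was cleared). A minimal sketch of the pattern; struct my_mux, my_demux_handler() and my_mux_irq_init/exit() are illustrative names, not part of this patch:

#include <linux/irq.h>
#include <linux/irqdesc.h>

struct my_mux;

/* Chained flow handler; signature per irq_flow_handler_t in this kernel. */
static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct my_mux *mux = irq_desc_get_handler_data(desc);

	/* ... read the mux status via 'mux', generic_handle_irq() children ... */
	(void)mux;
}

static void my_mux_irq_init(unsigned int mux_irq, struct my_mux *mux)
{
	/* Handler and its data become visible to the irq core together. */
	irq_set_chained_handler_and_data(mux_irq, my_demux_handler, mux);
}

static void my_mux_irq_exit(unsigned int mux_irq)
{
	/* NULL/NULL tears both down at once, mirroring ipu_irq_exit(). */
	irq_set_chained_handler_and_data(mux_irq, NULL, NULL);
}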
config HID_PRIMAX tristate "Primax non-fully HID-compliant devices" diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index e4a21dfd7ef3..2f8a41dc3cc8 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o obj-$(CONFIG_HID_ACRUX) += hid-axff.o obj-$(CONFIG_HID_APPLE) += hid-apple.o obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o -obj-$(CONFIG_HID_AUREAL) += hid-aureal.o +obj-$(CONFIG_HID_AUREAL) += hid-aureal.o obj-$(CONFIG_HID_BELKIN) += hid-belkin.o obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o obj-$(CONFIG_HID_CHERRY) += hid-cherry.o @@ -46,12 +46,12 @@ obj-$(CONFIG_HID_ICADE) += hid-icade.o obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o obj-$(CONFIG_HID_KYE) += hid-kye.o -obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o +obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o -obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o +obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 722a925795a2..157c62775053 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -706,7 +706,8 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type) if (hid->vendor == USB_VENDOR_ID_MICROSOFT && (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 || - hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP) && + hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP || + hid->product == USB_DEVICE_ID_MS_POWER_COVER) && hid->group == HID_GROUP_MULTITOUCH) hid->group = HID_GROUP_GENERIC; @@ -1061,13 +1062,13 @@ static u32 s32ton(__s32 value, unsigned n) * Search linux-kernel and linux-usb-devel archives for "hid-core extract". */ -static __u32 extract(const struct hid_device *hid, __u8 *report, +__u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n) { u64 x; if (n > 32) - hid_warn(hid, "extract() called with n (%d) > 32! (%s)\n", + hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", n, current->comm); report += offset >> 3; /* adjust byte index */ @@ -1076,6 +1077,7 @@ static __u32 extract(const struct hid_device *hid, __u8 *report, x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */ return (u32) x; } +EXPORT_SYMBOL_GPL(hid_field_extract); /* * "implement" : set bits in a little endian bit stream. @@ -1221,9 +1223,9 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field, for (n = 0; n < count; n++) { value[n] = min < 0 ? 
- snto32(extract(hid, data, offset + n * size, size), - size) : - extract(hid, data, offset + n * size, size); + snto32(hid_field_extract(hid, data, offset + n * size, + size), size) : + hid_field_extract(hid, data, offset + n * size, size); /* Ignore report if ErrorRollOver */ if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && @@ -1851,6 +1853,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, @@ -1901,6 +1904,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, @@ -1959,9 +1963,12 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, - { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) }, @@ -1997,6 +2004,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, @@ -2265,14 +2273,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) }, { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) }, - { 
HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_0_4_IF_KIT) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_16_16_IF_KIT) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_8_8_8_IF_KIT) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_7_IF_KIT) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_0_8_8_IF_KIT) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_PHIDGET_MOTORCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) }, @@ -2399,14 +2399,6 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, #endif - { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) }, - { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, - { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, - { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, - { HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) }, - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) }, { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, { } diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c index c4ef3bc726e3..1b764d1745f3 100644 --- a/drivers/hid/hid-cypress.c +++ b/drivers/hid/hid-cypress.c @@ -41,13 +41,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, for (i = 0; i < *rsize - 4; i++) if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { - __u8 tmp; - rdesc[i] = 0x19; rdesc[i + 2] = 0x29; - tmp = rdesc[i + 3]; - rdesc[i + 3] = rdesc[i + 1]; - rdesc[i + 1] = tmp; + swap(rdesc[i + 3], rdesc[i + 1]); } return rdesc; } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7ce93d927f62..b04b0820d816 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -227,6 +227,7 @@ #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 +#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 #define USB_DEVICE_ID_CHICONY_AK1D 0x1125 @@ -363,16 +364,6 @@ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 -#define USB_VENDOR_ID_GLAB 0x06c2 -#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 -#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039 -#define USB_DEVICE_ID_0_0_4_IF_KIT 0x0040 -#define USB_DEVICE_ID_0_16_16_IF_KIT 0x0044 -#define USB_DEVICE_ID_8_8_8_IF_KIT 0x0045 -#define USB_DEVICE_ID_0_8_7_IF_KIT 0x0051 -#define USB_DEVICE_ID_0_8_8_IF_KIT 0x0053 -#define USB_DEVICE_ID_PHIDGET_MOTORCONTROL 0x0058 - #define USB_VENDOR_ID_GOODTOUCH 0x1aad #define USB_DEVICE_ID_GOODTOUCH_000f 0x000f @@ -586,6 +577,7 @@ #define USB_DEVICE_ID_LENOVO_TPKBD 0x6009 #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 +#define 
USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_VENDOR_ID_LG 0x1fd2 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 @@ -673,6 +665,7 @@ #define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 #define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc #define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd +#define USB_DEVICE_ID_MS_POWER_COVER 0x07da #define USB_VENDOR_ID_MOJO 0x8282 #define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 @@ -852,6 +845,7 @@ #define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4 +#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002 #define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000 @@ -958,13 +952,6 @@ #define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061 #define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068 -#define USB_VENDOR_ID_VERNIER 0x08f7 -#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 -#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002 -#define USB_DEVICE_ID_VERNIER_SKIP 0x0003 -#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 -#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 - #define USB_VENDOR_ID_VTL 0x0306 #define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f @@ -983,9 +970,6 @@ #define USB_VENDOR_ID_WISEGROUP 0x0925 #define USB_DEVICE_ID_SMARTJOY_PLUS 0x0005 -#define USB_DEVICE_ID_1_PHIDGETSERVO_20 0x8101 -#define USB_DEVICE_ID_4_PHIDGETSERVO_20 0x8104 -#define USB_DEVICE_ID_8_8_4_IF_KIT 0x8201 #define USB_DEVICE_ID_SUPER_JOY_BOX_3 0x8888 #define USB_DEVICE_ID_QUAD_USB_JOYPAD 0x8800 #define USB_DEVICE_ID_DUAL_USB_JOYPAD 0x8866 @@ -1039,4 +1023,11 @@ #define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */ #define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */ +#define USB_VENDOR_ID_MULTIPLE_1781 0x1781 +#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a8d + +#define USB_VENDOR_ID_DRACAL_RAPHNET 0x289b +#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002 +#define USB_DEVICE_ID_RAPHNET_4NES4SNES 0x0003 + #endif diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 008e89bf6f3c..3511bbaba505 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1157,7 +1157,8 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct return; /* report the usage code as scancode if the key status has changed */ - if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value) + if (usage->type == EV_KEY && + (!test_bit(usage->code, input->key)) == value) input_event(input, EV_MSC, MSC_SCAN, usage->hid); input_event(input, usage->type, usage->code, value); diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index c4c3f0952521..4f59bffd0205 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -43,6 +43,35 @@ struct lenovo_drvdata_cptkbd { #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) +static const __u8 lenovo_pro_dock_need_fixup_collection[] = { + 0x05, 0x88, /* Usage Page (Vendor Usage Page 0x88) */ + 0x09, 0x01, /* Usage (Vendor Usage 0x01) */ + 0xa1, 0x01, /* Collection (Application) */ + 0x85, 0x04, /* Report ID (4) */ + 0x19, 0x00, /* Usage Minimum (0) */ + 0x2a, 0xff, 0xff, /* Usage Maximum (65535) */ +}; + +static __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + switch (hdev->product) { + case USB_DEVICE_ID_LENOVO_TPPRODOCK: + /* the fixups that need to be done: + * - get a reasonable usage max for the vendor collection + * 0x8801 from the report ID 4 + */ + 
if (*rsize >= 153 && + memcmp(&rdesc[140], lenovo_pro_dock_need_fixup_collection, + sizeof(lenovo_pro_dock_need_fixup_collection)) == 0) { + rdesc[151] = 0x01; + rdesc[152] = 0x00; + } + break; + } + return rdesc; +} + static int lenovo_input_mapping_tpkbd(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -599,7 +628,8 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) GFP_KERNEL); if (data_pointer == NULL) { hid_err(hdev, "Could not allocate memory for driver data\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } // set same default values as windows driver @@ -610,7 +640,8 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); if (name_mute == NULL || name_micmute == NULL) { hid_err(hdev, "Could not allocate memory for led data\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err; } snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev)); snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev)); @@ -634,6 +665,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) lenovo_features_set_tpkbd(hdev); return 0; +err: + sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tpkbd); + return ret; } static int lenovo_probe_cptkbd(struct hid_device *hdev) @@ -762,10 +796,29 @@ static void lenovo_remove(struct hid_device *hdev) hid_hw_stop(hdev); } +static void lenovo_input_configured(struct hid_device *hdev, + struct hid_input *hi) +{ + switch (hdev->product) { + case USB_DEVICE_ID_LENOVO_TPKBD: + case USB_DEVICE_ID_LENOVO_CUSBKBD: + case USB_DEVICE_ID_LENOVO_CBTKBD: + if (test_bit(EV_REL, hi->input->evbit)) { + /* set only for trackpoint device */ + __set_bit(INPUT_PROP_POINTER, hi->input->propbit); + __set_bit(INPUT_PROP_POINTING_STICK, + hi->input->propbit); + } + break; + } +} + + static const struct hid_device_id lenovo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, { } }; @@ -774,10 +827,12 @@ MODULE_DEVICE_TABLE(hid, lenovo_devices); static struct hid_driver lenovo_driver = { .name = "lenovo", .id_table = lenovo_devices, + .input_configured = lenovo_input_configured, .input_mapping = lenovo_input_mapping, .probe = lenovo_probe, .remove = lenovo_remove, .raw_event = lenovo_raw_event, + .report_fixup = lenovo_report_fixup, }; module_hid_driver(lenovo_driver); diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index b86c18e651ed..429340d809b5 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -700,7 +700,8 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) /* insert a little delay of 10 jiffies ~ 40ms */ wait_queue_head_t wait; init_waitqueue_head (&wait); - wait_event_interruptible_timeout(wait, 0, 10); + wait_event_interruptible_timeout(wait, 0, + msecs_to_jiffies(40)); /* Select random Address */ buf[1] = 0xB2; @@ -712,13 +713,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) } if (drv_data->quirks & LG_FF) - lgff_init(hdev); - if (drv_data->quirks & LG_FF2) - lg2ff_init(hdev); - if (drv_data->quirks & LG_FF3) - lg3ff_init(hdev); - if (drv_data->quirks & LG_FF4) - lg4ff_init(hdev); + ret = lgff_init(hdev); + else if (drv_data->quirks & LG_FF2) + ret = lg2ff_init(hdev); + else if 
(drv_data->quirks & LG_FF3) + ret = lg3ff_init(hdev); + else if (drv_data->quirks & LG_FF4) + ret = lg4ff_init(hdev); + + if (ret) + goto err_free; return 0; err_free: @@ -731,8 +735,8 @@ static void lg_remove(struct hid_device *hdev) struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); - - hid_hw_stop(hdev); + else + hid_hw_stop(hdev); kfree(drv_data); } diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index 1232210b1cc5..02cec83caac3 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c @@ -68,26 +68,32 @@ #define LG4FF_FFEX_REV_MAJ 0x21 #define LG4FF_FFEX_REV_MIN 0x00 -static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range); -static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range); - -struct lg4ff_device_entry { - __u32 product_id; - __u16 range; - __u16 min_range; - __u16 max_range; +static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range); +static void lg4ff_set_range_g25(struct hid_device *hid, u16 range); + +struct lg4ff_wheel_data { + const u32 product_id; + u16 range; + const u16 min_range; + const u16 max_range; #ifdef CONFIG_LEDS_CLASS - __u8 led_state; + u8 led_state; struct led_classdev *led[5]; #endif - u32 alternate_modes; - const char *real_tag; - const char *real_name; - u16 real_product_id; - struct list_head list; + const u32 alternate_modes; + const char * const real_tag; + const char * const real_name; + const u16 real_product_id; + void (*set_range)(struct hid_device *hid, u16 range); }; +struct lg4ff_device_entry { + spinlock_t report_lock; /* Protect output HID report */ + struct hid_report *report; + struct lg4ff_wheel_data wdata; +}; + static const signed short lg4ff_wheel_effects[] = { FF_CONSTANT, FF_AUTOCENTER, @@ -95,16 +101,16 @@ static const signed short lg4ff_wheel_effects[] = { }; struct lg4ff_wheel { - const __u32 product_id; + const u32 product_id; const signed short *ff_effects; - const __u16 min_range; - const __u16 max_range; + const u16 min_range; + const u16 max_range; void (*set_range)(struct hid_device *hid, u16 range); }; struct lg4ff_compat_mode_switch { - const __u8 cmd_count; /* Number of commands to send */ - const __u8 cmd[]; + const u8 cmd_count; /* Number of commands to send */ + const u8 cmd[]; }; struct lg4ff_wheel_ident_info { @@ -134,10 +140,10 @@ struct lg4ff_alternate_mode { static const struct lg4ff_wheel lg4ff_devices[] = { {USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}, {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}, - {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp}, - {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, - {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, - {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25}, + {USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_dfp}, + {USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25}, + {USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25}, + {USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, lg4ff_set_range_g25}, {USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL}, {USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL} }; @@ -245,10 +251,10 @@ static const struct lg4ff_compat_mode_switch 
lg4ff_mode_switch_ext16_g25 = { }; /* Recalculates X axis value accordingly to currently selected range */ -static __s32 lg4ff_adjust_dfp_x_axis(__s32 value, __u16 range) +static s32 lg4ff_adjust_dfp_x_axis(s32 value, u16 range) { - __u16 max_range; - __s32 new_value; + u16 max_range; + s32 new_value; if (range == 900) return value; @@ -269,21 +275,21 @@ static __s32 lg4ff_adjust_dfp_x_axis(__s32 value, __u16 range) } int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, - struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data) + struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data) { struct lg4ff_device_entry *entry = drv_data->device_props; - __s32 new_value = 0; + s32 new_value = 0; if (!entry) { hid_err(hid, "Device properties not found"); return 0; } - switch (entry->product_id) { + switch (entry->wdata.product_id) { case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: switch (usage->code) { case ABS_X: - new_value = lg4ff_adjust_dfp_x_axis(value, entry->range); + new_value = lg4ff_adjust_dfp_x_axis(value, entry->wdata.range); input_event(field->hidinput->input, usage->type, usage->code, new_value); return 1; default: @@ -294,14 +300,56 @@ int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, } } -static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) +static void lg4ff_init_wheel_data(struct lg4ff_wheel_data * const wdata, const struct lg4ff_wheel *wheel, + const struct lg4ff_multimode_wheel *mmode_wheel, + const u16 real_product_id) +{ + u32 alternate_modes = 0; + const char *real_tag = NULL; + const char *real_name = NULL; + + if (mmode_wheel) { + alternate_modes = mmode_wheel->alternate_modes; + real_tag = mmode_wheel->real_tag; + real_name = mmode_wheel->real_name; + } + + { + struct lg4ff_wheel_data t_wdata = { .product_id = wheel->product_id, + .real_product_id = real_product_id, + .min_range = wheel->min_range, + .max_range = wheel->max_range, + .set_range = wheel->set_range, + .alternate_modes = alternate_modes, + .real_tag = real_tag, + .real_name = real_name }; + + memcpy(wdata, &t_wdata, sizeof(t_wdata)); + } +} + +static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - __s32 *value = report->field[0]->value; + struct lg4ff_device_entry *entry; + struct lg_drv_data *drv_data; + unsigned long flags; + s32 *value; int x; + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return -EINVAL; + } + + entry = drv_data->device_props; + if (!entry) { + hid_err(hid, "Device properties not found!\n"); + return -EINVAL; + } + value = entry->report->field[0]->value; + #define CLAMP(x) do { if (x < 0) x = 0; else if (x > 0xff) x = 0xff; } while (0) switch (effect->type) { @@ -309,6 +357,7 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */ CLAMP(x); + spin_lock_irqsave(&entry->report_lock, flags); if (x == 0x80) { /* De-activate force in slot-1*/ value[0] = 0x13; @@ -319,7 +368,8 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + 
spin_unlock_irqrestore(&entry->report_lock, flags); return 0; } @@ -331,7 +381,8 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); break; } return 0; @@ -339,15 +390,16 @@ static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *e /* Sends default autocentering command compatible with * all wheels except Formula Force EX */ -static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude) +static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - __s32 *value = report->field[0]->value; - __u32 expand_a, expand_b; + s32 *value = report->field[0]->value; + u32 expand_a, expand_b; struct lg4ff_device_entry *entry; struct lg_drv_data *drv_data; + unsigned long flags; drv_data = hid_get_drvdata(hid); if (!drv_data) { @@ -360,8 +412,10 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud hid_err(hid, "Device properties not found!\n"); return; } + value = entry->report->field[0]->value; /* De-activate Auto-Center */ + spin_lock_irqsave(&entry->report_lock, flags); if (magnitude == 0) { value[0] = 0xf5; value[1] = 0x00; @@ -371,7 +425,8 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); return; } @@ -384,7 +439,7 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud } /* Adjust for non-MOMO wheels */ - switch (entry->product_id) { + switch (entry->wdata.product_id) { case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: break; @@ -401,7 +456,7 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); /* Activate Auto-Center */ value[0] = 0x14; @@ -412,18 +467,34 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); } /* Sends autocentering command compatible with Formula Force EX */ -static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude) +static void lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - __s32 *value = report->field[0]->value; + struct lg4ff_device_entry *entry; + struct lg_drv_data *drv_data; + unsigned long flags; + s32 *value; magnitude = magnitude * 90 / 65535; + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return; + } + + entry = drv_data->device_props; + if 
(!entry) { + hid_err(hid, "Device properties not found!\n"); + return; + } + value = entry->report->field[0]->value; + + spin_lock_irqsave(&entry->report_lock, flags); value[0] = 0xfe; value[1] = 0x03; value[2] = magnitude >> 14; @@ -432,18 +503,33 @@ static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude) value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); } /* Sends command to set range compatible with G25/G27/Driving Force GT */ -static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range) +static void lg4ff_set_range_g25(struct hid_device *hid, u16 range) { - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - __s32 *value = report->field[0]->value; + struct lg4ff_device_entry *entry; + struct lg_drv_data *drv_data; + unsigned long flags; + s32 *value; + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return; + } + + entry = drv_data->device_props; + if (!entry) { + hid_err(hid, "Device properties not found!\n"); + return; + } + value = entry->report->field[0]->value; dbg_hid("G25/G27/DFGT: setting range to %u\n", range); + spin_lock_irqsave(&entry->report_lock, flags); value[0] = 0xf8; value[1] = 0x81; value[2] = range & 0x00ff; @@ -452,20 +538,35 @@ static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range) value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); } /* Sends commands to set range compatible with Driving Force Pro wheel */ -static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) +static void lg4ff_set_range_dfp(struct hid_device *hid, u16 range) { - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct hid_report *report = list_entry(report_list->next, struct hid_report, list); + struct lg4ff_device_entry *entry; + struct lg_drv_data *drv_data; + unsigned long flags; int start_left, start_right, full_range; - __s32 *value = report->field[0]->value; + s32 *value; + + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return; + } + entry = drv_data->device_props; + if (!entry) { + hid_err(hid, "Device properties not found!\n"); + return; + } + value = entry->report->field[0]->value; dbg_hid("Driving Force Pro: setting range to %u\n", range); /* Prepare "coarse" limit command */ + spin_lock_irqsave(&entry->report_lock, flags); value[0] = 0xf8; value[1] = 0x00; /* Set later */ value[2] = 0x00; @@ -475,13 +576,13 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) value[6] = 0x00; if (range > 200) { - report->field[0]->value[1] = 0x03; + value[1] = 0x03; full_range = 900; } else { - report->field[0]->value[1] = 0x02; + value[1] = 0x02; full_range = 200; } - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); /* Prepare "fine" limit command */ value[0] = 0x81; @@ -493,7 +594,8 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) value[6] = 0x00; if (range == 200 || range == 900) { /* Do not apply any fine limit */ - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + 
hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); return; } @@ -507,7 +609,8 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) value[5] = (start_right & 0xe) << 4 | (start_left & 0xe); value[6] = 0xff; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); } static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(const u16 real_product_id, const u16 target_product_id) @@ -569,19 +672,35 @@ static const struct lg4ff_compat_mode_switch *lg4ff_get_mode_switch_command(cons static int lg4ff_switch_compatibility_mode(struct hid_device *hid, const struct lg4ff_compat_mode_switch *s) { - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - __s32 *value = report->field[0]->value; + struct lg4ff_device_entry *entry; + struct lg_drv_data *drv_data; + unsigned long flags; + s32 *value; u8 i; + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return -EINVAL; + } + + entry = drv_data->device_props; + if (!entry) { + hid_err(hid, "Device properties not found!\n"); + return -EINVAL; + } + value = entry->report->field[0]->value; + + spin_lock_irqsave(&entry->report_lock, flags); for (i = 0; i < s->cmd_count; i++) { u8 j; for (j = 0; j < 7; j++) value[j] = s->cmd[j + (7*i)]; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); } + spin_unlock_irqrestore(&entry->report_lock, flags); hid_hw_wait(hid); return 0; } @@ -606,23 +725,23 @@ static ssize_t lg4ff_alternate_modes_show(struct device *dev, struct device_attr return 0; } - if (!entry->real_name) { + if (!entry->wdata.real_name) { hid_err(hid, "NULL pointer to string\n"); return 0; } for (i = 0; i < LG4FF_MODE_MAX_IDX; i++) { - if (entry->alternate_modes & BIT(i)) { + if (entry->wdata.alternate_modes & BIT(i)) { /* Print tag and full name */ count += scnprintf(buf + count, PAGE_SIZE - count, "%s: %s", lg4ff_alternate_modes[i].tag, - !lg4ff_alternate_modes[i].product_id ? entry->real_name : lg4ff_alternate_modes[i].name); + !lg4ff_alternate_modes[i].product_id ? 
entry->wdata.real_name : lg4ff_alternate_modes[i].name); if (count >= PAGE_SIZE - 1) return count; /* Mark the currently active mode with an asterisk */ - if (lg4ff_alternate_modes[i].product_id == entry->product_id || - (lg4ff_alternate_modes[i].product_id == 0 && entry->product_id == entry->real_product_id)) + if (lg4ff_alternate_modes[i].product_id == entry->wdata.product_id || + (lg4ff_alternate_modes[i].product_id == 0 && entry->wdata.product_id == entry->wdata.real_product_id)) count += scnprintf(buf + count, PAGE_SIZE - count, " *\n"); else count += scnprintf(buf + count, PAGE_SIZE - count, "\n"); @@ -675,10 +794,10 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att const u16 mode_product_id = lg4ff_alternate_modes[i].product_id; const char *tag = lg4ff_alternate_modes[i].tag; - if (entry->alternate_modes & BIT(i)) { + if (entry->wdata.alternate_modes & BIT(i)) { if (!strcmp(tag, lbuf)) { if (!mode_product_id) - target_product_id = entry->real_product_id; + target_product_id = entry->wdata.real_product_id; else target_product_id = mode_product_id; break; @@ -693,24 +812,24 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att } kfree(lbuf); /* Not needed anymore */ - if (target_product_id == entry->product_id) /* Nothing to do */ + if (target_product_id == entry->wdata.product_id) /* Nothing to do */ return count; /* Automatic switching has to be disabled for the switch to DF-EX mode to work correctly */ if (target_product_id == USB_DEVICE_ID_LOGITECH_WHEEL && !lg4ff_no_autoswitch) { hid_info(hid, "\"%s\" cannot be switched to \"DF-EX\" mode. Load the \"hid_logitech\" module with \"lg4ff_no_autoswitch=1\" parameter set and try again\n", - entry->real_name); + entry->wdata.real_name); return -EINVAL; } /* Take care of hardware limitations */ - if ((entry->real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL || entry->real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) && - entry->product_id > target_product_id) { - hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n", entry->real_name, lg4ff_alternate_modes[i].name); + if ((entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_DFP_WHEEL || entry->wdata.real_product_id == USB_DEVICE_ID_LOGITECH_G25_WHEEL) && + entry->wdata.product_id > target_product_id) { + hid_info(hid, "\"%s\" cannot be switched back into \"%s\" mode\n", entry->wdata.real_name, lg4ff_alternate_modes[i].name); return -EINVAL; } - s = lg4ff_get_mode_switch_command(entry->real_product_id, target_product_id); + s = lg4ff_get_mode_switch_command(entry->wdata.real_product_id, target_product_id); if (!s) { hid_err(hid, "Invalid target product ID %X\n", target_product_id); return -EINVAL; @@ -721,9 +840,9 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att } static DEVICE_ATTR(alternate_modes, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_alternate_modes_show, lg4ff_alternate_modes_store); -/* Read current range and display it in terminal */ -static ssize_t range_show(struct device *dev, struct device_attribute *attr, - char *buf) +/* Export the currently set range of the wheel */ +static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct hid_device *hid = to_hid_device(dev); struct lg4ff_device_entry *entry; @@ -742,19 +861,19 @@ static ssize_t range_show(struct device *dev, struct device_attribute *attr, return 0; } - count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range); + count = scnprintf(buf, PAGE_SIZE, 
"%u\n", entry->wdata.range); return count; } /* Set range to user specified value, call appropriate function * according to the type of the wheel */ -static ssize_t range_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct hid_device *hid = to_hid_device(dev); struct lg4ff_device_entry *entry; struct lg_drv_data *drv_data; - __u16 range = simple_strtoul(buf, NULL, 10); + u16 range = simple_strtoul(buf, NULL, 10); drv_data = hid_get_drvdata(hid); if (!drv_data) { @@ -769,18 +888,18 @@ static ssize_t range_store(struct device *dev, struct device_attribute *attr, } if (range == 0) - range = entry->max_range; + range = entry->wdata.max_range; /* Check if the wheel supports range setting * and that the range is within limits for the wheel */ - if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) { - entry->set_range(hid, range); - entry->range = range; + if (entry->wdata.set_range && range >= entry->wdata.min_range && range <= entry->wdata.max_range) { + entry->wdata.set_range(hid, range); + entry->wdata.range = range; } return count; } -static DEVICE_ATTR_RW(range); +static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, lg4ff_range_show, lg4ff_range_store); static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -801,12 +920,12 @@ static ssize_t lg4ff_real_id_show(struct device *dev, struct device_attribute *a return 0; } - if (!entry->real_tag || !entry->real_name) { + if (!entry->wdata.real_tag || !entry->wdata.real_name) { hid_err(hid, "NULL pointer to string\n"); return 0; } - count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->real_tag, entry->real_name); + count = scnprintf(buf, PAGE_SIZE, "%s: %s\n", entry->wdata.real_tag, entry->wdata.real_name); return count; } @@ -818,12 +937,27 @@ static ssize_t lg4ff_real_id_store(struct device *dev, struct device_attribute * static DEVICE_ATTR(real_id, S_IRUGO, lg4ff_real_id_show, lg4ff_real_id_store); #ifdef CONFIG_LEDS_CLASS -static void lg4ff_set_leds(struct hid_device *hid, __u8 leds) +static void lg4ff_set_leds(struct hid_device *hid, u8 leds) { - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - __s32 *value = report->field[0]->value; + struct lg_drv_data *drv_data; + struct lg4ff_device_entry *entry; + unsigned long flags; + s32 *value; + + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Private driver data not found!\n"); + return; + } + + entry = drv_data->device_props; + if (!entry) { + hid_err(hid, "Device properties not found!\n"); + return; + } + value = entry->report->field[0]->value; + spin_lock_irqsave(&entry->report_lock, flags); value[0] = 0xf8; value[1] = 0x12; value[2] = leds; @@ -831,7 +965,8 @@ static void lg4ff_set_leds(struct hid_device *hid, __u8 leds) value[4] = 0x00; value[5] = 0x00; value[6] = 0x00; - hid_hw_request(hid, report, HID_REQ_SET_REPORT); + hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT); + spin_unlock_irqrestore(&entry->report_lock, flags); } static void lg4ff_led_set_brightness(struct led_classdev *led_cdev, @@ -848,7 +983,7 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev, return; } - entry = (struct lg4ff_device_entry *)drv_data->device_props; + entry = drv_data->device_props; 
if (!entry) { hid_err(hid, "Device properties not found."); @@ -856,15 +991,15 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev, } for (i = 0; i < 5; i++) { - if (led_cdev != entry->led[i]) + if (led_cdev != entry->wdata.led[i]) continue; - state = (entry->led_state >> i) & 1; + state = (entry->wdata.led_state >> i) & 1; if (value == LED_OFF && state) { - entry->led_state &= ~(1 << i); - lg4ff_set_leds(hid, entry->led_state); + entry->wdata.led_state &= ~(1 << i); + lg4ff_set_leds(hid, entry->wdata.led_state); } else if (value != LED_OFF && !state) { - entry->led_state |= 1 << i; - lg4ff_set_leds(hid, entry->led_state); + entry->wdata.led_state |= 1 << i; + lg4ff_set_leds(hid, entry->wdata.led_state); } break; } @@ -883,7 +1018,7 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde return LED_OFF; } - entry = (struct lg4ff_device_entry *)drv_data->device_props; + entry = drv_data->device_props; if (!entry) { hid_err(hid, "Device properties not found."); @@ -891,8 +1026,8 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde } for (i = 0; i < 5; i++) - if (led_cdev == entry->led[i]) { - value = (entry->led_state >> i) & 1; + if (led_cdev == entry->wdata.led[i]) { + value = (entry->wdata.led_state >> i) & 1; break; } @@ -991,8 +1126,11 @@ int lg4ff_init(struct hid_device *hid) { struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *dev = hidinput->input; + struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; + struct hid_report *report = list_entry(report_list->next, struct hid_report, list); const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice); + const struct lg4ff_multimode_wheel *mmode_wheel = NULL; struct lg4ff_device_entry *entry; struct lg_drv_data *drv_data; int error, i, j; @@ -1003,6 +1141,18 @@ int lg4ff_init(struct hid_device *hid) if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) return -1; + drv_data = hid_get_drvdata(hid); + if (!drv_data) { + hid_err(hid, "Cannot add device, private driver data not allocated\n"); + return -1; + } + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + spin_lock_init(&entry->report_lock); + entry->report = report; + drv_data->device_props = entry; + /* Check if a multimode wheel has been connected and * handle it appropriately */ mmode_ret = lg4ff_handle_multimode_wheel(hid, &real_product_id, bcdDevice); @@ -1012,6 +1162,11 @@ int lg4ff_init(struct hid_device *hid) */ if (mmode_ret == LG4FF_MMODE_SWITCHED) return 0; + else if (mmode_ret < 0) { + hid_err(hid, "Unable to switch device mode during initialization, errno %d\n", mmode_ret); + error = mmode_ret; + goto err_init; + } /* Check what wheel has been connected */ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) { @@ -1022,9 +1177,11 @@ int lg4ff_init(struct hid_device *hid) } if (i == ARRAY_SIZE(lg4ff_devices)) { - hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to" - "LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n"); - return -1; + hid_err(hid, "This device is flagged to be handled by the lg4ff module but this module does not know how to handle it. 
" + "Please report this as a bug to LKML, Simon Wood <simon@mungewell.org> or " + "Michal Maly <madcatxster@devoid-pointer.net>\n"); + error = -1; + goto err_init; } if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) { @@ -1035,7 +1192,8 @@ int lg4ff_init(struct hid_device *hid) if (mmode_idx == ARRAY_SIZE(lg4ff_multimode_wheels)) { hid_err(hid, "Device product ID %X is not listed as a multimode wheel", real_product_id); - return -1; + error = -1; + goto err_init; } } @@ -1043,37 +1201,17 @@ int lg4ff_init(struct hid_device *hid) for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++) set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit); - error = input_ff_create_memless(dev, NULL, hid_lg4ff_play); + error = input_ff_create_memless(dev, NULL, lg4ff_play); if (error) - return error; - - /* Get private driver data */ - drv_data = hid_get_drvdata(hid); - if (!drv_data) { - hid_err(hid, "Cannot add device, private driver data not allocated\n"); - return -1; - } + goto err_init; /* Initialize device properties */ - entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL); - if (!entry) { - hid_err(hid, "Cannot add device, insufficient memory to allocate device properties.\n"); - return -ENOMEM; - } - drv_data->device_props = entry; - - entry->product_id = lg4ff_devices[i].product_id; - entry->real_product_id = real_product_id; - entry->min_range = lg4ff_devices[i].min_range; - entry->max_range = lg4ff_devices[i].max_range; - entry->set_range = lg4ff_devices[i].set_range; if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) { BUG_ON(mmode_idx == -1); - entry->alternate_modes = lg4ff_multimode_wheels[mmode_idx].alternate_modes; - entry->real_tag = lg4ff_multimode_wheels[mmode_idx].real_tag; - entry->real_name = lg4ff_multimode_wheels[mmode_idx].real_name; + mmode_wheel = &lg4ff_multimode_wheels[mmode_idx]; } + lg4ff_init_wheel_data(&entry->wdata, &lg4ff_devices[i], mmode_wheel, real_product_id); /* Check if autocentering is available and * set the centering force to zero by default */ @@ -1081,9 +1219,9 @@ int lg4ff_init(struct hid_device *hid) /* Formula Force EX expects different autocentering command */ if ((bcdDevice >> 8) == LG4FF_FFEX_REV_MAJ && (bcdDevice & 0xff) == LG4FF_FFEX_REV_MIN) - dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex; + dev->ff->set_autocenter = lg4ff_set_autocenter_ffex; else - dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default; + dev->ff->set_autocenter = lg4ff_set_autocenter_default; dev->ff->set_autocenter(dev, 0); } @@ -1091,27 +1229,27 @@ int lg4ff_init(struct hid_device *hid) /* Create sysfs interface */ error = device_create_file(&hid->dev, &dev_attr_range); if (error) - return error; + hid_warn(hid, "Unable to create sysfs interface for \"range\", errno %d\n", error); if (mmode_ret == LG4FF_MMODE_IS_MULTIMODE) { error = device_create_file(&hid->dev, &dev_attr_real_id); if (error) - return error; + hid_warn(hid, "Unable to create sysfs interface for \"real_id\", errno %d\n", error); error = device_create_file(&hid->dev, &dev_attr_alternate_modes); if (error) - return error; + hid_warn(hid, "Unable to create sysfs interface for \"alternate_modes\", errno %d\n", error); } dbg_hid("sysfs interface created\n"); /* Set the maximum range to start with */ - entry->range = entry->max_range; - if (entry->set_range != NULL) - entry->set_range(hid, entry->range); + entry->wdata.range = entry->wdata.max_range; + if (entry->wdata.set_range) + entry->wdata.set_range(hid, entry->wdata.range); #ifdef CONFIG_LEDS_CLASS /* register led subsystem - G27 only */ - entry->led_state = 
0; + entry->wdata.led_state = 0; for (j = 0; j < 5; j++) - entry->led[j] = NULL; + entry->wdata.led[j] = NULL; if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G27_WHEEL) { struct led_classdev *led; @@ -1126,7 +1264,7 @@ int lg4ff_init(struct hid_device *hid) led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL); if (!led) { hid_err(hid, "can't allocate memory for LED %d\n", j); - goto err; + goto err_leds; } name = (void *)(&led[1]); @@ -1137,16 +1275,16 @@ int lg4ff_init(struct hid_device *hid) led->brightness_get = lg4ff_led_get_brightness; led->brightness_set = lg4ff_led_set_brightness; - entry->led[j] = led; + entry->wdata.led[j] = led; error = led_classdev_register(&hid->dev, led); if (error) { hid_err(hid, "failed to register LED %d. Aborting.\n", j); -err: +err_leds: /* Deregister LEDs (if any) */ for (j = 0; j < 5; j++) { - led = entry->led[j]; - entry->led[j] = NULL; + led = entry->wdata.led[j]; + entry->wdata.led[j] = NULL; if (!led) continue; led_classdev_unregister(led); @@ -1160,6 +1298,11 @@ out: #endif hid_info(hid, "Force feedback support for Logitech Gaming Wheels\n"); return 0; + +err_init: + drv_data->device_props = NULL; + kfree(entry); + return error; } int lg4ff_deinit(struct hid_device *hid) @@ -1176,14 +1319,13 @@ int lg4ff_deinit(struct hid_device *hid) if (!entry) goto out; /* Nothing more to do */ - device_remove_file(&hid->dev, &dev_attr_range); - /* Multimode devices will have at least the "MODE_NATIVE" bit set */ - if (entry->alternate_modes) { + if (entry->wdata.alternate_modes) { device_remove_file(&hid->dev, &dev_attr_real_id); device_remove_file(&hid->dev, &dev_attr_alternate_modes); } + device_remove_file(&hid->dev, &dev_attr_range); #ifdef CONFIG_LEDS_CLASS { int j; @@ -1192,8 +1334,8 @@ int lg4ff_deinit(struct hid_device *hid) /* Deregister LEDs (if any) */ for (j = 0; j < 5; j++) { - led = entry->led[j]; - entry->led[j] = NULL; + led = entry->wdata.led[j]; + entry->wdata.led[j] = NULL; if (!led) continue; led_classdev_unregister(led); @@ -1201,10 +1343,10 @@ int lg4ff_deinit(struct hid_device *hid) } } #endif + hid_hw_stop(hid); + drv_data->device_props = NULL; - /* Deallocate memory */ kfree(entry); - out: dbg_hid("Device successfully unregistered\n"); return 0; diff --git a/drivers/hid/hid-lg4ff.h b/drivers/hid/hid-lg4ff.h index 5b6a5086c47f..66201af44da3 100644 --- a/drivers/hid/hid-lg4ff.h +++ b/drivers/hid/hid-lg4ff.h @@ -5,12 +5,12 @@ extern int lg4ff_no_autoswitch; /* From hid-lg.c */ int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, - struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data); + struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data); int lg4ff_init(struct hid_device *hdev); int lg4ff_deinit(struct hid_device *hdev); #else static inline int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, - struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data) { return 0; } + struct hid_usage *usage, s32 value, struct lg_drv_data *drv_data) { return 0; } static inline int lg4ff_init(struct hid_device *hdev) { return -1; } static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; } #endif diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 5fd530acf747..484196459305 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -40,8 +40,9 @@ MODULE_PARM_DESC(disable_raw_mode, #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_QUIRK_CLASS_WTP BIT(0) +#define 
HIDPP_QUIRK_CLASS_M560 BIT(1) -/* bits 1..20 are reserved for classes */ +/* bits 2..20 are reserved for classes */ #define HIDPP_QUIRK_DELAYED_INIT BIT(21) #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) @@ -930,6 +931,207 @@ static int wtp_connect(struct hid_device *hdev, bool connected) true, true); } +/* ------------------------------------------------------------------------- */ +/* Logitech M560 devices */ +/* ------------------------------------------------------------------------- */ + +/* + * Logitech M560 protocol overview + * + * The Logitech M560 mouse is designed for Windows 8. When the middle and/or + * the side buttons are pressed, it sends keyboard key events + * instead of button ones. + * To complicate things further, the middle button key sequence + * is different for odd and even presses. + * + * forward button -> Super_R + * backward button -> Super_L+'d' (press only) + * middle button -> 1st time: Alt_L+SuperL+XF86TouchpadOff (press only) + * 2nd time: left-click (press only) + * NB: press-only means that when the button is pressed, the + * KeyPress/ButtonPress and KeyRelease/ButtonRelease events are generated + * together sequentially; when the button is released, no event is + * generated! + * + * With the command + * 10<xx>0a 3500af03 (where <xx> is the mouse id), + * the mouse reacts differently: + * - it never sends a keyboard key event + * - for the three mouse buttons it sends: + * middle button press 11<xx>0a 3500af00... + * side 1 button (forward) press 11<xx>0a 3500b000... + * side 2 button (backward) press 11<xx>0a 3500ae00... + * middle/side1/side2 button release 11<xx>0a 35000000... + */ + +static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03}; + +struct m560_private_data { + struct input_dev *input; +}; + +/* how buttons are mapped in the report */ +#define M560_MOUSE_BTN_LEFT 0x01 +#define M560_MOUSE_BTN_RIGHT 0x02 +#define M560_MOUSE_BTN_WHEEL_LEFT 0x08 +#define M560_MOUSE_BTN_WHEEL_RIGHT 0x10 + +#define M560_SUB_ID 0x0a +#define M560_BUTTON_MODE_REGISTER 0x35 + +static int m560_send_config_command(struct hid_device *hdev, bool connected) +{ + struct hidpp_report response; + struct hidpp_device *hidpp_dev; + + hidpp_dev = hid_get_drvdata(hdev); + + if (!connected) + return -ENODEV; + + return hidpp_send_rap_command_sync( + hidpp_dev, + REPORT_ID_HIDPP_SHORT, + M560_SUB_ID, + M560_BUTTON_MODE_REGISTER, + (u8 *)m560_config_parameter, + sizeof(m560_config_parameter), + &response + ); +} + +static int m560_allocate(struct hid_device *hdev) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct m560_private_data *d; + + d = devm_kzalloc(&hdev->dev, sizeof(struct m560_private_data), + GFP_KERNEL); + if (!d) + return -ENOMEM; + + hidpp->private_data = d; + + return 0; +} + +static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) +{ + struct hidpp_device *hidpp = hid_get_drvdata(hdev); + struct m560_private_data *mydata = hidpp->private_data; + + /* sanity check */ + if (!mydata || !mydata->input) { + hid_err(hdev, "error in parameter\n"); + return -EINVAL; + } + + if (size < 7) { + hid_err(hdev, "error in report\n"); + return 0; + } + + if (data[0] == REPORT_ID_HIDPP_LONG && + data[2] == M560_SUB_ID && data[6] == 0x00) { + /* + * m560 mouse report for middle, forward and backward buttons + * + * data[0] = 0x11 + * data[1] = device-id + * data[2] = 0x0a + * data[5] = 0xaf -> middle + * 0xb0 -> forward + * 0xae -> backward + * 0x00 -> release all + * data[6] = 0x00 + */ + + switch (data[5]) { 
case 0xaf: + input_report_key(mydata->input, BTN_MIDDLE, 1); + break; + case 0xb0: + input_report_key(mydata->input, BTN_FORWARD, 1); + break; + case 0xae: + input_report_key(mydata->input, BTN_BACK, 1); + break; + case 0x00: + input_report_key(mydata->input, BTN_BACK, 0); + input_report_key(mydata->input, BTN_FORWARD, 0); + input_report_key(mydata->input, BTN_MIDDLE, 0); + break; + default: + hid_err(hdev, "error in report\n"); + return 0; + } + input_sync(mydata->input); + + } else if (data[0] == 0x02) { + /* + * Logitech M560 mouse report + * + * data[0] = type (0x02) + * data[1..2] = buttons + * data[3..5] = xy + * data[6] = wheel + */ + + int v; + + input_report_key(mydata->input, BTN_LEFT, + !!(data[1] & M560_MOUSE_BTN_LEFT)); + input_report_key(mydata->input, BTN_RIGHT, + !!(data[1] & M560_MOUSE_BTN_RIGHT)); + + if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) + input_report_rel(mydata->input, REL_HWHEEL, -1); + else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) + input_report_rel(mydata->input, REL_HWHEEL, 1); + + v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12); + input_report_rel(mydata->input, REL_X, v); + + v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12); + input_report_rel(mydata->input, REL_Y, v); + + v = hid_snto32(data[6], 8); + input_report_rel(mydata->input, REL_WHEEL, v); + + input_sync(mydata->input); + } + + return 1; +} + +static void m560_populate_input(struct hidpp_device *hidpp, + struct input_dev *input_dev, bool origin_is_hid_core) +{ + struct m560_private_data *mydata = hidpp->private_data; + + mydata->input = input_dev; + + __set_bit(EV_KEY, mydata->input->evbit); + __set_bit(BTN_MIDDLE, mydata->input->keybit); + __set_bit(BTN_RIGHT, mydata->input->keybit); + __set_bit(BTN_LEFT, mydata->input->keybit); + __set_bit(BTN_BACK, mydata->input->keybit); + __set_bit(BTN_FORWARD, mydata->input->keybit); + + __set_bit(EV_REL, mydata->input->evbit); + __set_bit(REL_X, mydata->input->relbit); + __set_bit(REL_Y, mydata->input->relbit); + __set_bit(REL_WHEEL, mydata->input->relbit); + __set_bit(REL_HWHEEL, mydata->input->relbit); +} + +static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + return -1; +} + /* -------------------------------------------------------------------------- */ /* Generic HID++ devices */ /* -------------------------------------------------------------------------- */ @@ -942,6 +1144,9 @@ static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi, if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) return wtp_input_mapping(hdev, hi, field, usage, bit, max); + else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 && + field->application != HID_GD_MOUSE) + return m560_input_mapping(hdev, hi, field, usage, bit, max); return 0; } @@ -951,6 +1156,8 @@ static void hidpp_populate_input(struct hidpp_device *hidpp, { if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) wtp_populate_input(hidpp, input, origin_is_hid_core); + else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) + m560_populate_input(hidpp, input, origin_is_hid_core); } static void hidpp_input_configured(struct hid_device *hdev, @@ -1038,6 +1245,8 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report, if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) return wtp_raw_event(hdev, data, size); + else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) + return m560_raw_event(hdev, data, size); return 0; } @@ -1115,6 +1324,10 @@ static void hidpp_connect_event(struct hidpp_device 
*hidpp) ret = wtp_connect(hdev, connected); if (ret) return; + } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) { + ret = m560_send_config_command(hdev, connected); + if (ret) + return; } if (!connected || hidpp->delayed_input) @@ -1190,7 +1403,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) { ret = wtp_allocate(hdev, id); if (ret) - goto wtp_allocate_fail; + goto allocate_fail; + } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) { + ret = m560_allocate(hdev); + if (ret) + goto allocate_fail; } INIT_WORK(&hidpp->work, delayed_work_cb); @@ -1253,7 +1470,7 @@ hid_hw_start_fail: hid_parse_fail: cancel_work_sync(&hidpp->work); mutex_destroy(&hidpp->send_mutex); -wtp_allocate_fail: +allocate_fail: hid_set_drvdata(hdev, NULL); return ret; } @@ -1281,6 +1498,10 @@ static const struct hid_device_id hidpp_devices[] = { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651), .driver_data = HIDPP_QUIRK_CLASS_WTP }, + { /* Mouse logitech M560 */ + HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, + USB_VENDOR_ID_LOGITECH, 0x402d), + .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 }, { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, USB_VENDOR_ID_LOGITECH, HID_ANY_ID)}, diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index af935eb198c9..32a596f554af 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -280,6 +280,8 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_HIDINPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP), .driver_data = MS_HIDINPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), + .driver_data = MS_HIDINPUT }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), .driver_data = MS_PRESENTER }, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index 2180e0789b76..febb21ee190e 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -2,7 +2,7 @@ * Plantronics USB HID Driver * * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com> - * Copyright (c) 2014 Terry Junge <terry.junge@plantronics.com> + * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com> */ /* @@ -17,23 +17,138 @@ #include <linux/hid.h> #include <linux/module.h> +#define PLT_HID_1_0_PAGE 0xffa00000 +#define PLT_HID_2_0_PAGE 0xffa20000 + +#define PLT_BASIC_TELEPHONY 0x0003 +#define PLT_BASIC_EXCEPTION 0x0005 + +#define PLT_VOL_UP 0x00b1 +#define PLT_VOL_DOWN 0x00b2 + +#define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP) +#define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN) +#define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP) +#define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN) + +#define PLT_DA60 0xda60 +#define PLT_BT300_MIN 0x0413 +#define PLT_BT300_MAX 0x0418 + + +#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \ + (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) + static int plantronics_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - if (field->application == HID_CP_CONSUMERCONTROL - && (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) { - hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n", - usage->hid, field->application); - return 0; + unsigned short mapped_key; + unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); + + /* handle volume 
up/down mapping */ + /* non-standard types or multi-HID interfaces - plt_type is PID */ + if (!(plt_type & HID_USAGE_PAGE)) { + switch (plt_type) { + case PLT_DA60: + if (PLT_ALLOW_CONSUMER) + goto defaulted; + goto ignored; + default: + if (PLT_ALLOW_CONSUMER) + goto defaulted; + } + } + /* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */ + /* 'basic telephony compliant' - allow default consumer page map */ + else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY && + (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) { + if (PLT_ALLOW_CONSUMER) + goto defaulted; + } + /* not 'basic telephony' - apply legacy mapping */ + /* only map if the field is in the device's primary vendor page */ + else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) { + switch (usage->hid) { + case PLT1_VOL_UP: + case PLT2_VOL_UP: + mapped_key = KEY_VOLUMEUP; + goto mapped; + case PLT1_VOL_DOWN: + case PLT2_VOL_DOWN: + mapped_key = KEY_VOLUMEDOWN; + goto mapped; + } } - hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n", - usage->hid, field->application); +/* + * Future mappings of call control or other usages, + * if and when keys are defined, would go here; + * otherwise, ignore everything else that was not mapped. + */ +ignored: return -1; + +defaulted: + hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n", + usage->hid, field->application); + return 0; + +mapped: + hid_map_usage_clear(hi, usage, bit, max, EV_KEY, mapped_key); + hid_dbg(hdev, "usage: %08x (appl: %08x) - mapped to key %d\n", + usage->hid, field->application, mapped_key); + return 1; +} + +static unsigned long plantronics_device_type(struct hid_device *hdev) +{ + unsigned i, col_page; + unsigned long plt_type = hdev->product; + + /* multi-HID interfaces? - plt_type is PID */ + if (plt_type >= PLT_BT300_MIN && plt_type <= PLT_BT300_MAX) + goto exit; + + /* determine primary vendor page */ + for (i = 0; i < hdev->maxcollection; i++) { + col_page = hdev->collection[i].usage & HID_USAGE_PAGE; + if (col_page == PLT_HID_2_0_PAGE) { + plt_type = hdev->collection[i].usage; + break; + } + if (col_page == PLT_HID_1_0_PAGE) + plt_type = hdev->collection[i].usage; + } + +exit: + hid_dbg(hdev, "plt_type decoded as: %08lx\n", plt_type); + return plt_type; +} + +static int plantronics_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int ret; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + goto err; + } + + hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev)); + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | + HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE); + if (ret) + hid_err(hdev, "hw start failed\n"); + +err: + return ret; } static const struct hid_device_id plantronics_devices[] = { @@ -46,6 +161,7 @@ static struct hid_driver plantronics_driver = { .name = "plantronics", .id_table = plantronics_devices, .input_mapping = plantronics_input_mapping, + .probe = plantronics_probe, }; module_hid_driver(plantronics_driver); diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index 91fab975063c..e3e98ccf137b 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -395,11 +395,10 @@ static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data) /* break keys */ for (bit_index = 0; bit_index < 24; bit_index++) { - key = pm->last_key[bit_index]; if (!((0x01 << bit_index) & bit_mask)) { input_event(pm->input_ep82, EV_KEY, pm->last_key[bit_index], 0); - pm->last_key[bit_index] = 0; + pm->last_key[bit_index] = 0; } } diff --git 
a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 368ffdf2c0a3..4cf80bb276dc 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -29,9 +29,9 @@ #define RMI_SET_RMI_MODE_REPORT_ID 0x0f /* Feature Report */ /* flags */ -#define RMI_READ_REQUEST_PENDING BIT(0) -#define RMI_READ_DATA_PENDING BIT(1) -#define RMI_STARTED BIT(2) +#define RMI_READ_REQUEST_PENDING 0 +#define RMI_READ_DATA_PENDING 1 +#define RMI_STARTED 2 /* device flags */ #define RMI_DEVICE BIT(0) @@ -1013,6 +1013,7 @@ static int rmi_populate_f30(struct hid_device *hdev) static int rmi_populate(struct hid_device *hdev) { + struct rmi_data *data = hid_get_drvdata(hdev); int ret; ret = rmi_scan_pdt(hdev); @@ -1033,9 +1034,11 @@ static int rmi_populate(struct hid_device *hdev) return ret; } - ret = rmi_populate_f30(hdev); - if (ret) - hid_warn(hdev, "Error while initializing F30 (%d).\n", ret); + if (!(data->device_flags & RMI_DEVICE_HAS_PHYS_BUTTONS)) { + ret = rmi_populate_f30(hdev); + if (ret) + hid_warn(hdev, "Error while initializing F30 (%d).\n", ret); + } return 0; } diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c index 37845eccddb5..36b6470af947 100644 --- a/drivers/hid/hid-sjoy.c +++ b/drivers/hid/hid-sjoy.c @@ -166,6 +166,9 @@ static const struct hid_device_id sjoy_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII), + .driver_data = HID_QUIRK_MULTI_INPUT | + HID_QUIRK_SKIP_OUTPUT_REPORTS }, { } }; MODULE_DEVICE_TABLE(hid, sjoy_devices); diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 6ca96cebb44c..ed2f008f8403 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -46,20 +46,37 @@ #define PS3REMOTE BIT(4) #define DUALSHOCK4_CONTROLLER_USB BIT(5) #define DUALSHOCK4_CONTROLLER_BT BIT(6) +#define MOTION_CONTROLLER_USB BIT(7) +#define MOTION_CONTROLLER_BT BIT(8) +#define NAVIGATION_CONTROLLER_USB BIT(9) +#define NAVIGATION_CONTROLLER_BT BIT(10) #define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT) +#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT) +#define NAVIGATION_CONTROLLER (NAVIGATION_CONTROLLER_USB |\ + NAVIGATION_CONTROLLER_BT) #define DUALSHOCK4_CONTROLLER (DUALSHOCK4_CONTROLLER_USB |\ DUALSHOCK4_CONTROLLER_BT) #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER | BUZZ_CONTROLLER |\ - DUALSHOCK4_CONTROLLER) -#define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER) -#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER) + DUALSHOCK4_CONTROLLER | MOTION_CONTROLLER |\ + NAVIGATION_CONTROLLER) +#define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER |\ + MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER) +#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER |\ + MOTION_CONTROLLER) #define MAX_LEDS 4 +/* + * The Sixaxis reports both digital and analog values for each button on the + * controller except for Start, Select and the PS button. The controller ends + * up reporting 27 axes which causes them to spill over into the multi-touch + * axis values. Additionally, the controller only has 20 actual, physical axes + * so there are several unused axes in between the used ones. 
+ */ static __u8 sixaxis_rdesc[] = { 0x05, 0x01, /* Usage Page (Desktop), */ - 0x09, 0x04, /* Usage (Joystik), */ + 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x85, 0x01, /* Report ID (1), */ @@ -134,6 +151,186 @@ static __u8 sixaxis_rdesc[] = { 0xC0 /* End Collection */ }; +/* PS/3 Motion controller */ +static __u8 motion_rdesc[] = { + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x04, /* Usage (Joystick), */ + 0xA1, 0x01, /* Collection (Application), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0x01, /* Report ID (1), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x15, /* Report Count (21), */ + 0x15, 0x00, /* Logical Minimum (0), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x35, 0x00, /* Physical Minimum (0), */ + 0x45, 0x01, /* Physical Maximum (1), */ + 0x05, 0x09, /* Usage Page (Button), */ + 0x19, 0x01, /* Usage Minimum (01h), */ + 0x29, 0x15, /* Usage Maximum (15h), */ + 0x81, 0x02, /* Input (Variable), * Buttons */ + 0x95, 0x0B, /* Report Count (11), */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x81, 0x03, /* Input (Constant, Variable), * Padding */ + 0x15, 0x00, /* Logical Minimum (0), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0xA1, 0x00, /* Collection (Physical), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x01, /* Report Count (1), */ + 0x35, 0x00, /* Physical Minimum (0), */ + 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ + 0x09, 0x30, /* Usage (X), */ + 0x81, 0x02, /* Input (Variable), * Trigger */ + 0xC0, /* End Collection, */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x07, /* Report Count (7), * skip 7 bytes */ + 0x81, 0x02, /* Input (Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x75, 0x10, /* Report Size (16), */ + 0x46, 0xFF, 0xFF, /* Physical Maximum (65535), */ + 0x27, 0xFF, 0xFF, 0x00, 0x00, /* Logical Maximum (65535), */ + 0x95, 0x03, /* Report Count (3), * 3x Accels */ + 0x09, 0x33, /* Usage (rX), */ + 0x09, 0x34, /* Usage (rY), */ + 0x09, 0x35, /* Usage (rZ), */ + 0x81, 0x02, /* Input (Variable), */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x95, 0x03, /* Report Count (3), * Skip Accels 2nd frame */ + 0x81, 0x02, /* Input (Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0x95, 0x03, /* Report Count (3), * 3x Gyros */ + 0x81, 0x02, /* Input (Variable), */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x95, 0x03, /* Report Count (3), * Skip Gyros 2nd frame */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x0C, /* Report Size (12), */ + 0x46, 0xFF, 0x0F, /* Physical Maximum (4095), */ + 0x26, 0xFF, 0x0F, /* Logical Maximum (4095), */ + 0x95, 0x04, /* Report Count (4), * Skip Temp and Magnetometers */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x08, /* Report Size (8), */ + 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x95, 0x06, /* Report Count (6), * Skip Timestamp and Extension Bytes */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0x91, 0x02, /* Output (Variable), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0x02, /* Report ID (2), */ + 0x75, 0x08, 
/* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0xEE, /* Report ID (238), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0xEF, /* Report ID (239), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xC0 /* End Collection */ +}; + +/* PS/3 Navigation controller */ +static __u8 navigation_rdesc[] = { + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x04, /* Usage (Joystick), */ + 0xA1, 0x01, /* Collection (Application), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0x01, /* Report ID (1), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x01, /* Report Count (1), */ + 0x15, 0x00, /* Logical Minimum (0), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x13, /* Report Count (19), */ + 0x15, 0x00, /* Logical Minimum (0), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x35, 0x00, /* Physical Minimum (0), */ + 0x45, 0x01, /* Physical Maximum (1), */ + 0x05, 0x09, /* Usage Page (Button), */ + 0x19, 0x01, /* Usage Minimum (01h), */ + 0x29, 0x13, /* Usage Maximum (13h), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x0D, /* Report Count (13), */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x15, 0x00, /* Logical Minimum (0), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xA1, 0x00, /* Collection (Physical), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x02, /* Report Count (2), */ + 0x35, 0x00, /* Physical Minimum (0), */ + 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ + 0x09, 0x30, /* Usage (X), */ + 0x09, 0x31, /* Usage (Y), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x95, 0x06, /* Report Count (6), */ + 0x81, 0x03, /* Input (Constant, Variable), */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x05, /* Report Count (5), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0x81, 0x02, /* Input (Variable), */ + 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ + 0x95, 0x20, /* Report Count (32), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0x91, 0x02, /* Output (Variable), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0x02, /* Report ID (2), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0xEE, /* Report ID (238), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature 
(Variable), */ + 0xC0, /* End Collection, */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x85, 0xEF, /* Report ID (239), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x30, /* Report Count (48), */ + 0x09, 0x01, /* Usage (Pointer), */ + 0xB1, 0x02, /* Feature (Variable), */ + 0xC0, /* End Collection, */ + 0xC0 /* End Collection */ +}; + /* * The default descriptor doesn't provide mapping for the accelerometers * or orientation sensors. This fixed descriptor maps the accelerometers @@ -798,12 +995,20 @@ union sixaxis_output_report_01 { __u8 buf[36]; }; +struct motion_output_report_02 { + u8 type, zero; + u8 r, g, b; + u8 zero2; + u8 rumble; +}; + #define DS4_REPORT_0x02_SIZE 37 #define DS4_REPORT_0x05_SIZE 32 #define DS4_REPORT_0x11_SIZE 78 #define DS4_REPORT_0x81_SIZE 7 #define SIXAXIS_REPORT_0xF2_SIZE 17 #define SIXAXIS_REPORT_0xF5_SIZE 8 +#define MOTION_REPORT_0x02_SIZE 49 static DEFINE_SPINLOCK(sony_dev_list_lock); static LIST_HEAD(sony_device_list); @@ -844,6 +1049,20 @@ static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc, return sixaxis_rdesc; } +static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc, + unsigned int *rsize) +{ + *rsize = sizeof(motion_rdesc); + return motion_rdesc; +} + +static u8 *navigation_fixup(struct hid_device *hdev, u8 *rdesc, + unsigned int *rsize) +{ + *rsize = sizeof(navigation_rdesc); + return navigation_rdesc; +} + static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { @@ -924,6 +1143,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, if (sc->quirks & SIXAXIS_CONTROLLER) return sixaxis_fixup(hdev, rdesc, rsize); + if (sc->quirks & MOTION_CONTROLLER) + return motion_fixup(hdev, rdesc, rsize); + + if (sc->quirks & NAVIGATION_CONTROLLER) + return navigation_fixup(hdev, rdesc, rsize); + if (sc->quirks & PS3REMOTE) return ps3remote_fixup(hdev, rdesc, rsize); @@ -934,6 +1159,7 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size) { static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 }; unsigned long flags; + int offset; __u8 cable_state, battery_capacity, battery_charging; /* @@ -942,12 +1168,14 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size) * It does not report the actual level while charging so it * is set to 100% while charging is in progress. */ - if (rd[30] >= 0xee) { + offset = (sc->quirks & MOTION_CONTROLLER) ? 12 : 30; + + if (rd[offset] >= 0xee) { battery_capacity = 100; - battery_charging = !(rd[30] & 0x01); + battery_charging = !(rd[offset] & 0x01); cable_state = 1; } else { - __u8 index = rd[30] <= 5 ? rd[30] : 5; + __u8 index = rd[offset] <= 5 ? 
rd[offset] : 5; battery_capacity = sixaxis_battery_capacity[index]; battery_charging = 0; cable_state = 0; @@ -1048,6 +1276,11 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report, swap(rd[47], rd[48]); sixaxis_parse_report(sc, rd, size); + } else if ((sc->quirks & MOTION_CONTROLLER_BT) && rd[0] == 0x01 && size == 49) { + sixaxis_parse_report(sc, rd, size); + } else if ((sc->quirks & NAVIGATION_CONTROLLER) && rd[0] == 0x01 && + size == 49) { + sixaxis_parse_report(sc, rd, size); } else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 && size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) && rd[0] == 0x11 && size == 78)) { @@ -1208,7 +1441,7 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev) return ret; } -static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS]) +static void sixaxis_set_leds_from_id(struct sony_sc *sc) { static const __u8 sixaxis_leds[10][4] = { { 0x01, 0x00, 0x00, 0x00 }, @@ -1223,16 +1456,18 @@ static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS]) { 0x01, 0x01, 0x01, 0x01 } }; - BUG_ON(MAX_LEDS < ARRAY_SIZE(sixaxis_leds[0])); + int id = sc->device_id; + + BUILD_BUG_ON(MAX_LEDS < ARRAY_SIZE(sixaxis_leds[0])); if (id < 0) return; id %= 10; - memcpy(values, sixaxis_leds[id], sizeof(sixaxis_leds[id])); + memcpy(sc->led_state, sixaxis_leds[id], sizeof(sixaxis_leds[id])); } -static void dualshock4_set_leds_from_id(int id, __u8 values[MAX_LEDS]) +static void dualshock4_set_leds_from_id(struct sony_sc *sc) { /* The first 4 color/index entries match what the PS4 assigns */ static const __u8 color_code[7][3] = { @@ -1245,46 +1480,44 @@ static void dualshock4_set_leds_from_id(int id, __u8 values[MAX_LEDS]) /* White */ { 0x01, 0x01, 0x01 } }; - BUG_ON(MAX_LEDS < ARRAY_SIZE(color_code[0])); + int id = sc->device_id; + + BUILD_BUG_ON(MAX_LEDS < ARRAY_SIZE(color_code[0])); if (id < 0) return; id %= 7; - memcpy(values, color_code[id], sizeof(color_code[id])); + memcpy(sc->led_state, color_code[id], sizeof(color_code[id])); } -static void buzz_set_leds(struct hid_device *hdev, const __u8 *leds) +static void buzz_set_leds(struct sony_sc *sc) { + struct hid_device *hdev = sc->hdev; struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); __s32 *value = report->field[0]->value; + BUILD_BUG_ON(MAX_LEDS < 4); + value[0] = 0x00; - value[1] = leds[0] ? 0xff : 0x00; - value[2] = leds[1] ? 0xff : 0x00; - value[3] = leds[2] ? 0xff : 0x00; - value[4] = leds[3] ? 0xff : 0x00; + value[1] = sc->led_state[0] ? 0xff : 0x00; + value[2] = sc->led_state[1] ? 0xff : 0x00; + value[3] = sc->led_state[2] ? 0xff : 0x00; + value[4] = sc->led_state[3] ? 
0xff : 0x00; value[5] = 0x00; value[6] = 0x00; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); } -static void sony_set_leds(struct sony_sc *sc, const __u8 *leds, int count) +static void sony_set_leds(struct sony_sc *sc) { - int n; - - BUG_ON(count > MAX_LEDS); - - if (sc->quirks & BUZZ_CONTROLLER && count == 4) { - buzz_set_leds(sc->hdev, leds); - } else { - for (n = 0; n < count; n++) - sc->led_state[n] = leds[n]; + if (!(sc->quirks & BUZZ_CONTROLLER)) schedule_work(&sc->state_worker); - } + else + buzz_set_leds(sc); } static void sony_led_set_brightness(struct led_classdev *led, @@ -1324,8 +1557,7 @@ static void sony_led_set_brightness(struct led_classdev *led, drv_data->led_delay_on[n] = 0; drv_data->led_delay_off[n] = 0; - sony_set_leds(drv_data, drv_data->led_state, - drv_data->led_count); + sony_set_leds(drv_data); break; } } @@ -1431,7 +1663,6 @@ static int sony_leds_init(struct sony_sc *sc) const char *name_fmt; static const char * const ds4_name_str[] = { "red", "green", "blue", "global" }; - __u8 initial_values[MAX_LEDS] = { 0 }; __u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 }; __u8 use_hw_blink[MAX_LEDS] = { 0 }; @@ -1446,16 +1677,31 @@ static int sony_leds_init(struct sony_sc *sc) if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7)) return -ENODEV; } else if (sc->quirks & DUALSHOCK4_CONTROLLER) { - dualshock4_set_leds_from_id(sc->device_id, initial_values); - initial_values[3] = 1; + dualshock4_set_leds_from_id(sc); + sc->led_state[3] = 1; sc->led_count = 4; memset(max_brightness, 255, 3); use_hw_blink[3] = 1; use_ds4_names = 1; name_len = 0; name_fmt = "%s:%s"; + } else if (sc->quirks & MOTION_CONTROLLER) { + sc->led_count = 3; + memset(max_brightness, 255, 3); + use_ds4_names = 1; + name_len = 0; + name_fmt = "%s:%s"; + } else if (sc->quirks & NAVIGATION_CONTROLLER) { + static const __u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00}; + + memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds)); + sc->led_count = 1; + memset(use_hw_blink, 1, 4); + use_ds4_names = 0; + name_len = strlen("::sony#"); + name_fmt = "%s::sony%d"; } else { - sixaxis_set_leds_from_id(sc->device_id, initial_values); + sixaxis_set_leds_from_id(sc); sc->led_count = 4; memset(use_hw_blink, 1, 4); use_ds4_names = 0; @@ -1468,7 +1714,7 @@ static int sony_leds_init(struct sony_sc *sc) * only relevant if the driver is loaded after somebody actively set the * LEDs to on */ - sony_set_leds(sc, initial_values, sc->led_count); + sony_set_leds(sc); name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1; @@ -1491,7 +1737,7 @@ static int sony_leds_init(struct sony_sc *sc) else snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1); led->name = name; - led->brightness = initial_values[n]; + led->brightness = sc->led_state[n]; led->max_brightness = max_brightness[n]; led->brightness_get = sony_led_get_brightness; led->brightness_set = sony_led_set_brightness; @@ -1622,9 +1868,31 @@ static void dualshock4_state_worker(struct work_struct *work) HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } +static void motion_state_worker(struct work_struct *work) +{ + struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); + struct hid_device *hdev = sc->hdev; + struct motion_output_report_02 *report = + (struct motion_output_report_02 *)sc->output_report_dmabuf; + + memset(report, 0, MOTION_REPORT_0x02_SIZE); + + report->type = 0x02; /* set leds */ + report->r = sc->led_state[0]; + report->g = sc->led_state[1]; + report->b = sc->led_state[2]; + +#ifdef CONFIG_SONY_FF + report->rumble = 
max(sc->right, sc->left); +#endif + + hid_hw_output_report(hdev, (__u8 *)report, MOTION_REPORT_0x02_SIZE); +} + static int sony_allocate_output_report(struct sony_sc *sc) { - if (sc->quirks & SIXAXIS_CONTROLLER) + if ((sc->quirks & SIXAXIS_CONTROLLER) || + (sc->quirks & NAVIGATION_CONTROLLER)) sc->output_report_dmabuf = kmalloc(sizeof(union sixaxis_output_report_01), GFP_KERNEL); @@ -1634,6 +1902,9 @@ static int sony_allocate_output_report(struct sony_sc *sc) else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE, GFP_KERNEL); + else if (sc->quirks & MOTION_CONTROLLER) + sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE, + GFP_KERNEL); else return 0; @@ -1839,6 +2110,8 @@ static int sony_check_add(struct sony_sc *sc) int n, ret; if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) || + (sc->quirks & MOTION_CONTROLLER_BT) || + (sc->quirks & NAVIGATION_CONTROLLER_BT) || (sc->quirks & SIXAXIS_CONTROLLER_BT)) { /* * sony_get_bt_devaddr() attempts to parse the Bluetooth MAC @@ -1871,7 +2144,8 @@ static int sony_check_add(struct sony_sc *sc) } memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address)); - } else if (sc->quirks & SIXAXIS_CONTROLLER_USB) { + } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) || + (sc->quirks & NAVIGATION_CONTROLLER_USB)) { buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -1993,19 +2267,20 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } - ret = sony_allocate_output_report(sc); + ret = sony_set_device_id(sc); if (ret < 0) { - hid_err(hdev, "failed to allocate the output report buffer\n"); + hid_err(hdev, "failed to allocate the device id\n"); goto err_stop; } - ret = sony_set_device_id(sc); + ret = sony_allocate_output_report(sc); if (ret < 0) { - hid_err(hdev, "failed to allocate the device id\n"); + hid_err(hdev, "failed to allocate the output report buffer\n"); goto err_stop; } - if (sc->quirks & SIXAXIS_CONTROLLER_USB) { + if ((sc->quirks & SIXAXIS_CONTROLLER_USB) || + (sc->quirks & NAVIGATION_CONTROLLER_USB)) { /* * The Sony Sixaxis does not handle HID Output Reports on the * Interrupt EP like it could, so we need to force HID Output @@ -2020,7 +2295,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID; ret = sixaxis_set_operational_usb(hdev); sony_init_work(sc, sixaxis_state_worker); - } else if (sc->quirks & SIXAXIS_CONTROLLER_BT) { + } else if ((sc->quirks & SIXAXIS_CONTROLLER_BT) || + (sc->quirks & NAVIGATION_CONTROLLER_BT)) { /* * The Sixaxis wants output reports sent on the ctrl endpoint * when connected via Bluetooth. 
@@ -2043,6 +2319,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) } sony_init_work(sc, dualshock4_state_worker); + } else if (sc->quirks & MOTION_CONTROLLER) { + sony_init_work(sc, motion_state_worker); } else { ret = 0; } @@ -2122,7 +2400,13 @@ static const struct hid_device_id sony_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), .driver_data = SIXAXIS_CONTROLLER_USB }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER), - .driver_data = SIXAXIS_CONTROLLER_USB }, + .driver_data = NAVIGATION_CONTROLLER_USB }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER), + .driver_data = NAVIGATION_CONTROLLER_BT }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER), + .driver_data = MOTION_CONTROLLER_USB }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER), + .driver_data = MOTION_CONTROLLER_BT }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), .driver_data = SIXAXIS_CONTROLLER_BT }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 92d6cdf02460..f77469d4edfb 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -42,9 +42,9 @@ #include <linux/i2c/i2c-hid.h> /* flags */ -#define I2C_HID_STARTED (1 << 0) -#define I2C_HID_RESET_PENDING (1 << 1) -#define I2C_HID_READ_PENDING (1 << 2) +#define I2C_HID_STARTED 0 +#define I2C_HID_RESET_PENDING 1 +#define I2C_HID_READ_PENDING 2 #define I2C_HID_PWR_ON 0x00 #define I2C_HID_PWR_SLEEP 0x01 @@ -1019,7 +1019,6 @@ static int i2c_hid_probe(struct i2c_client *client, hid->driver_data = client; hid->ll_driver = &i2c_hid_ll_driver; hid->dev.parent = &client->dev; - ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev)); hid->bus = BUS_I2C; hid->version = le16_to_cpu(ihid->hdesc.bcdVersion); hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 4696895eb708..53e7de7cb9e2 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -52,7 +52,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, - { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, @@ -70,6 +69,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, @@ -89,6 +89,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, 
HID_QUIRK_NOGET }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, @@ -141,6 +142,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_INPUT_REPORTS }, + { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT }, { 0, 0 } }; diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h index 024f4d89d579..a533787a6d85 100644 --- a/drivers/hid/wacom.h +++ b/drivers/hid/wacom.h @@ -134,8 +134,10 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac) extern const struct hid_device_id wacom_ids[]; void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); -void wacom_setup_device_quirks(struct wacom_features *features); -int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, +void wacom_setup_device_quirks(struct wacom *wacom); +int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, + struct wacom_wac *wacom_wac); +int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac); int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index e8607d096138..4c0ffca97bef 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -35,7 +35,11 @@ static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf, do { retval = hid_hw_raw_request(hdev, buf[0], buf, size, type, HID_REQ_GET_REPORT); - } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries); + } while ((retval == -ETIMEDOUT || retval == -EAGAIN) && --retries); + + if (retval < 0) + hid_err(hdev, "wacom_get_report: ran out of retries " + "(last error = %d)\n", retval); return retval; } @@ -48,7 +52,11 @@ static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf, do { retval = hid_hw_raw_request(hdev, buf[0], buf, size, type, HID_REQ_SET_REPORT); - } while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries); + } while ((retval == -ETIMEDOUT || retval == -EAGAIN) && --retries); + + if (retval < 0) + hid_err(hdev, "wacom_set_report: ran out of retries " + "(last error = %d)\n", retval); return retval; } @@ -117,9 +125,16 @@ static void wacom_feature_mapping(struct hid_device *hdev, break; data[0] = field->report->id; ret = wacom_get_report(hdev, HID_FEATURE_REPORT, - data, 2, 0); - if (ret == 2) + data, 2, WAC_CMD_RETRIES); + if (ret == 2) { features->touch_max = data[1]; + } else { + features->touch_max = 16; + hid_warn(hdev, "wacom_feature_mapping: " + "could not get HID_DG_CONTACTMAX, " + "defaulting to %d\n", + features->touch_max); + } kfree(data); } break; @@ -181,7 +196,11 @@ static 
void wacom_usage_mapping(struct hid_device *hdev, * X/Y values and some cases of invalid Digitizer X/Y * values commonly reported. */ - if (!pen && !finger) + if (pen) + features->device_type |= WACOM_DEVICETYPE_PEN; + else if (finger) + features->device_type |= WACOM_DEVICETYPE_TOUCH; + else return; /* @@ -198,14 +217,11 @@ static void wacom_usage_mapping(struct hid_device *hdev, case HID_GD_X: features->x_max = field->logical_maximum; if (finger) { - features->device_type = BTN_TOOL_FINGER; features->x_phy = field->physical_maximum; if (features->type != BAMBOO_PT) { features->unit = field->unit; features->unitExpo = field->unit_exponent; } - } else { - features->device_type = BTN_TOOL_PEN; } break; case HID_GD_Y: @@ -237,7 +253,7 @@ static void wacom_post_parse_hid(struct hid_device *hdev, if (features->type == HID_GENERIC) { /* Any last-minute generic device setup */ if (features->touch_max > 1) { - input_mt_init_slots(wacom_wac->input, wacom_wac->features.touch_max, + input_mt_init_slots(wacom_wac->touch_input, wacom_wac->features.touch_max, INPUT_MT_DIRECT); } } @@ -395,7 +411,7 @@ static int wacom_query_tablet_data(struct hid_device *hdev, if (features->type == HID_GENERIC) return wacom_hid_set_device_mode(hdev); - if (features->device_type == BTN_TOOL_FINGER) { + if (features->device_type & WACOM_DEVICETYPE_TOUCH) { if (features->type > TABLETPC) { /* MT Tablet PC touch */ return wacom_set_device_mode(hdev, 3, 4, 4); @@ -409,7 +425,7 @@ static int wacom_query_tablet_data(struct hid_device *hdev, else if (features->type == BAMBOO_PAD) { return wacom_set_device_mode(hdev, 2, 2, 2); } - } else if (features->device_type == BTN_TOOL_PEN) { + } else if (features->device_type & WACOM_DEVICETYPE_PEN) { if (features->type <= BAMBOO_PT && features->type != WIRELESS) { return wacom_set_device_mode(hdev, 2, 2, 2); } @@ -425,7 +441,6 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, struct usb_interface *intf = wacom->intf; /* default features */ - features->device_type = BTN_TOOL_PEN; features->x_fuzz = 4; features->y_fuzz = 4; features->pressure_fuzz = 0; @@ -439,17 +454,13 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, */ if (features->type == WIRELESS) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) { - features->device_type = 0; + features->device_type = WACOM_DEVICETYPE_NONE; } else if (intf->cur_altsetting->desc.bInterfaceNumber == 2) { - features->device_type = BTN_TOOL_FINGER; + features->device_type |= WACOM_DEVICETYPE_TOUCH; features->pktlen = WACOM_PKGLEN_BBTOUCH3; } } - /* only devices that support touch need to retrieve the info */ - if (features->type < BAMBOO_PT) - return; - wacom_parse_hid(hdev, features); } @@ -527,9 +538,9 @@ static int wacom_add_shared_data(struct hid_device *hdev) wacom_wac->shared = &data->shared; - if (wacom_wac->features.device_type == BTN_TOOL_FINGER) + if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) wacom_wac->shared->touch = hdev; - else if (wacom_wac->features.device_type == BTN_TOOL_PEN) + else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) wacom_wac->shared->pen = hdev; out: @@ -848,6 +859,9 @@ static int wacom_initialize_leds(struct wacom *wacom) { int error; + if (!(wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD)) + return 0; + /* Initialize default values */ switch (wacom->wacom_wac.features.type) { case INTUOS4S: @@ -881,17 +895,14 @@ static int wacom_initialize_leds(struct wacom *wacom) case INTUOSPS: case INTUOSPM: case INTUOSPL: - if 
(wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) { - wacom->led.select[0] = 0; - wacom->led.select[1] = 0; - wacom->led.llv = 32; - wacom->led.hlv = 0; - wacom->led.img_lum = 0; - - error = sysfs_create_group(&wacom->hdev->dev.kobj, - &intuos5_led_attr_group); - } else - return 0; + wacom->led.select[0] = 0; + wacom->led.select[1] = 0; + wacom->led.llv = 32; + wacom->led.hlv = 0; + wacom->led.img_lum = 0; + + error = sysfs_create_group(&wacom->hdev->dev.kobj, + &intuos5_led_attr_group); break; default: @@ -914,6 +925,9 @@ static void wacom_destroy_leds(struct wacom *wacom) if (!wacom->led_initialized) return; + if (!(wacom->wacom_wac.features.device_type & WACOM_DEVICETYPE_PAD)) + return; + wacom->led_initialized = false; switch (wacom->wacom_wac.features.type) { @@ -937,9 +951,8 @@ static void wacom_destroy_leds(struct wacom *wacom) case INTUOSPS: case INTUOSPM: case INTUOSPL: - if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN) - sysfs_remove_group(&wacom->hdev->dev.kobj, - &intuos5_led_attr_group); + sysfs_remove_group(&wacom->hdev->dev.kobj, + &intuos5_led_attr_group); break; } } @@ -1117,7 +1130,7 @@ static struct input_dev *wacom_allocate_input(struct wacom *wacom) if (!input_dev) return NULL; - input_dev->name = wacom_wac->name; + input_dev->name = wacom_wac->pen_name; input_dev->phys = hdev->phys; input_dev->dev.parent = &hdev->dev; input_dev->open = wacom_open; @@ -1136,27 +1149,33 @@ static void wacom_free_inputs(struct wacom *wacom) { struct wacom_wac *wacom_wac = &(wacom->wacom_wac); - if (wacom_wac->input) - input_free_device(wacom_wac->input); + if (wacom_wac->pen_input) + input_free_device(wacom_wac->pen_input); + if (wacom_wac->touch_input) + input_free_device(wacom_wac->touch_input); if (wacom_wac->pad_input) input_free_device(wacom_wac->pad_input); - wacom_wac->input = NULL; + wacom_wac->pen_input = NULL; + wacom_wac->touch_input = NULL; wacom_wac->pad_input = NULL; } static int wacom_allocate_inputs(struct wacom *wacom) { - struct input_dev *input_dev, *pad_input_dev; + struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); - input_dev = wacom_allocate_input(wacom); + pen_input_dev = wacom_allocate_input(wacom); + touch_input_dev = wacom_allocate_input(wacom); pad_input_dev = wacom_allocate_input(wacom); - if (!input_dev || !pad_input_dev) { + if (!pen_input_dev || !touch_input_dev || !pad_input_dev) { wacom_free_inputs(wacom); return -ENOMEM; } - wacom_wac->input = input_dev; + wacom_wac->pen_input = pen_input_dev; + wacom_wac->touch_input = touch_input_dev; + wacom_wac->touch_input->name = wacom_wac->touch_name; wacom_wac->pad_input = pad_input_dev; wacom_wac->pad_input->name = wacom_wac->pad_name; @@ -1165,11 +1184,17 @@ static int wacom_allocate_inputs(struct wacom *wacom) static void wacom_clean_inputs(struct wacom *wacom) { - if (wacom->wacom_wac.input) { - if (wacom->wacom_wac.input_registered) - input_unregister_device(wacom->wacom_wac.input); + if (wacom->wacom_wac.pen_input) { + if (wacom->wacom_wac.pen_registered) + input_unregister_device(wacom->wacom_wac.pen_input); else - input_free_device(wacom->wacom_wac.input); + input_free_device(wacom->wacom_wac.pen_input); + } + if (wacom->wacom_wac.touch_input) { + if (wacom->wacom_wac.touch_registered) + input_unregister_device(wacom->wacom_wac.touch_input); + else + input_free_device(wacom->wacom_wac.touch_input); } if (wacom->wacom_wac.pad_input) { if (wacom->wacom_wac.pad_registered) @@ -1177,29 +1202,49 @@ static void 
wacom_clean_inputs(struct wacom *wacom) else input_free_device(wacom->wacom_wac.pad_input); } - wacom->wacom_wac.input = NULL; + wacom->wacom_wac.pen_input = NULL; + wacom->wacom_wac.touch_input = NULL; wacom->wacom_wac.pad_input = NULL; wacom_destroy_leds(wacom); } static int wacom_register_inputs(struct wacom *wacom) { - struct input_dev *input_dev, *pad_input_dev; + struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; struct wacom_wac *wacom_wac = &(wacom->wacom_wac); - int error; + int error = 0; - input_dev = wacom_wac->input; + pen_input_dev = wacom_wac->pen_input; + touch_input_dev = wacom_wac->touch_input; pad_input_dev = wacom_wac->pad_input; - if (!input_dev || !pad_input_dev) + if (!pen_input_dev || !touch_input_dev || !pad_input_dev) return -EINVAL; - error = wacom_setup_pentouch_input_capabilities(input_dev, wacom_wac); - if (!error) { - error = input_register_device(input_dev); + error = wacom_setup_pen_input_capabilities(pen_input_dev, wacom_wac); + if (error) { + /* no pen in use on this interface */ + input_free_device(pen_input_dev); + wacom_wac->pen_input = NULL; + pen_input_dev = NULL; + } else { + error = input_register_device(pen_input_dev); + if (error) + goto fail_register_pen_input; + wacom_wac->pen_registered = true; + } + + error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac); + if (error) { + /* no touch in use on this interface */ + input_free_device(touch_input_dev); + wacom_wac->touch_input = NULL; + touch_input_dev = NULL; + } else { + error = input_register_device(touch_input_dev); if (error) - return error; - wacom_wac->input_registered = true; + goto fail_register_touch_input; + wacom_wac->touch_registered = true; } error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); @@ -1226,9 +1271,14 @@ fail_leds: pad_input_dev = NULL; wacom_wac->pad_registered = false; fail_register_pad_input: - input_unregister_device(input_dev); - wacom_wac->input = NULL; - wacom_wac->input_registered = false; + input_unregister_device(touch_input_dev); + wacom_wac->touch_input = NULL; + wacom_wac->touch_registered = false; +fail_register_touch_input: + input_unregister_device(pen_input_dev); + wacom_wac->pen_input = NULL; + wacom_wac->pen_registered = false; +fail_register_pen_input: return error; } @@ -1285,8 +1335,11 @@ static void wacom_wireless_work(struct work_struct *work) /* Stylus interface */ wacom_wac1->features = *((struct wacom_features *)id->driver_data); - wacom_wac1->features.device_type = BTN_TOOL_PEN; - snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen", + wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PEN; + if (wacom_wac1->features.type != INTUOSHT && + wacom_wac1->features.type != BAMBOO_PT) + wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; + snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", wacom_wac1->features.name); @@ -1304,16 +1357,16 @@ static void wacom_wireless_work(struct work_struct *work) wacom_wac2->features = *((struct wacom_features *)id->driver_data); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; - wacom_wac2->features.device_type = BTN_TOOL_FINGER; wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; - if (wacom_wac2->features.touch_max) - snprintf(wacom_wac2->name, WACOM_NAME_MAX, - "%s (WL) Finger",wacom_wac2->features.name); - else - snprintf(wacom_wac2->name, WACOM_NAME_MAX, - "%s (WL) Pad",wacom_wac2->features.name); + snprintf(wacom_wac2->touch_name, 
WACOM_NAME_MAX, + "%s (WL) Finger",wacom_wac2->features.name); snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, - "%s (WL) Pad", wacom_wac2->features.name); + "%s (WL) Pad",wacom_wac2->features.name); + if (wacom_wac1->features.touch_max) + wacom_wac2->features.device_type |= WACOM_DEVICETYPE_TOUCH; + if (wacom_wac1->features.type == INTUOSHT || + wacom_wac1->features.type == BAMBOO_PT) + wacom_wac2->features.device_type |= WACOM_DEVICETYPE_PAD; wacom_wac2->pid = wacom_wac->pid; error = wacom_allocate_inputs(wacom2) || wacom_register_inputs(wacom2); @@ -1322,7 +1375,7 @@ static void wacom_wireless_work(struct work_struct *work) if (wacom_wac1->features.type == INTUOSHT && wacom_wac1->features.touch_max) - wacom_wac->shared->touch_input = wacom_wac2->input; + wacom_wac->shared->touch_input = wacom_wac2->touch_input; } error = wacom_initialize_battery(wacom); @@ -1369,6 +1422,12 @@ static void wacom_set_default_phy(struct wacom_features *features) static void wacom_calculate_res(struct wacom_features *features) { + /* set unit to "100th of a mm" for devices not reported by HID */ + if (!features->unit) { + features->unit = 0x11; + features->unitExpo = -3; + } + features->x_resolution = wacom_calc_hid_res(features->x_max, features->x_phy, features->unit, @@ -1396,6 +1455,49 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev) return size; } +static void wacom_update_name(struct wacom *wacom) +{ + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct wacom_features *features = &wacom_wac->features; + char name[WACOM_NAME_MAX]; + + /* Generic devices name unspecified */ + if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) { + if (strstr(wacom->hdev->name, "Wacom") || + strstr(wacom->hdev->name, "wacom") || + strstr(wacom->hdev->name, "WACOM")) { + /* name is in HID descriptor, use it */ + strlcpy(name, wacom->hdev->name, sizeof(name)); + + /* strip out excess whitespaces */ + while (1) { + char *gap = strstr(name, " "); + if (gap == NULL) + break; + /* shift everything including the terminator */ + memmove(gap, gap+1, strlen(gap)); + } + /* get rid of trailing whitespace */ + if (name[strlen(name)-1] == ' ') + name[strlen(name)-1] = '\0'; + } else { + /* no meaningful name retrieved. use product ID */ + snprintf(name, sizeof(name), + "%s %X", features->name, wacom->hdev->product); + } + } else { + strlcpy(name, features->name, sizeof(name)); + } + + /* Append the device type to the name */ + snprintf(wacom_wac->pen_name, sizeof(wacom_wac->pen_name), + "%s Pen", name); + snprintf(wacom_wac->touch_name, sizeof(wacom_wac->touch_name), + "%s Finger", name); + snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name), + "%s Pad", name); +} + static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -1474,64 +1576,25 @@ static int wacom_probe(struct hid_device *hdev, /* Retrieve the physical and logical size for touch devices */ wacom_retrieve_hid_descriptor(hdev, features); + wacom_setup_device_quirks(wacom); - /* - * Intuos5 has no useful data about its touch interface in its - * HID descriptor. If this is the touch interface (PacketSize - * of WACOM_PKGLEN_BBTOUCH3), override the table values. - */ - if (features->type >= INTUOS5S && features->type <= INTUOSHT) { - if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { - features->device_type = BTN_TOOL_FINGER; + if (features->device_type == WACOM_DEVICETYPE_NONE && + features->type != WIRELESS) { + error = features->type == HID_GENERIC ? 
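/*
 * wacom_update_name() above squeezes repeated blanks out of the
 * HID-provided name with strstr() + memmove(); the memmove() length
 * includes the terminating NUL, so the string shrinks in place one
 * character per pass.  The same logic as a stand-alone helper — the
 * name and the empty-string guard are illustrative additions, not
 * code from the patch:
 */
static void squeeze_spaces(char *name)
{
	char *gap;
	size_t len;

	while ((gap = strstr(name, "  ")) != NULL)
		memmove(gap, gap + 1, strlen(gap));	/* shifts the '\0' too */

	len = strlen(name);
	if (len && name[len - 1] == ' ')
		name[len - 1] = '\0';
}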
-ENODEV : 0; - features->x_max = 4096; - features->y_max = 4096; - } else { - features->device_type = BTN_TOOL_PEN; - } - } + dev_warn(&hdev->dev, "Unknown device_type for '%s'. %s.", + hdev->name, + error ? "Ignoring" : "Assuming pen"); - /* - * Same thing for Bamboo 3rd gen. - */ - if ((features->type == BAMBOO_PT) && - (features->pktlen == WACOM_PKGLEN_BBTOUCH3) && - (features->device_type == BTN_TOOL_PEN)) { - features->device_type = BTN_TOOL_FINGER; + if (error) + goto fail_shared_data; - features->x_max = 4096; - features->y_max = 4096; + features->device_type |= WACOM_DEVICETYPE_PEN; } - /* - * Same thing for Bamboo PAD - */ - if (features->type == BAMBOO_PAD) - features->device_type = BTN_TOOL_FINGER; - - if (hdev->bus == BUS_BLUETOOTH) - features->quirks |= WACOM_QUIRK_BATTERY; - - wacom_setup_device_quirks(features); - - /* set unit to "100th of a mm" for devices not reported by HID */ - if (!features->unit) { - features->unit = 0x11; - features->unitExpo = -3; - } wacom_calculate_res(features); - strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name)); - snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name), - "%s Pad", features->name); - - /* Append the device type to the name */ - if (features->device_type != BTN_TOOL_FINGER) - strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX); - else if (features->touch_max) - strlcat(wacom_wac->name, " Finger", WACOM_NAME_MAX); - else - strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX); + wacom_update_name(wacom); error = wacom_add_shared_data(hdev); if (error) @@ -1574,9 +1637,9 @@ static int wacom_probe(struct hid_device *hdev, if (features->quirks & WACOM_QUIRK_MONITOR) error = hid_hw_open(hdev); - if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) { - if (wacom_wac->features.device_type == BTN_TOOL_FINGER) - wacom_wac->shared->touch_input = wacom_wac->input; + if (wacom_wac->features.type == INTUOSHT && + wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) { + wacom_wac->shared->touch_input = wacom_wac->touch_input; } return 0; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index adf959dcfa5d..232da89f4e88 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -69,7 +69,7 @@ static void wacom_notify_battery(struct wacom_wac *wacom_wac, static int wacom_penpartner_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; switch (data[0]) { case 1: @@ -114,7 +114,7 @@ static int wacom_pl_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; int prox, pressure; if (data[0] != WACOM_REPORT_PENABLED) { @@ -186,7 +186,7 @@ static int wacom_pl_irq(struct wacom_wac *wacom) static int wacom_ptu_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; if (data[0] != WACOM_REPORT_PENABLED) { dev_dbg(input->dev.parent, @@ -215,7 +215,7 @@ static int wacom_ptu_irq(struct wacom_wac *wacom) static int wacom_dtu_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; int prox = data[1] & 0x20; dev_dbg(input->dev.parent, @@ -245,7 +245,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom) static int wacom_dtus_irq(struct wacom_wac *wacom) { char 
*data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; unsigned short prox, pressure = 0; if (data[0] != WACOM_REPORT_DTUS && data[0] != WACOM_REPORT_DTUSPAD) { @@ -297,7 +297,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; struct input_dev *pad_input = wacom->pad_input; int battery_capacity, ps_connected; int prox; @@ -464,7 +464,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; int idx = 0; /* tool number */ @@ -649,7 +649,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; unsigned int t; /* general pen packet */ @@ -681,7 +681,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; unsigned int t; int idx = 0, result; @@ -1025,7 +1025,7 @@ static void wacom_intuos_bt_process_data(struct wacom_wac *wacom, memcpy(wacom->data, data, 10); wacom_intuos_irq(wacom); - input_sync(wacom->input); + input_sync(wacom->pen_input); if (wacom->pad_input) input_sync(wacom->pad_input); } @@ -1057,7 +1057,7 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len) ps_connected); break; default: - dev_dbg(wacom->input->dev.parent, + dev_dbg(wacom->pen_input->dev.parent, "Unknown report: %d,%d size:%zu\n", data[0], data[1], len); return 0; @@ -1067,7 +1067,7 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len) static int wacom_wac_finger_count_touches(struct wacom_wac *wacom) { - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; unsigned touch_max = wacom->features.touch_max; int count = 0; int i; @@ -1075,9 +1075,8 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom) if (!touch_max) return 0; - /* non-HID_GENERIC single touch input doesn't call this routine */ - if ((touch_max == 1) && (wacom->features.type == HID_GENERIC)) - return wacom->hid_data.tipswitch && + if (touch_max == 1) + return test_bit(BTN_TOUCH, input->key) && !wacom->shared->stylus_in_proximity; for (i = 0; i < input->mt->num_slots; i++) { @@ -1092,7 +1091,7 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom) static int wacom_24hdt_irq(struct wacom_wac *wacom) { - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; unsigned char *data = wacom->data; int i; int current_num_contacts = data[61]; @@ -1160,7 +1159,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom) static int wacom_mt_touch(struct wacom_wac *wacom) { - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; unsigned char *data = wacom->data; int i; int current_num_contacts = data[2]; @@ -1211,7 +1210,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom) static int wacom_tpc_mt_touch(struct wacom_wac *wacom) { - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; unsigned char *data = 
wacom->data; int i; @@ -1240,7 +1239,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom) static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; bool prox = !wacom->shared->stylus_in_proximity; int x = 0, y = 0; @@ -1276,7 +1275,7 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) static int wacom_tpc_pen(struct wacom_wac *wacom) { unsigned char *data = wacom->data; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; bool prox = data[1] & 0x20; if (!wacom->shared->stylus_in_proximity) /* first in prox */ @@ -1305,8 +1304,12 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) { unsigned char *data = wacom->data; - dev_dbg(wacom->input->dev.parent, - "%s: received report #%d\n", __func__, data[0]); + if (wacom->pen_input) + dev_dbg(wacom->pen_input->dev.parent, + "%s: received report #%d\n", __func__, data[0]); + else if (wacom->touch_input) + dev_dbg(wacom->touch_input->dev.parent, + "%s: received report #%d\n", __func__, data[0]); switch (len) { case WACOM_PKGLEN_TPC1FG: @@ -1338,11 +1341,9 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) return 0; } -static void wacom_map_usage(struct wacom *wacom, struct hid_usage *usage, +static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage, struct hid_field *field, __u8 type, __u16 code, int fuzz) { - struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct input_dev *input = wacom_wac->input; int fmin = field->logical_minimum; int fmax = field->logical_maximum; @@ -1370,36 +1371,38 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + struct input_dev *input = wacom_wac->pen_input; switch (usage->hid) { case HID_GD_X: - wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4); + wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4); break; case HID_GD_Y: - wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4); + wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4); break; case HID_DG_TIPPRESSURE: - wacom_map_usage(wacom, usage, field, EV_ABS, ABS_PRESSURE, 0); + wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0); break; case HID_DG_INRANGE: - wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOOL_PEN, 0); + wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0); break; case HID_DG_INVERT: - wacom_map_usage(wacom, usage, field, EV_KEY, + wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_RUBBER, 0); break; case HID_DG_ERASER: case HID_DG_TIPSWITCH: - wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0); + wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0); break; case HID_DG_BARRELSWITCH: - wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS, 0); + wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS, 0); break; case HID_DG_BARRELSWITCH2: - wacom_map_usage(wacom, usage, field, EV_KEY, BTN_STYLUS2, 0); + wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0); break; case HID_DG_TOOLSERIALNUMBER: - wacom_map_usage(wacom, usage, field, EV_MSC, MSC_SERIAL, 0); + wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0); break; } } @@ -1409,7 +1412,7 @@ static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field, { struct wacom *wacom = hid_get_drvdata(hdev); struct 
wacom_wac *wacom_wac = &wacom->wacom_wac; - struct input_dev *input = wacom_wac->input; + struct input_dev *input = wacom_wac->pen_input; /* checking which Tool / tip switch to send */ switch (usage->hid) { @@ -1439,7 +1442,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev, { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct input_dev *input = wacom_wac->input; + struct input_dev *input = wacom_wac->pen_input; bool prox = wacom_wac->hid_data.inrange_state; if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */ @@ -1468,23 +1471,24 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; struct wacom_features *features = &wacom_wac->features; + struct input_dev *input = wacom_wac->touch_input; unsigned touch_max = wacom_wac->features.touch_max; switch (usage->hid) { case HID_GD_X: features->last_slot_field = usage->hid; if (touch_max == 1) - wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4); + wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4); else - wacom_map_usage(wacom, usage, field, EV_ABS, + wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_POSITION_X, 4); break; case HID_GD_Y: features->last_slot_field = usage->hid; if (touch_max == 1) - wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4); + wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4); else - wacom_map_usage(wacom, usage, field, EV_ABS, + wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_POSITION_Y, 4); break; case HID_DG_CONTACTID: @@ -1498,7 +1502,7 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev, break; case HID_DG_TIPSWITCH: features->last_slot_field = usage->hid; - wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0); + wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0); break; } } @@ -1554,7 +1558,7 @@ static int wacom_wac_finger_event(struct hid_device *hdev, if (usage->usage_index + 1 == field->report_count) { if (usage->hid == wacom_wac->features.last_slot_field) - wacom_wac_finger_slot(wacom_wac, wacom_wac->input); + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); } return 0; @@ -1565,7 +1569,7 @@ static void wacom_wac_finger_report(struct hid_device *hdev, { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct input_dev *input = wacom_wac->input; + struct input_dev *input = wacom_wac->touch_input; unsigned touch_max = wacom_wac->features.touch_max; if (touch_max > 1) @@ -1582,10 +1586,10 @@ void wacom_wac_usage_mapping(struct hid_device *hdev, { struct wacom *wacom = hid_get_drvdata(hdev); struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct input_dev *input = wacom_wac->input; /* currently, only direct devices have proper hid report descriptors */ - __set_bit(INPUT_PROP_DIRECT, input->propbit); + __set_bit(INPUT_PROP_DIRECT, wacom_wac->pen_input->propbit); + __set_bit(INPUT_PROP_DIRECT, wacom_wac->touch_input->propbit); if (WACOM_PEN_FIELD(field)) return wacom_wac_pen_usage_mapping(hdev, field, usage); @@ -1630,7 +1634,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report) static int wacom_bpt_touch(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; struct input_dev *pad_input = wacom->pad_input; unsigned char *data = wacom->data; int i; @@ -1678,7 +1682,7 @@ static int 
wacom_bpt_touch(struct wacom_wac *wacom) static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) { struct wacom_features *features = &wacom->features; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; bool touch = data[1] & 0x80; int slot = input_mt_get_slot_by_key(input, data[0]); @@ -1736,7 +1740,6 @@ static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) static int wacom_bpt3_touch(struct wacom_wac *wacom) { - struct input_dev *input = wacom->input; unsigned char *data = wacom->data; int count = data[1] & 0x07; int i; @@ -1755,8 +1758,12 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) wacom_bpt3_button_msg(wacom, data + offset); } - input_mt_sync_frame(input); - wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); + + /* only update the touch if we actually have a touchpad */ + if (wacom->touch_registered) { + input_mt_sync_frame(wacom->touch_input); + wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom); + } return 1; } @@ -1764,7 +1771,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom) static int wacom_bpt_pen(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->pen_input; unsigned char *data = wacom->data; int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0; @@ -1873,7 +1880,7 @@ static void wacom_bamboo_pad_pen_event(struct wacom_wac *wacom, static int wacom_bamboo_pad_touch_event(struct wacom_wac *wacom, unsigned char *data) { - struct input_dev *input = wacom->input; + struct input_dev *input = wacom->touch_input; unsigned char *finger_data, prefix; unsigned id; int x, y; @@ -2117,7 +2124,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) } if (sync) { - input_sync(wacom_wac->input); + if (wacom_wac->pen_input) + input_sync(wacom_wac->pen_input); + if (wacom_wac->touch_input) + input_sync(wacom_wac->touch_input); if (wacom_wac->pad_input) input_sync(wacom_wac->pad_input); } @@ -2125,7 +2135,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) static void wacom_setup_cintiq(struct wacom_wac *wacom_wac) { - struct input_dev *input_dev = wacom_wac->input; + struct input_dev *input_dev = wacom_wac->pen_input; input_set_capability(input_dev, EV_MSC, MSC_SERIAL); @@ -2148,7 +2158,7 @@ static void wacom_setup_cintiq(struct wacom_wac *wacom_wac) static void wacom_setup_intuos(struct wacom_wac *wacom_wac) { - struct input_dev *input_dev = wacom_wac->input; + struct input_dev *input_dev = wacom_wac->pen_input; input_set_capability(input_dev, EV_REL, REL_WHEEL); @@ -2167,15 +2177,57 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac) input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0); } -void wacom_setup_device_quirks(struct wacom_features *features) +void wacom_setup_device_quirks(struct wacom *wacom) { + struct wacom_features *features = &wacom->wacom_wac.features; + + /* The pen and pad share the same interface on most devices */ + if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 || + features->type == DTUS || features->type == WACOM_MO || + (features->type >= INTUOS3S && features->type <= WACOM_13HD && + features->type != INTUOSHT)) { + if (features->device_type & WACOM_DEVICETYPE_PEN) + features->device_type |= WACOM_DEVICETYPE_PAD; + } /* touch device found but size is not defined. 
use default */ - if (features->device_type == BTN_TOOL_FINGER && !features->x_max) { + if (features->device_type & WACOM_DEVICETYPE_TOUCH && !features->x_max) { features->x_max = 1023; features->y_max = 1023; } + /* + * Intuos5/Pro and Bamboo 3rd gen have no useful data about its + * touch interface in its HID descriptor. If this is the touch + * interface (PacketSize of WACOM_PKGLEN_BBTOUCH3), override the + * tablet values. + */ + if ((features->type >= INTUOS5S && features->type <= INTUOSHT) || + (features->type == BAMBOO_PT)) { + if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { + if (features->touch_max) + features->device_type |= WACOM_DEVICETYPE_TOUCH; + if (features->type == BAMBOO_PT || features->type == INTUOSHT) + features->device_type |= WACOM_DEVICETYPE_PAD; + + features->x_max = 4096; + features->y_max = 4096; + } + } + + /* + * Raw Wacom-mode pen and touch events both come from interface + * 0, whose HID descriptor has an application usage of 0xFF0D + * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back + * out through the HID_GENERIC device created for interface 1, + * so rewrite this one to be of type BTN_TOOL_FINGER. + */ + if (features->type == BAMBOO_PAD) + features->device_type |= WACOM_DEVICETYPE_TOUCH; + + if (wacom->hdev->bus == BUS_BLUETOOTH) + features->quirks |= WACOM_QUIRK_BATTERY; + /* quirk for bamboo touch with 2 low res touches */ if (features->type == BAMBOO_PT && features->pktlen == WACOM_PKGLEN_BBTOUCH) { @@ -2192,61 +2244,23 @@ void wacom_setup_device_quirks(struct wacom_features *features) features->quirks |= WACOM_QUIRK_NO_INPUT; /* must be monitor interface if no device_type set */ - if (!features->device_type) { + if (features->device_type == WACOM_DEVICETYPE_NONE) { features->quirks |= WACOM_QUIRK_MONITOR; features->quirks |= WACOM_QUIRK_BATTERY; } } } -static void wacom_abs_set_axis(struct input_dev *input_dev, - struct wacom_wac *wacom_wac) -{ - struct wacom_features *features = &wacom_wac->features; - - if (features->device_type == BTN_TOOL_PEN) { - input_set_abs_params(input_dev, ABS_X, features->x_min, - features->x_max, features->x_fuzz, 0); - input_set_abs_params(input_dev, ABS_Y, features->y_min, - features->y_max, features->y_fuzz, 0); - input_set_abs_params(input_dev, ABS_PRESSURE, 0, - features->pressure_max, features->pressure_fuzz, 0); - - /* penabled devices have fixed resolution for each model */ - input_abs_set_res(input_dev, ABS_X, features->x_resolution); - input_abs_set_res(input_dev, ABS_Y, features->y_resolution); - } else { - if (features->touch_max == 1) { - input_set_abs_params(input_dev, ABS_X, 0, - features->x_max, features->x_fuzz, 0); - input_set_abs_params(input_dev, ABS_Y, 0, - features->y_max, features->y_fuzz, 0); - input_abs_set_res(input_dev, ABS_X, - features->x_resolution); - input_abs_set_res(input_dev, ABS_Y, - features->y_resolution); - } - - if (features->touch_max > 1) { - input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, - features->x_max, features->x_fuzz, 0); - input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, - features->y_max, features->y_fuzz, 0); - input_abs_set_res(input_dev, ABS_MT_POSITION_X, - features->x_resolution); - input_abs_set_res(input_dev, ABS_MT_POSITION_Y, - features->y_resolution); - } - } -} - -int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, +int wacom_setup_pen_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | 
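/*
 * Most of the churn in this file follows from device_type becoming a
 * bitmask: the old code stored exactly one value (BTN_TOOL_PEN *or*
 * BTN_TOOL_FINGER) and compared with ==, while the new code ORs
 * WACOM_DEVICETYPE_PEN/TOUCH/PAD together and tests with &, so a
 * single interface can carry several roles at once.  A sketch of the
 * resulting pattern (function name illustrative only):
 */
static void wacom_setup_by_type(struct wacom_features *features)
{
	/* an interface may now be pen *and* pad simultaneously */
	features->device_type |= WACOM_DEVICETYPE_PEN | WACOM_DEVICETYPE_PAD;

	if (features->device_type & WACOM_DEVICETYPE_PEN)
		; /* set up the pen input */
	if (features->device_type & WACOM_DEVICETYPE_TOUCH)
		; /* set up the touch input */
	if (features->device_type & WACOM_DEVICETYPE_PAD)
		; /* set up the pad input */
}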
BIT_MASK(EV_ABS); + if (!(features->device_type & WACOM_DEVICETYPE_PEN)) + return -ENODEV; + if (features->type == HID_GENERIC) /* setup has already been done */ return 0; @@ -2254,7 +2268,17 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(ABS_MISC, input_dev->absbit); - wacom_abs_set_axis(input_dev, wacom_wac); + input_set_abs_params(input_dev, ABS_X, features->x_min, + features->x_max, features->x_fuzz, 0); + input_set_abs_params(input_dev, ABS_Y, features->y_min, + features->y_max, features->y_fuzz, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, 0, + features->pressure_max, features->pressure_fuzz, 0); + + /* penabled devices have fixed resolution for each model */ + input_abs_set_res(input_dev, ABS_X, features->x_resolution); + input_abs_set_res(input_dev, ABS_Y, features->y_resolution); + switch (features->type) { case GRAPHIRE_BT: @@ -2323,53 +2347,25 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, case INTUOSPS: __set_bit(INPUT_PROP_POINTER, input_dev->propbit); - if (features->device_type == BTN_TOOL_PEN) { - input_set_abs_params(input_dev, ABS_DISTANCE, 0, - features->distance_max, - 0, 0); - - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - input_abs_set_res(input_dev, ABS_Z, 287); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, + features->distance_max, + 0, 0); - wacom_setup_intuos(wacom_wac); - } else if (features->device_type == BTN_TOOL_FINGER) { - __clear_bit(ABS_MISC, input_dev->absbit); + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_abs_set_res(input_dev, ABS_Z, 287); - input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, - 0, features->x_max, 0, 0); - input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, - 0, features->y_max, 0, 0); - input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); - } + wacom_setup_intuos(wacom_wac); break; case WACOM_24HDT: - if (features->device_type == BTN_TOOL_FINGER) { - input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); - input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0); - input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0); - input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); - } - /* fall through */ - case WACOM_27QHDT: case MTSCREEN: case MTTPC: case MTTPC_B: case TABLETPC2FG: - if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1) - input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); - /* fall through */ - case TABLETPC: case TABLETPCE: __clear_bit(ABS_MISC, input_dev->absbit); - - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); - - if (features->device_type != BTN_TOOL_PEN) - break; /* no need to process stylus stuff */ - /* fall through */ case DTUS: @@ -2397,50 +2393,114 @@ int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev, break; case INTUOSHT: - if (features->touch_max && - features->device_type == BTN_TOOL_FINGER) { - input_dev->evbit[0] |= BIT_MASK(EV_SW); - __set_bit(SW_MUTE_DEVICE, input_dev->swbit); - } - /* fall through */ - case BAMBOO_PT: __clear_bit(ABS_MISC, input_dev->absbit); - if (features->device_type == BTN_TOOL_FINGER) { + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); + __set_bit(BTN_TOOL_PEN, input_dev->keybit); + __set_bit(BTN_STYLUS, input_dev->keybit); + __set_bit(BTN_STYLUS2, input_dev->keybit); + input_set_abs_params(input_dev, ABS_DISTANCE, 0, + 
features->distance_max, + 0, 0); + break; + case BAMBOO_PAD: + __clear_bit(ABS_MISC, input_dev->absbit); + break; + } + return 0; +} - if (features->touch_max) { - if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { - input_set_abs_params(input_dev, - ABS_MT_TOUCH_MAJOR, - 0, features->x_max, 0, 0); - input_set_abs_params(input_dev, - ABS_MT_TOUCH_MINOR, - 0, features->y_max, 0, 0); - } - input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); - } else { - /* buttons/keys only interface */ - __clear_bit(ABS_X, input_dev->absbit); - __clear_bit(ABS_Y, input_dev->absbit); - __clear_bit(BTN_TOUCH, input_dev->keybit); +int wacom_setup_touch_input_capabilities(struct input_dev *input_dev, + struct wacom_wac *wacom_wac) +{ + struct wacom_features *features = &wacom_wac->features; - /* PAD is setup by wacom_setup_pad_input_capabilities later */ - return 1; - } - } else if (features->device_type == BTN_TOOL_PEN) { - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); - __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); - __set_bit(BTN_TOOL_PEN, input_dev->keybit); - __set_bit(BTN_STYLUS, input_dev->keybit); - __set_bit(BTN_STYLUS2, input_dev->keybit); - input_set_abs_params(input_dev, ABS_DISTANCE, 0, - features->distance_max, - 0, 0); + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + + if (!(features->device_type & WACOM_DEVICETYPE_TOUCH)) + return -ENODEV; + + if (features->type == HID_GENERIC) + /* setup has already been done */ + return 0; + + __set_bit(BTN_TOUCH, input_dev->keybit); + + if (features->touch_max == 1) { + input_set_abs_params(input_dev, ABS_X, 0, + features->x_max, features->x_fuzz, 0); + input_set_abs_params(input_dev, ABS_Y, 0, + features->y_max, features->y_fuzz, 0); + input_abs_set_res(input_dev, ABS_X, + features->x_resolution); + input_abs_set_res(input_dev, ABS_Y, + features->y_resolution); + } + else if (features->touch_max > 1) { + input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, + features->x_max, features->x_fuzz, 0); + input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, + features->y_max, features->y_fuzz, 0); + input_abs_set_res(input_dev, ABS_MT_POSITION_X, + features->x_resolution); + input_abs_set_res(input_dev, ABS_MT_POSITION_Y, + features->y_resolution); + } + + switch (features->type) { + case INTUOS5: + case INTUOS5L: + case INTUOSPM: + case INTUOSPL: + case INTUOS5S: + case INTUOSPS: + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); + input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0); + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); + break; + + case WACOM_24HDT: + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); + input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, 0, features->x_max, 0, 0); + input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, 0, features->y_max, 0, 0); + input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); + /* fall through */ + + case WACOM_27QHDT: + case MTSCREEN: + case MTTPC: + case MTTPC_B: + case TABLETPC2FG: + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); + /*fall through */ + + case TABLETPC: + case TABLETPCE: + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + break; + + case INTUOSHT: + input_dev->evbit[0] |= BIT_MASK(EV_SW); + __set_bit(SW_MUTE_DEVICE, input_dev->swbit); + /* fall through */ + + case BAMBOO_PT: + if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { + input_set_abs_params(input_dev, + 
ABS_MT_TOUCH_MAJOR, + 0, features->x_max, 0, 0); + input_set_abs_params(input_dev, + ABS_MT_TOUCH_MINOR, + 0, features->y_max, 0, 0); } + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); break; + case BAMBOO_PAD: - __clear_bit(ABS_MISC, input_dev->absbit); input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); __set_bit(BTN_LEFT, input_dev->keybit); @@ -2456,6 +2516,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, struct wacom_features *features = &wacom_wac->features; int i; + if (!(features->device_type & WACOM_DEVICETYPE_PAD)) + return -ENODEV; + input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); /* kept for making legacy xf86-input-wacom working with the wheels */ @@ -2592,10 +2655,6 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case INTUOS5S: case INTUOSPS: - /* touch interface does not have the pad device */ - if (features->device_type != BTN_TOOL_PEN) - return -ENODEV; - for (i = 0; i < 7; i++) __set_bit(BTN_0 + i, input_dev->keybit); @@ -2637,12 +2696,6 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev, case INTUOSHT: case BAMBOO_PT: - /* pad device is on the touch interface */ - if ((features->device_type != BTN_TOOL_FINGER) || - /* Bamboo Pen only tablet does not have pad */ - ((features->type == BAMBOO_PT) && !features->touch_max)) - return -ENODEV; - __clear_bit(ABS_MISC, input_dev->absbit); __set_bit(BTN_LEFT, input_dev->keybit); @@ -2922,6 +2975,9 @@ static const struct wacom_features wacom_features_0x32F = { "Wacom DTU1031X", 22472, 12728, 511, 0, DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; +static const struct wacom_features wacom_features_0x336 = + { "Wacom DTU1141", 23472, 13203, 1023, 0, + DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x57 = { "Wacom DTK2241", 95640, 54060, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, @@ -3275,6 +3331,7 @@ const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x32F) }, { USB_DEVICE_WACOM(0x333) }, { USB_DEVICE_WACOM(0x335) }, + { USB_DEVICE_WACOM(0x336) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x4004) }, { USB_DEVICE_WACOM(0x5000) }, diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index 4700ac994a3b..2978c303909d 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -18,10 +18,7 @@ #define WACOM_NAME_MAX 64 /* packet length for individual models */ -#define WACOM_PKGLEN_PENPRTN 7 -#define WACOM_PKGLEN_GRAPHIRE 8 #define WACOM_PKGLEN_BBFUN 9 -#define WACOM_PKGLEN_INTUOS 10 #define WACOM_PKGLEN_TPC1FG 5 #define WACOM_PKGLEN_TPC1FG_B 10 #define WACOM_PKGLEN_TPC2FG 14 @@ -29,9 +26,6 @@ #define WACOM_PKGLEN_BBTOUCH3 64 #define WACOM_PKGLEN_BBPEN 10 #define WACOM_PKGLEN_WIRELESS 32 -#define WACOM_PKGLEN_MTOUCH 62 -#define WACOM_PKGLEN_MTTPC 40 -#define WACOM_PKGLEN_DTUS 68 #define WACOM_PKGLEN_PENABLED 8 #define WACOM_PKGLEN_BPAD_TOUCH 32 #define WACOM_PKGLEN_BPAD_TOUCH_USB 64 @@ -78,10 +72,20 @@ #define WACOM_QUIRK_MONITOR 0x0004 #define WACOM_QUIRK_BATTERY 0x0008 +/* device types */ +#define WACOM_DEVICETYPE_NONE 0x0000 +#define WACOM_DEVICETYPE_PEN 0x0001 +#define WACOM_DEVICETYPE_TOUCH 0x0002 +#define WACOM_DEVICETYPE_PAD 0x0004 + +#define WACOM_VENDORDEFINED_PEN 0xff0d0001 + #define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \ ((f)->physical == HID_DG_STYLUS) || \ ((f)->physical == HID_DG_PEN) || \ - ((f)->application == HID_DG_PEN)) + ((f)->application == HID_DG_PEN) || \ 
+ ((f)->application == HID_DG_DIGITIZER) || \ + ((f)->application == WACOM_VENDORDEFINED_PEN)) #define WACOM_FINGER_FIELD(f) (((f)->logical == HID_DG_FINGER) || \ ((f)->physical == HID_DG_FINGER) || \ ((f)->application == HID_DG_TOUCHSCREEN)) @@ -192,7 +196,8 @@ struct hid_data { }; struct wacom_wac { - char name[WACOM_NAME_MAX]; + char pen_name[WACOM_NAME_MAX]; + char touch_name[WACOM_NAME_MAX]; char pad_name[WACOM_NAME_MAX]; char bat_name[WACOM_NAME_MAX]; char ac_name[WACOM_NAME_MAX]; @@ -203,9 +208,11 @@ struct wacom_wac { bool reporting_data; struct wacom_features features; struct wacom_shared *shared; - struct input_dev *input; + struct input_dev *pen_input; + struct input_dev *touch_input; struct input_dev *pad_input; - bool input_registered; + bool pen_registered; + bool touch_registered; bool pad_registered; int pid; int battery_capacity; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 25d9e72627e9..54075a07d2a1 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -509,7 +509,7 @@ config SENSORS_G762 config SENSORS_GPIO_FAN tristate "GPIO fan" - depends on GPIOLIB + depends on GPIOLIB || COMPILE_TEST depends on THERMAL || THERMAL=n help If you say yes here you get support for fans connected to GPIO lines. @@ -1106,8 +1106,8 @@ config SENSORS_NTC_THERMISTOR send notifications about the temperature. Currently, this driver supports - NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333 - from Murata and B57330V2103 from EPCOS. + NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333, + and NCP03WF104 from Murata and B57330V2103 from EPCOS. This driver can also be built as a module. If so, the module will be called ntc-thermistor. @@ -1186,7 +1186,7 @@ config SENSORS_PWM_FAN config SENSORS_SHT15 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." - depends on GPIOLIB + depends on GPIOLIB || COMPILE_TEST help If you say yes here you get support for the Sensiron SHT10, SHT11, SHT15, SHT71, SHT75 humidity and temperature sensors. @@ -1452,6 +1452,16 @@ config SENSORS_INA2XX This driver can also be built as a module. If so, the module will be called ina2xx. +config SENSORS_TC74 + tristate "Microchip TC74" + depends on I2C + help + If you say yes here you get support for Microchip TC74 single + input temperature sensor chips. + + This driver can also be built as a module. If so, the module + will be called tc74. + config SENSORS_THMC50 tristate "Texas Instruments THMC50 / Analog Devices ADM1022" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index b4a40f17e2aa..ab904027f074 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -140,6 +140,7 @@ obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o +obj-$(CONFIG_SENSORS_TC74) += tc74.o obj-$(CONFIG_SENSORS_THMC50) += thmc50.o obj-$(CONFIG_SENSORS_TMP102) += tmp102.o obj-$(CONFIG_SENSORS_TMP103) += tmp103.o diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c index 4c829bb2f9db..f2f2f2fc755a 100644 --- a/drivers/hwmon/atxp1.c +++ b/drivers/hwmon/atxp1.c @@ -12,10 +12,9 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
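/*
 * This patch removes atxp1's detect() callback and address list, so
 * the chip is no longer probed automatically and must be instantiated
 * explicitly.  One way, from board code — the bus number and the 0x4e
 * address are examples only (see
 * Documentation/i2c/instantiating-devices):
 */
static struct i2c_board_info atxp1_board_info __initdata = {
	I2C_BOARD_INFO("atxp1", 0x4e),
};

/* during machine init, for adapter 0: */
i2c_register_board_info(0, &atxp1_board_info, 1);

/* or from userspace at run time:
 *   echo atxp1 0x4e > /sys/bus/i2c/devices/i2c-0/new_device
 */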
- * + * The ATXP1 can reside on I2C addresses 0x37 or 0x4e. The chip is + * not auto-detected by the driver and must be instantiated explicitly. + * See Documentation/i2c/instantiating-devices for more information. */ #include <linux/kernel.h> @@ -43,8 +42,6 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>"); #define ATXP1_VIDMASK 0x1f #define ATXP1_GPIO1MASK 0x0f -static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; - struct atxp1_data { struct i2c_client *client; struct mutex update_lock; @@ -259,48 +256,6 @@ static struct attribute *atxp1_attrs[] = { }; ATTRIBUTE_GROUPS(atxp1); -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int atxp1_detect(struct i2c_client *new_client, - struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = new_client->adapter; - - u8 temp; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; - - /* Detect ATXP1, checking if vendor ID registers are all zero */ - if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) && - (i2c_smbus_read_byte_data(new_client, 0x3f) == 0) && - (i2c_smbus_read_byte_data(new_client, 0xfe) == 0) && - (i2c_smbus_read_byte_data(new_client, 0xff) == 0))) - return -ENODEV; - - /* - * No vendor ID, now checking if registers 0x10,0x11 (non-existent) - * showing the same as register 0x00 - */ - temp = i2c_smbus_read_byte_data(new_client, 0x00); - - if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) && - (i2c_smbus_read_byte_data(new_client, 0x11) == temp))) - return -ENODEV; - - /* Get VRM */ - temp = vid_which_vrm(); - - if ((temp != 90) && (temp != 91)) { - dev_err(&adapter->dev, "atxp1: Not supporting VRM %d.%d\n", - temp / 10, temp % 10); - return -ENODEV; - } - - strlcpy(info->type, "atxp1", I2C_NAME_SIZE); - - return 0; -} - static int atxp1_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -314,6 +269,11 @@ static int atxp1_probe(struct i2c_client *client, /* Get VRM */ data->vrm = vid_which_vrm(); + if (data->vrm != 90 && data->vrm != 91) { + dev_err(dev, "atxp1: Not supporting VRM %d.%d\n", + data->vrm / 10, data->vrm % 10); + return -ENODEV; + } data->client = client; mutex_init(&data->update_lock); @@ -342,8 +302,6 @@ static struct i2c_driver atxp1_driver = { }, .probe = atxp1_probe, .id_table = atxp1_id, - .detect = atxp1_detect, - .address_list = normal_i2c, }; module_i2c_driver(atxp1_driver); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index ed303ba3a593..3e03379e7c5d 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -63,7 +63,8 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) #ifdef CONFIG_SMP -#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) +#define for_each_sibling(i, cpu) \ + for_each_cpu(i, topology_sibling_cpumask(cpu)) #else #define for_each_sibling(i, cpu) for (i = 0; false; ) #endif diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c index cb0dcfda958c..07628569547a 100644 --- a/drivers/hwmon/max197.c +++ b/drivers/hwmon/max197.c @@ -324,7 +324,7 @@ static int max197_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id max197_device_ids[] = { +static const struct platform_device_id max197_device_ids[] = { { "max197", max197 }, { "max199", max199 }, { } diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 68800115876b..dc0b76c5e302 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ 
b/drivers/hwmon/ntc_thermistor.c @@ -53,6 +53,7 @@ static const struct platform_device_id ntc_thermistor_id[] = { { "ncp03wb473", TYPE_NCPXXWB473 }, { "ncp15wl333", TYPE_NCPXXWL333 }, { "b57330v2103", TYPE_B57330V2103}, + { "ncp03wf104", TYPE_NCPXXWF104 }, { }, }; @@ -135,6 +136,43 @@ static const struct ntc_compensation ncpXXwl333[] = { { .temp_c = 125, .ohm = 707 }, }; +static const struct ntc_compensation ncpXXwf104[] = { + { .temp_c = -40, .ohm = 4397119 }, + { .temp_c = -35, .ohm = 3088599 }, + { .temp_c = -30, .ohm = 2197225 }, + { .temp_c = -25, .ohm = 1581881 }, + { .temp_c = -20, .ohm = 1151037 }, + { .temp_c = -15, .ohm = 846579 }, + { .temp_c = -10, .ohm = 628988 }, + { .temp_c = -5, .ohm = 471632 }, + { .temp_c = 0, .ohm = 357012 }, + { .temp_c = 5, .ohm = 272500 }, + { .temp_c = 10, .ohm = 209710 }, + { .temp_c = 15, .ohm = 162651 }, + { .temp_c = 20, .ohm = 127080 }, + { .temp_c = 25, .ohm = 100000 }, + { .temp_c = 30, .ohm = 79222 }, + { .temp_c = 35, .ohm = 63167 }, + { .temp_c = 40, .ohm = 50677 }, + { .temp_c = 45, .ohm = 40904 }, + { .temp_c = 50, .ohm = 33195 }, + { .temp_c = 55, .ohm = 27091 }, + { .temp_c = 60, .ohm = 22224 }, + { .temp_c = 65, .ohm = 18323 }, + { .temp_c = 70, .ohm = 15184 }, + { .temp_c = 75, .ohm = 12635 }, + { .temp_c = 80, .ohm = 10566 }, + { .temp_c = 85, .ohm = 8873 }, + { .temp_c = 90, .ohm = 7481 }, + { .temp_c = 95, .ohm = 6337 }, + { .temp_c = 100, .ohm = 5384 }, + { .temp_c = 105, .ohm = 4594 }, + { .temp_c = 110, .ohm = 3934 }, + { .temp_c = 115, .ohm = 3380 }, + { .temp_c = 120, .ohm = 2916 }, + { .temp_c = 125, .ohm = 2522 }, +}; + /* * The following compensation table is from the specification of EPCOS NTC * Thermistors Datasheet @@ -190,20 +228,21 @@ struct ntc_data { static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) { struct iio_channel *channel = pdata->chan; - s64 result; - int val, ret; + int raw, uv, ret; - ret = iio_read_channel_raw(channel, &val); + ret = iio_read_channel_raw(channel, &raw); if (ret < 0) { pr_err("read channel() error: %d\n", ret); return ret; } - /* unit: mV */ - result = pdata->pullup_uv * (s64) val; - result >>= 12; + ret = iio_convert_raw_to_processed(channel, raw, &uv, 1000); + if (ret < 0) { + /* Assume 12 bit ADC with vref at pullup_uv */ + uv = (pdata->pullup_uv * (s64)raw) >> 12; + } - return (int)result; + return uv; } static const struct of_device_id ntc_match[] = { @@ -219,6 +258,8 @@ static const struct of_device_id ntc_match[] = { .data = &ntc_thermistor_id[4] }, { .compatible = "epcos,b57330v2103", .data = &ntc_thermistor_id[5]}, + { .compatible = "murata,ncp03wf104", + .data = &ntc_thermistor_id[6] }, /* Usage of vendor name "ntc" is deprecated */ { .compatible = "ntc,ncp15wb473", @@ -309,30 +350,27 @@ static inline u64 div64_u64_safe(u64 dividend, u64 divisor) static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uv) { struct ntc_thermistor_platform_data *pdata = data->pdata; - u64 mv = uv / 1000; - u64 pmv = pdata->pullup_uv / 1000; + u32 puv = pdata->pullup_uv; u64 n, puo, pdo; puo = pdata->pullup_ohm; pdo = pdata->pulldown_ohm; - if (mv == 0) { - if (pdata->connect == NTC_CONNECTED_POSITIVE) - return INT_MAX; - return 0; - } - if (mv >= pmv) + if (uv == 0) + return (pdata->connect == NTC_CONNECTED_POSITIVE) ? + INT_MAX : 0; + if (uv >= puv) return (pdata->connect == NTC_CONNECTED_POSITIVE) ? 
0 : INT_MAX; if (pdata->connect == NTC_CONNECTED_POSITIVE && puo == 0) - n = div64_u64_safe(pdo * (pmv - mv), mv); + n = div_u64(pdo * (puv - uv), uv); else if (pdata->connect == NTC_CONNECTED_GROUND && pdo == 0) - n = div64_u64_safe(puo * mv, pmv - mv); + n = div_u64(puo * uv, puv - uv); else if (pdata->connect == NTC_CONNECTED_POSITIVE) - n = div64_u64_safe(pdo * puo * (pmv - mv), - puo * mv - pdo * (pmv - mv)); + n = div64_u64_safe(pdo * puo * (puv - uv), + puo * uv - pdo * (puv - uv)); else - n = div64_u64_safe(pdo * puo * mv, pdo * (pmv - mv) - puo * mv); + n = div64_u64_safe(pdo * puo * uv, pdo * (puv - uv) - puo * uv); if (n > INT_MAX) n = INT_MAX; @@ -567,6 +605,10 @@ static int ntc_thermistor_probe(struct platform_device *pdev) data->comp = b57330v2103; data->n_comp = ARRAY_SIZE(b57330v2103); break; + case TYPE_NCPXXWF104: + data->comp = ncpXXwf104; + data->n_comp = ARRAY_SIZE(ncpXXwf104); + break; default: dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n", pdev_id->driver_data, pdev_id->name); diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index d4f0935daaa1..497a7f822a12 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -1074,7 +1074,7 @@ static int sht15_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id sht15_device_ids[] = { +static const struct platform_device_id sht15_device_ids[] = { { "sht10", sht10 }, { "sht11", sht11 }, { "sht15", sht15 }, diff --git a/drivers/hwmon/tc74.c b/drivers/hwmon/tc74.c new file mode 100644 index 000000000000..d95165158800 --- /dev/null +++ b/drivers/hwmon/tc74.c @@ -0,0 +1,177 @@ +/* + * An hwmon driver for the Microchip TC74 + * + * Copyright 2015 Maciej Szmigiero <mail@maciej.szmigiero.name> + * + * Based on ad7414.c: + * Copyright 2006 Stefan Roese, DENX Software Engineering + * Copyright 2008 Sean MacLennan, PIKA Technologies + * Copyright 2008 Frank Edelhaeuser, Spansion Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
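/*
 * The divider math in get_ohm_of_thermistor() above now stays in
 * microvolts end to end (the old millivolt conversion threw away
 * precision near the rails).  For the plain NTC-to-ground case with
 * no parallel pulldown it reduces to
 *   V = Vpullup * R / (Rpullup + R)  =>  R = Rpullup * V / (Vpullup - V)
 * which is exactly the div_u64(puo * uv, puv - uv) branch.  As a
 * stand-alone sketch (hypothetical helper; caller guarantees
 * 0 < uv < pullup_uv):
 */
static u64 ntc_ohm_to_ground(u64 pullup_ohm, u32 pullup_uv, u32 uv)
{
	return div_u64(pullup_ohm * uv, pullup_uv - uv);
}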
+ */ + +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/i2c.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/sysfs.h> + +/* TC74 registers */ +#define TC74_REG_TEMP 0x00 +#define TC74_REG_CONFIG 0x01 + +struct tc74_data { + struct i2c_client *client; + struct mutex lock; /* atomic read data updates */ + bool valid; /* validity of fields below */ + unsigned long next_update; /* In jiffies */ + s8 temp_input; /* Temp value in dC */ +}; + +static int tc74_update_device(struct device *dev) +{ + struct tc74_data *data = dev_get_drvdata(dev); + struct i2c_client *client = data->client; + int ret; + + ret = mutex_lock_interruptible(&data->lock); + if (ret) + return ret; + + if (time_after(jiffies, data->next_update) || !data->valid) { + s32 value; + + value = i2c_smbus_read_byte_data(client, TC74_REG_CONFIG); + if (value < 0) { + dev_dbg(&client->dev, "TC74_REG_CONFIG read err %d\n", + (int)value); + + ret = value; + goto ret_unlock; + } + + if (!(value & BIT(6))) { + /* not ready yet */ + + ret = -EAGAIN; + goto ret_unlock; + } + + value = i2c_smbus_read_byte_data(client, TC74_REG_TEMP); + if (value < 0) { + dev_dbg(&client->dev, "TC74_REG_TEMP read err %d\n", + (int)value); + + ret = value; + goto ret_unlock; + } + + data->temp_input = value; + data->next_update = jiffies + HZ / 4; + data->valid = true; + } + +ret_unlock: + mutex_unlock(&data->lock); + + return ret; +} + +static ssize_t show_temp_input(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tc74_data *data = dev_get_drvdata(dev); + int ret; + + ret = tc74_update_device(dev); + if (ret) + return ret; + + return sprintf(buf, "%d\n", data->temp_input * 1000); +} +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0); + +static struct attribute *tc74_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(tc74); + +static int tc74_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct device *dev = &client->dev; + struct tc74_data *data; + struct device *hwmon_dev; + s32 conf; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EOPNOTSUPP; + + data = devm_kzalloc(dev, sizeof(struct tc74_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + mutex_init(&data->lock); + + /* Make sure the chip is powered up. 
*/ + conf = i2c_smbus_read_byte_data(client, TC74_REG_CONFIG); + if (conf < 0) { + dev_err(dev, "unable to read config register\n"); + + return conf; + } + + if (conf & 0x3f) { + dev_err(dev, "invalid config register value\n"); + + return -ENODEV; + } + + if (conf & BIT(7)) { + s32 ret; + + conf &= ~BIT(7); + + ret = i2c_smbus_write_byte_data(client, TC74_REG_CONFIG, conf); + if (ret) + dev_warn(dev, "unable to disable STANDBY\n"); + } + + hwmon_dev = devm_hwmon_device_register_with_groups(dev, + client->name, + data, tc74_groups); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id tc74_id[] = { + { "tc74", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, tc74_id); + +static struct i2c_driver tc74_driver = { + .driver = { + .name = "tc74", + }, + .probe = tc74_probe, + .id_table = tc74_id, +}; + +module_i2c_driver(tc74_driver); + +MODULE_AUTHOR("Maciej Szmigiero <mail@maciej.szmigiero.name>"); + +MODULE_DESCRIPTION("TC74 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 67cbec6796a0..630bce68bf38 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -245,7 +245,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && PIIX4_dev->revision >= 0x41) || (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && - PIIX4_dev->device == 0x790b && + PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && PIIX4_dev->revision >= 0x49)) smb_en = 0x00; else @@ -545,7 +545,7 @@ static const struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4) }, { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 987c124432c5..fc2ee8213fb6 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -107,7 +107,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) if (sb->access_mode == ACPI_I2C_10BIT_MODE) info->flags |= I2C_CLIENT_TEN; } - } else if (info->irq < 0) { + } else if (!info->irq) { struct resource r; if (acpi_dev_resource_interrupt(ares, 0, &r)) @@ -134,7 +134,6 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level, memset(&info, 0, sizeof(info)); info.fwnode = acpi_fwnode_handle(adev); - info.irq = -1; INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, @@ -632,8 +631,13 @@ static int i2c_device_probe(struct device *dev) if (!client) return 0; - if (!client->irq && dev->of_node) { - int irq = of_irq_get(dev->of_node, 0); + if (!client->irq) { + int irq = -ENOENT; + + if (dev->of_node) + irq = of_irq_get(dev->of_node, 0); + else if (ACPI_COMPANION(dev)) + irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0); if (irq == -EPROBE_DEFER) return irq; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 38339d220d7f..746cdf56bc76 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -457,8 +457,8 @@ static void resolve_cb(int status, struct sockaddr *src_addr, complete(&((struct resolve_cb_context *)context)->comp); } -int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac, - u16 
*vlan_id) +int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, + u8 *dmac, u16 *vlan_id) { int ret = 0; struct rdma_dev_addr dev_addr; diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index f6d29614cb01..c7dcfe4ca5f1 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -54,7 +54,7 @@ static DEFINE_SPINLOCK(ib_agent_port_list_lock); static LIST_HEAD(ib_agent_port_list); static struct ib_agent_port_private * -__ib_get_agent_port(struct ib_device *device, int port_num) +__ib_get_agent_port(const struct ib_device *device, int port_num) { struct ib_agent_port_private *entry; @@ -67,7 +67,7 @@ __ib_get_agent_port(struct ib_device *device, int port_num) } static struct ib_agent_port_private * -ib_get_agent_port(struct ib_device *device, int port_num) +ib_get_agent_port(const struct ib_device *device, int port_num) { struct ib_agent_port_private *entry; unsigned long flags; @@ -78,9 +78,9 @@ ib_get_agent_port(struct ib_device *device, int port_num) return entry; } -void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, - struct ib_wc *wc, struct ib_device *device, - int port_num, int qpn) +void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh, + const struct ib_wc *wc, const struct ib_device *device, + int port_num, int qpn, size_t resp_mad_len, bool opa) { struct ib_agent_port_private *port_priv; struct ib_mad_agent *agent; @@ -106,15 +106,20 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, return; } + if (opa && mad_hdr->base_version != OPA_MGMT_BASE_VERSION) + resp_mad_len = IB_MGMT_MAD_SIZE; + send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0, - IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, - GFP_KERNEL); + IB_MGMT_MAD_HDR, + resp_mad_len - IB_MGMT_MAD_HDR, + GFP_KERNEL, + mad_hdr->base_version); if (IS_ERR(send_buf)) { dev_err(&device->dev, "ib_create_send_mad error\n"); goto err1; } - memcpy(send_buf->mad, mad, sizeof *mad); + memcpy(send_buf->mad, mad_hdr, resp_mad_len); send_buf->ah = ah; if (device->node_type == RDMA_NODE_IB_SWITCH) { @@ -156,7 +161,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) goto error1; } - if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) { + if (rdma_cap_ib_smi(device, port_num)) { /* Obtain send only MAD agent for SMI QP */ port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, diff --git a/drivers/infiniband/core/agent.h b/drivers/infiniband/core/agent.h index 6669287009c2..65f92bedae44 100644 --- a/drivers/infiniband/core/agent.h +++ b/drivers/infiniband/core/agent.h @@ -44,8 +44,8 @@ extern int ib_agent_port_open(struct ib_device *device, int port_num); extern int ib_agent_port_close(struct ib_device *device, int port_num); -extern void agent_send_response(struct ib_mad *mad, struct ib_grh *grh, - struct ib_wc *wc, struct ib_device *device, - int port_num, int qpn); +extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh, + const struct ib_wc *wc, const struct ib_device *device, + int port_num, int qpn, size_t resp_mad_len, bool opa); #endif /* __AGENT_H_ */ diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 80f6cf2449fb..871da832d016 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -58,17 +58,6 @@ struct ib_update_work { u8 port_num; }; -static inline int start_port(struct ib_device *device) -{ - return (device->node_type == 
RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - -static inline int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? - 0 : device->phys_port_cnt; -} - int ib_get_cached_gid(struct ib_device *device, u8 port_num, int index, @@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.gid_cache[port_num - start_port(device)]; + cache = device->cache.gid_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -96,10 +85,10 @@ int ib_get_cached_gid(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_gid); -int ib_find_cached_gid(struct ib_device *device, - union ib_gid *gid, - u8 *port_num, - u16 *index) +int ib_find_cached_gid(struct ib_device *device, + const union ib_gid *gid, + u8 *port_num, + u16 *index) { struct ib_gid_cache *cache; unsigned long flags; @@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device, read_lock_irqsave(&device->cache.lock, flags); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { cache = device->cache.gid_cache[p]; for (i = 0; i < cache->table_len; ++i) { if (!memcmp(gid, &cache->table[i], sizeof *gid)) { - *port_num = p + start_port(device); + *port_num = p + rdma_start_port(device); if (index) *index = i; ret = 0; @@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device, int ret = -ENOENT; int partial_ix = -1; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device, int i; int ret = -ENOENT; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - *lmc = device->cache.lmc_cache[port_num - start_port(device)]; + *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)]; 
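For reference, the start_port()/end_port() helpers being deleted from cache.c above (and from device.c further down) encode the rule that a switch exposes only port 0 while a CA or router exposes ports 1 through phys_port_cnt; the shared rdma_start_port()/rdma_end_port() accessors presumably preserve those semantics. A hedged sketch of the per-port iteration pattern this conversion standardizes on:

    /*
     * Sketch only: walk every valid port of an ib_device with the new
     * accessors. Per-port arrays such as the pkey/gid caches are then
     * indexed by (port - rdma_start_port(device)).
     */
    static void each_port_example(struct ib_device *device)
    {
    	u8 port;

    	for (port = rdma_start_port(device);
    	     port <= rdma_end_port(device); ++port)
    		pr_info("%s: port %u\n", device->name, port);
    }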
read_unlock_irqrestore(&device->cache.lock, flags); return ret; @@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device, write_lock_irq(&device->cache.lock); - old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; - old_gid_cache = device->cache.gid_cache [port - start_port(device)]; + old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)]; + old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)]; - device->cache.pkey_cache[port - start_port(device)] = pkey_cache; - device->cache.gid_cache [port - start_port(device)] = gid_cache; + device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache; + device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache; - device->cache.lmc_cache[port - start_port(device)] = tprops->lmc; + device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc; write_unlock_irq(&device->cache.lock); @@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device) device->cache.pkey_cache = kmalloc(sizeof *device->cache.pkey_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.gid_cache = kmalloc(sizeof *device->cache.gid_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * - (end_port(device) - - start_port(device) + 1), + (rdma_end_port(device) - + rdma_start_port(device) + 1), GFP_KERNEL); if (!device->cache.pkey_cache || !device->cache.gid_cache || @@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device) goto err; } - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { device->cache.pkey_cache[p] = NULL; device->cache.gid_cache [p] = NULL; - ib_cache_update(device, p + start_port(device)); + ib_cache_update(device, p + rdma_start_port(device)); } INIT_IB_EVENT_HANDLER(&device->cache.event_handler, @@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device) return; err_cache: - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } @@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device) ib_unregister_event_handler(&device->cache.event_handler); flush_workqueue(ib_wq); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 0271608a51c4..dbddddd6fb5d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -267,7 +267,8 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, - GFP_ATOMIC); + GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); @@ -297,7 +298,8 @@ static int cm_alloc_response_msg(struct cm_port *port, m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, - GFP_ATOMIC); + GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if 
(IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); @@ -3759,11 +3761,9 @@ static void cm_add_one(struct ib_device *ib_device) }; unsigned long flags; int ret; + int count = 0; u8 i; - if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB) - return; - cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * ib_device->phys_port_cnt, GFP_KERNEL); if (!cm_dev) @@ -3782,6 +3782,9 @@ static void cm_add_one(struct ib_device *ib_device) set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { + if (!rdma_cap_ib_cm(ib_device, i)) + continue; + port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) goto error1; @@ -3808,7 +3811,13 @@ static void cm_add_one(struct ib_device *ib_device) ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto error3; + + count++; } + + if (!count) + goto free; + ib_set_client_data(ib_device, &cm_client, cm_dev); write_lock_irqsave(&cm.device_lock, flags); @@ -3824,11 +3833,15 @@ error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { + if (!rdma_cap_ib_cm(ib_device, i)) + continue; + port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } +free: device_unregister(cm_dev->device); kfree(cm_dev); } @@ -3852,6 +3865,9 @@ static void cm_remove_one(struct ib_device *ib_device) write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { + if (!rdma_cap_ib_cm(ib_device, i)) + continue; + port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 38ffe0981503..143ded2bbe7c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 18 +static const char * const cma_events[] = { + [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", + [RDMA_CM_EVENT_ADDR_ERROR] = "address error", + [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", + [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", + [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", + [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", + [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", + [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", + [RDMA_CM_EVENT_REJECTED] = "rejected", + [RDMA_CM_EVENT_ESTABLISHED] = "established", + [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", + [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", + [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", + [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", + [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", +}; + +const char *rdma_event_msg(enum rdma_cm_event_type event) +{ + size_t index = event; + + return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? 
+ cma_events[index] : "unrecognized event"; +} +EXPORT_SYMBOL(rdma_event_msg); + static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device); @@ -349,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a return ret; } +static inline int cma_validate_port(struct ib_device *device, u8 port, + union ib_gid *gid, int dev_type) +{ + u8 found_port; + int ret = -ENODEV; + + if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) + return ret; + + if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) + return ret; + + ret = ib_find_cached_gid(device, gid, &found_port, NULL); + if (port != found_port) + return -ENODEV; + + return ret; +} + static int cma_acquire_dev(struct rdma_id_private *id_priv, struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct cma_device *cma_dev; - union ib_gid gid, iboe_gid; + union ib_gid gid, iboe_gid, *gidp; int ret = -ENODEV; - u8 port, found_port; - enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ? - IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; + u8 port; - if (dev_ll != IB_LINK_LAYER_INFINIBAND && + if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; @@ -370,41 +415,36 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof gid); - if (listen_id_priv && - rdma_port_get_link_layer(listen_id_priv->id.device, - listen_id_priv->id.port_num) == dev_ll) { + + if (listen_id_priv) { cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; - if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && - rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) - ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, - &found_port, NULL); - else - ret = ib_find_cached_gid(cma_dev->device, &gid, - &found_port, NULL); + gidp = rdma_protocol_roce(cma_dev->device, port) ? + &iboe_gid : &gid; - if (!ret && (port == found_port)) { - id_priv->id.port_num = found_port; + ret = cma_validate_port(cma_dev->device, port, gidp, + dev_addr->dev_type); + if (!ret) { + id_priv->id.port_num = port; goto out; } } + list_for_each_entry(cma_dev, &dev_list, list) { for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { if (listen_id_priv && listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; - if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) { - if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && - rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) - ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); - else - ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); - - if (!ret && (port == found_port)) { - id_priv->id.port_num = found_port; - goto out; - } + + gidp = rdma_protocol_roce(cma_dev->device, port) ? 
+ &iboe_gid : &gid; + + ret = cma_validate_port(cma_dev->device, port, gidp, + dev_addr->dev_type); + if (!ret) { + id_priv->id.port_num = port; + goto out; } } } @@ -435,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) pkey = ntohs(addr->sib_pkey); list_for_each_entry(cur_dev, &dev_list, list) { - if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) - continue; - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + if (!rdma_cap_af_ib(cur_dev->device, p)) + continue; + if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) continue; @@ -633,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, if (ret) goto out; - if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) - == RDMA_TRANSPORT_IB && - rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) - == IB_LINK_LAYER_ETHERNET) { + BUG_ON(id_priv->cma_dev->device != id_priv->id.device); + + if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL); if (ret) @@ -700,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, int ret; u16 pkey; - if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) == - IB_LINK_LAYER_INFINIBAND) - pkey = ib_addr_get_pkey(dev_addr); - else + if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) pkey = 0xffff; + else + pkey = ib_addr_get_pkey(dev_addr); ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, pkey, &qp_attr->pkey_index); @@ -735,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); - switch (rdma_node_get_transport(id_priv->id.device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); else @@ -745,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { if (!id_priv->cm_id.iw) { qp_attr->qp_access_flags = 0; *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); - break; - default: + } else ret = -ENOSYS; - break; - } return ret; } @@ -945,13 +978,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv) static void cma_cancel_route(struct rdma_id_private *id_priv) { - switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) { - case IB_LINK_LAYER_INFINIBAND: + if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); - break; - default: - break; } } @@ -1023,17 +1052,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) mc = container_of(id_priv->mc_list.next, struct cma_multicast, list); list_del(&mc->list); - switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) { - case IB_LINK_LAYER_INFINIBAND: + if (rdma_cap_ib_mcast(id_priv->cma_dev->device, + id_priv->id.port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); - break; - case IB_LINK_LAYER_ETHERNET: + } else kref_put(&mc->mcref, release_mc); - break; - default: - break; - } } } @@ -1054,17 +1078,12 @@ void 
rdma_destroy_id(struct rdma_cm_id *id) mutex_unlock(&id_priv->handler_mutex); if (id_priv->cma_dev) { - switch (rdma_node_get_transport(id_priv->id.device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.iw) iw_destroy_cm_id(id_priv->cm_id.iw); - break; - default: - break; } cma_leave_mc_groups(id_priv); cma_release_dev(id_priv); @@ -1610,6 +1629,7 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) if (IS_ERR(id)) return PTR_ERR(id); + id->tos = id_priv->tos; id_priv->cm_id.iw = id; memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), @@ -1642,8 +1662,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct rdma_cm_id *id; int ret; - if (cma_family(id_priv) == AF_IB && - rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB) + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, @@ -1984,26 +2003,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) return -EINVAL; atomic_inc(&id_priv->refcount); - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - switch (rdma_port_get_link_layer(id->device, id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ret = cma_resolve_ib_route(id_priv, timeout_ms); - break; - case IB_LINK_LAYER_ETHERNET: - ret = cma_resolve_iboe_route(id_priv); - break; - default: - ret = -ENOSYS; - } - break; - case RDMA_TRANSPORT_IWARP: + if (rdma_cap_ib_sa(id->device, id->port_num)) + ret = cma_resolve_ib_route(id_priv, timeout_ms); + else if (rdma_protocol_roce(id->device, id->port_num)) + ret = cma_resolve_iboe_route(id_priv); + else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_resolve_iw_route(id_priv, timeout_ms); - break; - default: + else ret = -ENOSYS; - break; - } + if (ret) goto err; @@ -2045,7 +2053,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (cma_family(id_priv) == AF_IB && - rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) + !rdma_cap_ib_cm(cur_dev->device, 1)) continue; if (!cma_dev) @@ -2077,7 +2085,7 @@ port_found: goto out; id_priv->id.route.addr.dev_addr.dev_type = - (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ? + (rdma_protocol_ib(cma_dev->device, p)) ? 
ARPHRD_INFINIBAND : ARPHRD_ETHER; rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); @@ -2554,18 +2562,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) id_priv->backlog = backlog; if (id->device) { - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; - break; - default: + } else { ret = -ENOSYS; goto err; } @@ -2857,6 +2862,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv, if (IS_ERR(cm_id)) return PTR_ERR(cm_id); + cm_id->tos = id_priv->tos; id_priv->cm_id.iw = cm_id; memcpy(&cm_id->local_addr, cma_src_addr(id_priv), @@ -2901,20 +2907,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id->device, id->port_num)) ret = cma_connect_iw(id_priv, conn_param); - break; - default: + else ret = -ENOSYS; - break; - } if (ret) goto err; @@ -3017,8 +3018,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, @@ -3034,14 +3034,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) else ret = cma_rep_recv(id_priv); } - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id->device, id->port_num)) ret = cma_accept_iw(id_priv, conn_param); - break; - default: + else ret = -ENOSYS; - break; - } if (ret) goto reject; @@ -3085,8 +3081,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, if (!id_priv->cm_id.ib) return -EINVAL; - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); @@ -3094,15 +3089,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, ret = ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, private_data, private_data_len); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, private_data, private_data_len); - break; - default: + } else ret = -ENOSYS; - break; - } + return ret; } EXPORT_SYMBOL(rdma_reject); @@ -3116,22 +3108,18 @@ int rdma_disconnect(struct rdma_cm_id *id) if (!id_priv->cm_id.ib) return -EINVAL; - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: + if (rdma_cap_ib_cm(id->device, id->port_num)) { ret = cma_modify_qp_err(id_priv); if (ret) goto out; /* Initiate or respond to a disconnect. 
*/ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); - break; - case RDMA_TRANSPORT_IWARP: + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); - break; - default: + } else ret = -EINVAL; - break; - } + out: return ret; } @@ -3377,24 +3365,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, list_add(&mc->list, &id_priv->mc_list); spin_unlock(&id_priv->lock); - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - switch (rdma_port_get_link_layer(id->device, id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ret = cma_join_ib_multicast(id_priv, mc); - break; - case IB_LINK_LAYER_ETHERNET: - kref_init(&mc->mcref); - ret = cma_iboe_join_multicast(id_priv, mc); - break; - default: - ret = -EINVAL; - } - break; - default: + if (rdma_protocol_roce(id->device, id->port_num)) { + kref_init(&mc->mcref); + ret = cma_iboe_join_multicast(id_priv, mc); + } else if (rdma_cap_ib_mcast(id->device, id->port_num)) + ret = cma_join_ib_multicast(id_priv, mc); + else ret = -ENOSYS; - break; - } if (ret) { spin_lock_irq(&id_priv->lock); @@ -3422,19 +3399,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) ib_detach_mcast(id->qp, &mc->multicast.ib->rec.mgid, be16_to_cpu(mc->multicast.ib->rec.mlid)); - if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { - switch (rdma_port_get_link_layer(id->device, id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ib_sa_free_multicast(mc->multicast.ib); - kfree(mc); - break; - case IB_LINK_LAYER_ETHERNET: - kref_put(&mc->mcref, release_mc); - break; - default: - break; - } - } + + BUG_ON(id_priv->cma_dev->device != id->device); + + if (rdma_cap_ib_mcast(id->device, id->port_num)) { + ib_sa_free_multicast(mc->multicast.ib); + kfree(mc); + } else if (rdma_protocol_roce(id->device, id->port_num)) + kref_put(&mc->mcref, release_mc); + return; } } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 18c1ece765f2..9567756ca4f9 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -92,7 +92,8 @@ static int ib_device_check_mandatory(struct ib_device *device) IB_MANDATORY_FUNC(poll_cq), IB_MANDATORY_FUNC(req_notify_cq), IB_MANDATORY_FUNC(get_dma_mr), - IB_MANDATORY_FUNC(dereg_mr) + IB_MANDATORY_FUNC(dereg_mr), + IB_MANDATORY_FUNC(get_port_immutable) }; int i; @@ -151,18 +152,6 @@ static int alloc_name(char *name) return 0; } -static int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - - -static int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
- 0 : device->phys_port_cnt; -} - /** * ib_alloc_device - allocate an IB device struct * @size:size of structure to allocate @@ -222,42 +211,49 @@ static int add_client_context(struct ib_device *device, struct ib_client *client return 0; } -static int read_port_table_lengths(struct ib_device *device) +static int verify_immutable(const struct ib_device *dev, u8 port) { - struct ib_port_attr *tprops = NULL; - int num_ports, ret = -ENOMEM; - u8 port_index; - - tprops = kmalloc(sizeof *tprops, GFP_KERNEL); - if (!tprops) - goto out; - - num_ports = end_port(device) - start_port(device) + 1; + return WARN_ON(!rdma_cap_ib_mad(dev, port) && + rdma_max_mad_size(dev, port) != 0); +} - device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports, - GFP_KERNEL); - device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports, - GFP_KERNEL); - if (!device->pkey_tbl_len || !device->gid_tbl_len) +static int read_port_immutable(struct ib_device *device) +{ + int ret = -ENOMEM; + u8 start_port = rdma_start_port(device); + u8 end_port = rdma_end_port(device); + u8 port; + + /** + * device->port_immutable is indexed directly by the port number to make + * access to this data as efficient as possible. + * + * Therefore port_immutable is declared as a 1 based array with + * potential empty slots at the beginning. + */ + device->port_immutable = kzalloc(sizeof(*device->port_immutable) + * (end_port + 1), + GFP_KERNEL); + if (!device->port_immutable) goto err; - for (port_index = 0; port_index < num_ports; ++port_index) { - ret = ib_query_port(device, port_index + start_port(device), - tprops); + for (port = start_port; port <= end_port; ++port) { + ret = device->get_port_immutable(device, port, + &device->port_immutable[port]); if (ret) goto err; - device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len; - device->gid_tbl_len[port_index] = tprops->gid_tbl_len; + + if (verify_immutable(device, port)) { + ret = -EINVAL; + goto err; + } } ret = 0; goto out; - err: - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); + kfree(device->port_immutable); out: - kfree(tprops); return ret; } @@ -294,9 +290,9 @@ int ib_register_device(struct ib_device *device, spin_lock_init(&device->event_handler_lock); spin_lock_init(&device->client_data_lock); - ret = read_port_table_lengths(device); + ret = read_port_immutable(device); if (ret) { - printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n", + printk(KERN_WARNING "Couldn't create per port immutable data %s\n", device->name); goto out; } @@ -305,8 +301,7 @@ int ib_register_device(struct ib_device *device, if (ret) { printk(KERN_WARNING "Couldn't register device %s with driver model\n", device->name); - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); + kfree(device->port_immutable); goto out; } @@ -348,9 +343,6 @@ void ib_unregister_device(struct ib_device *device) list_del(&device->core_list); - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); - mutex_unlock(&device_mutex); ib_device_unregister_sysfs(device); @@ -558,7 +550,11 @@ EXPORT_SYMBOL(ib_dispatch_event); int ib_query_device(struct ib_device *device, struct ib_device_attr *device_attr) { - return device->query_device(device, device_attr); + struct ib_udata uhw = {.outlen = 0, .inlen = 0}; + + memset(device_attr, 0, sizeof(*device_attr)); + + return device->query_device(device, device_attr, &uhw); } EXPORT_SYMBOL(ib_query_device); @@ -575,7 +571,7 @@ int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr) { - if 
(port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; return device->query_port(device, port_num, port_attr); @@ -653,7 +649,7 @@ int ib_modify_port(struct ib_device *device, if (!device->modify_port) return -ENOSYS; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; return device->modify_port(device, port_num, port_modify_mask, @@ -676,8 +672,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, union ib_gid tmp_gid; int ret, port, i; - for (port = start_port(device); port <= end_port(device); ++port) { - for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) { + for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { + for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid); if (ret) return ret; @@ -709,7 +705,7 @@ int ib_find_pkey(struct ib_device *device, u16 tmp_pkey; int partial_ix = -1; - for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) { + for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 74c30f4c557e..a4b1466c1bf6 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3,6 +3,7 @@ * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2009 HNR Consulting. All rights reserved. + * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -44,6 +45,7 @@ #include "mad_priv.h" #include "mad_rmpp.h" #include "smi.h" +#include "opa_smi.h" #include "agent.h" MODULE_LICENSE("Dual BSD/GPL"); @@ -59,8 +61,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests module_param_named(recv_queue_size, mad_recvq_size, int, 0444); MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); -static struct kmem_cache *ib_mad_cache; - static struct list_head ib_mad_port_list; static u32 ib_mad_client_id = 0; @@ -73,7 +73,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method, static void remove_mad_reg_req(struct ib_mad_agent_private *priv); static struct ib_mad_agent_private *find_mad_agent( struct ib_mad_port_private *port_priv, - struct ib_mad *mad); + const struct ib_mad_hdr *mad); static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad); static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); @@ -179,12 +179,12 @@ static int is_vendor_method_in_use( return 0; } -int ib_response_mad(struct ib_mad *mad) +int ib_response_mad(const struct ib_mad_hdr *hdr) { - return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) || - (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || - ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) && - (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP))); + return ((hdr->method & IB_MGMT_METHOD_RESP) || + (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || + ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && + (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); } EXPORT_SYMBOL(ib_response_mad); @@ -717,6 +717,32 @@ static void build_smp_wc(struct ib_qp *qp, wc->port_num = port_num; } +static size_t mad_priv_size(const struct ib_mad_private *mp) +{ + return sizeof(struct ib_mad_private) + mp->mad_size; +} + +static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags) +{ + size_t size = sizeof(struct ib_mad_private) + mad_size; + struct ib_mad_private *ret = kzalloc(size, flags); + + if (ret) + ret->mad_size = mad_size; + + return ret; +} + +static size_t port_mad_size(const struct ib_mad_port_private *port_priv) +{ + return rdma_max_mad_size(port_priv->device, port_priv->port_num); +} + +static size_t mad_priv_dma_size(const struct ib_mad_private *mp) +{ + return sizeof(struct ib_grh) + mp->mad_size; +} + /* * Return 0 if SMP is to be sent * Return 1 if SMP was consumed locally (whether or not solicited) @@ -727,6 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, { int ret = 0; struct ib_smp *smp = mad_send_wr->send_buf.mad; + struct opa_smp *opa_smp = (struct opa_smp *)smp; unsigned long flags; struct ib_mad_local_private *local; struct ib_mad_private *mad_priv; @@ -736,6 +763,11 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, u8 port_num; struct ib_wc mad_wc; struct ib_send_wr *send_wr = &mad_send_wr->send_wr; + size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); + u16 out_mad_pkey_index = 0; + u16 drslid; + bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, + mad_agent_priv->qp_info->port_priv->port_num); if (device->node_type == RDMA_NODE_IB_SWITCH && smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) @@ -749,19 +781,48 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, * If we are at the start of the LID routed part, don't update the * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. 
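The handle_outgoing_dr_smp() hunk that follows splits this validation into an OPA branch and the existing IB branch, but both have the same shape: pick dr_dlid or dr_slid from the direction bit, discard when the permissive-LID/smi_handle_dr_smp_send() check fails, then derive a 16-bit drslid for build_smp_wc(). A condensed sketch of the IB branch (simplified for illustration, not a drop-in replacement):

    /*
     * Condensed from the IB path below: validate a directed-route SMP
     * before posting it, then recover the 16-bit source LID used for
     * the locally built work completion.
     */
    static int ib_dr_smp_precheck(struct ib_smp *smp, struct ib_device *device,
    			      u8 port_num, u16 *drslid)
    {
    	__be16 lid = ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid;

    	if (lid == IB_LID_PERMISSIVE &&
    	    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
    	    IB_SMI_DISCARD)
    		return -EINVAL;	/* invalid directed route */

    	*drslid = be16_to_cpu(smp->dr_slid);
    	return 0;
    }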
*/ - if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == - IB_LID_PERMISSIVE && - smi_handle_dr_smp_send(smp, device->node_type, port_num) == - IB_SMI_DISCARD) { - ret = -EINVAL; - dev_err(&device->dev, "Invalid directed route\n"); - goto out; - } + if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) { + u32 opa_drslid; + + if ((opa_get_smp_direction(opa_smp) + ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == + OPA_LID_PERMISSIVE && + opa_smi_handle_dr_smp_send(opa_smp, device->node_type, + port_num) == IB_SMI_DISCARD) { + ret = -EINVAL; + dev_err(&device->dev, "OPA Invalid directed route\n"); + goto out; + } + opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); + if (opa_drslid != OPA_LID_PERMISSIVE && + opa_drslid & 0xffff0000) { + ret = -EINVAL; + dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", + opa_drslid); + goto out; + } + drslid = (u16)(opa_drslid & 0x0000ffff); - /* Check to post send on QP or process locally */ - if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && - smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) - goto out; + /* Check to post send on QP or process locally */ + if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD && + opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) + goto out; + } else { + if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == + IB_LID_PERMISSIVE && + smi_handle_dr_smp_send(smp, device->node_type, port_num) == + IB_SMI_DISCARD) { + ret = -EINVAL; + dev_err(&device->dev, "Invalid directed route\n"); + goto out; + } + drslid = be16_to_cpu(smp->dr_slid); + + /* Check to post send on QP or process locally */ + if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD && + smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) + goto out; + } local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) { @@ -771,7 +832,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, } local->mad_priv = NULL; local->recv_mad_agent = NULL; - mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC); + mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); if (!mad_priv) { ret = -ENOMEM; dev_err(&device->dev, "No memory for local response MAD\n"); @@ -780,18 +841,25 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, } build_smp_wc(mad_agent_priv->agent.qp, - send_wr->wr_id, be16_to_cpu(smp->dr_slid), + send_wr->wr_id, drslid, send_wr->wr.ud.pkey_index, send_wr->wr.ud.port_num, &mad_wc); + if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { + mad_wc.byte_len = mad_send_wr->send_buf.hdr_len + + mad_send_wr->send_buf.data_len + + sizeof(struct ib_grh); + } + /* No GRH for DR SMP */ ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, - (struct ib_mad *)smp, - (struct ib_mad *)&mad_priv->mad); + (const struct ib_mad_hdr *)smp, mad_size, + (struct ib_mad_hdr *)mad_priv->mad, + &mad_size, &out_mad_pkey_index); switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: - if (ib_response_mad(&mad_priv->mad.mad) && + if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; @@ -801,39 +869,43 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, */ atomic_inc(&mad_agent_priv->refcount); } else - kmem_cache_free(ib_mad_cache, mad_priv); + kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED: - kmem_cache_free(ib_mad_cache, mad_priv); + 
kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS: /* Treat like an incoming receive MAD */ port_priv = ib_get_mad_port(mad_agent_priv->agent.device, mad_agent_priv->agent.port_num); if (port_priv) { - memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad)); + memcpy(mad_priv->mad, smp, mad_priv->mad_size); recv_mad_agent = find_mad_agent(port_priv, - &mad_priv->mad.mad); + (const struct ib_mad_hdr *)mad_priv->mad); } if (!port_priv || !recv_mad_agent) { /* * No receiving agent so drop packet and * generate send completion. */ - kmem_cache_free(ib_mad_cache, mad_priv); + kfree(mad_priv); break; } local->mad_priv = mad_priv; local->recv_mad_agent = recv_mad_agent; break; default: - kmem_cache_free(ib_mad_cache, mad_priv); + kfree(mad_priv); kfree(local); ret = -EINVAL; goto out; } local->mad_send_wr = mad_send_wr; + if (opa) { + local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index; + local->return_wc_byte_len = mad_size; + } /* Reference MAD agent until send side of local completion handled */ atomic_inc(&mad_agent_priv->refcount); /* Queue local completion to local list */ @@ -847,11 +919,11 @@ out: return ret; } -static int get_pad_size(int hdr_len, int data_len) +static int get_pad_size(int hdr_len, int data_len, size_t mad_size) { int seg_size, pad; - seg_size = sizeof(struct ib_mad) - hdr_len; + seg_size = mad_size - hdr_len; if (data_len && seg_size) { pad = seg_size - data_len % seg_size; return pad == seg_size ? 0 : pad; @@ -870,14 +942,15 @@ static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr) } static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, - gfp_t gfp_mask) + size_t mad_size, gfp_t gfp_mask) { struct ib_mad_send_buf *send_buf = &send_wr->send_buf; struct ib_rmpp_mad *rmpp_mad = send_buf->mad; struct ib_rmpp_segment *seg = NULL; int left, seg_size, pad; - send_buf->seg_size = sizeof (struct ib_mad) - send_buf->hdr_len; + send_buf->seg_size = mad_size - send_buf->hdr_len; + send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; seg_size = send_buf->seg_size; pad = send_wr->pad; @@ -910,7 +983,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, return 0; } -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent) +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) { return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); } @@ -920,26 +993,37 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, int rmpp_active, int hdr_len, int data_len, - gfp_t gfp_mask) + gfp_t gfp_mask, + u8 base_version) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_wr_private *mad_send_wr; int pad, message_size, ret, size; void *buf; + size_t mad_size; + bool opa; mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); - pad = get_pad_size(hdr_len, data_len); + + opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); + + if (opa && base_version == OPA_MGMT_BASE_VERSION) + mad_size = sizeof(struct opa_mad); + else + mad_size = sizeof(struct ib_mad); + + pad = get_pad_size(hdr_len, data_len, mad_size); message_size = hdr_len + data_len + pad; if (ib_mad_kernel_rmpp_agent(mad_agent)) { - if (!rmpp_active && message_size > sizeof(struct ib_mad)) + if (!rmpp_active && message_size > mad_size) return ERR_PTR(-EINVAL); } else - if (rmpp_active || message_size > sizeof(struct ib_mad)) + if (rmpp_active || message_size > mad_size) return ERR_PTR(-EINVAL); - size = rmpp_active ? 
hdr_len : sizeof(struct ib_mad); + size = rmpp_active ? hdr_len : mad_size; buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); if (!buf) return ERR_PTR(-ENOMEM); @@ -954,7 +1038,14 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, mad_send_wr->mad_agent_priv = mad_agent_priv; mad_send_wr->sg_list[0].length = hdr_len; mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey; - mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len; + + /* OPA MADs don't have to be the full 2048 bytes */ + if (opa && base_version == OPA_MGMT_BASE_VERSION && + data_len < mad_size - hdr_len) + mad_send_wr->sg_list[1].length = data_len; + else + mad_send_wr->sg_list[1].length = mad_size - hdr_len; + mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey; mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr; @@ -967,7 +1058,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index; if (rmpp_active) { - ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask); + ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); if (ret) { kfree(buf); return ERR_PTR(ret); @@ -1237,7 +1328,7 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) recv_wc); priv = container_of(mad_priv_hdr, struct ib_mad_private, header); - kmem_cache_free(ib_mad_cache, priv); + kfree(priv); } } EXPORT_SYMBOL(ib_free_recv_mad); @@ -1324,7 +1415,7 @@ static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class) } static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, - char *oui) + const char *oui) { int i; @@ -1622,13 +1713,13 @@ out: static struct ib_mad_agent_private * find_mad_agent(struct ib_mad_port_private *port_priv, - struct ib_mad *mad) + const struct ib_mad_hdr *mad_hdr) { struct ib_mad_agent_private *mad_agent = NULL; unsigned long flags; spin_lock_irqsave(&port_priv->reg_lock, flags); - if (ib_response_mad(mad)) { + if (ib_response_mad(mad_hdr)) { u32 hi_tid; struct ib_mad_agent_private *entry; @@ -1636,7 +1727,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, * Routing is based on high 32 bits of transaction ID * of MAD. 
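As this comment says, response routing keys off the upper half of the 64-bit transaction ID; ib_register_mad_agent() stamps each agent with a unique hi_tid, and, by the usual convention (assumed here, not spelled out in this hunk), senders keep their own request id in the low 32 bits. A minimal sketch of that split:

    /*
     * Sketch: a MAD transaction ID is a be64 whose upper 32 bits name
     * the sending agent (hi_tid) and whose lower 32 bits carry the
     * caller's request id.
     */
    static u32 tid_to_agent(__be64 tid)
    {
    	return (u32)(be64_to_cpu(tid) >> 32);
    }

    static __be64 make_tid(u32 hi_tid, u32 request_id)
    {
    	return cpu_to_be64(((u64)hi_tid << 32) | request_id);
    }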
*/ - hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; + hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; list_for_each_entry(entry, &port_priv->agent_list, agent_list) { if (entry->agent.hi_tid == hi_tid) { mad_agent = entry; @@ -1648,45 +1739,45 @@ find_mad_agent(struct ib_mad_port_private *port_priv, struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; - struct ib_vendor_mad *vendor_mad; + const struct ib_vendor_mad *vendor_mad; int index; /* * Routing is based on version, class, and method * For "newer" vendor MADs, also based on OUI */ - if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) + if (mad_hdr->class_version >= MAX_MGMT_VERSION) goto out; - if (!is_vendor_class(mad->mad_hdr.mgmt_class)) { + if (!is_vendor_class(mad_hdr->mgmt_class)) { class = port_priv->version[ - mad->mad_hdr.class_version].class; + mad_hdr->class_version].class; if (!class) goto out; - if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= + if (convert_mgmt_class(mad_hdr->mgmt_class) >= IB_MGMT_MAX_METHODS) goto out; method = class->method_table[convert_mgmt_class( - mad->mad_hdr.mgmt_class)]; + mad_hdr->mgmt_class)]; if (method) - mad_agent = method->agent[mad->mad_hdr.method & + mad_agent = method->agent[mad_hdr->method & ~IB_MGMT_METHOD_RESP]; } else { vendor = port_priv->version[ - mad->mad_hdr.class_version].vendor; + mad_hdr->class_version].vendor; if (!vendor) goto out; vendor_class = vendor->vendor_class[vendor_class_index( - mad->mad_hdr.mgmt_class)]; + mad_hdr->mgmt_class)]; if (!vendor_class) goto out; /* Find matching OUI */ - vendor_mad = (struct ib_vendor_mad *)mad; + vendor_mad = (const struct ib_vendor_mad *)mad_hdr; index = find_vendor_oui(vendor_class, vendor_mad->oui); if (index == -1) goto out; method = vendor_class->method_table[index]; if (method) { - mad_agent = method->agent[mad->mad_hdr.method & + mad_agent = method->agent[mad_hdr->method & ~IB_MGMT_METHOD_RESP]; } } @@ -1708,20 +1799,24 @@ out: return mad_agent; } -static int validate_mad(struct ib_mad *mad, u32 qp_num) +static int validate_mad(const struct ib_mad_hdr *mad_hdr, + const struct ib_mad_qp_info *qp_info, + bool opa) { int valid = 0; + u32 qp_num = qp_info->qp->qp_num; /* Make sure MAD base version is understood */ - if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { - pr_err("MAD received with unsupported base version %d\n", - mad->mad_hdr.base_version); + if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && + (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { + pr_err("MAD received with unsupported base version %d %s\n", + mad_hdr->base_version, opa ? 
"(opa)" : ""); goto out; } /* Filter SMI packets sent to other than QP0 */ - if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || - (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { + if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || + (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { if (qp_num == 0) valid = 1; } else { @@ -1734,8 +1829,8 @@ out: return valid; } -static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_hdr *mad_hdr) +static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_hdr *mad_hdr) { struct ib_rmpp_mad *rmpp_mad; @@ -1747,16 +1842,16 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); } -static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc) +static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, + const struct ib_mad_recv_wc *rwc) { - return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class == + return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == rwc->recv_buf.mad->mad_hdr.mgmt_class; } -static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc ) +static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_send_wr_private *wr, + const struct ib_mad_recv_wc *rwc ) { struct ib_ah_attr attr; u8 send_resp, rcv_resp; @@ -1765,8 +1860,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, u8 port_num = mad_agent_priv->agent.port_num; u8 lmc; - send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad); - rcv_resp = ib_response_mad(rwc->recv_buf.mad); + send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); + rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); if (send_resp == rcv_resp) /* both requests, or both responses. GIDs different */ @@ -1811,22 +1906,22 @@ static inline int is_direct(u8 class) } struct ib_mad_send_wr_private* -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *wc) +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_recv_wc *wc) { struct ib_mad_send_wr_private *wr; - struct ib_mad *mad; + const struct ib_mad_hdr *mad_hdr; - mad = (struct ib_mad *)wc->recv_buf.mad; + mad_hdr = &wc->recv_buf.mad->mad_hdr; list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { - if ((wr->tid == mad->mad_hdr.tid) && + if ((wr->tid == mad_hdr->tid) && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. */ - (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || + (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) return (wr->status == IB_WC_SUCCESS) ? wr : NULL; } @@ -1836,15 +1931,15 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, * been notified that the send has completed */ list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { - if (is_data_mad(mad_agent_priv, wr->send_buf.mad) && - wr->tid == mad->mad_hdr.tid && + if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && + wr->tid == mad_hdr->tid && wr->timeout && rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs. 
*/ - (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) || + (is_direct(mad_hdr->mgmt_class) || rcv_has_same_gid(mad_agent_priv, wr, wc))) /* Verify request has not been canceled */ return (wr->status == IB_WC_SUCCESS) ? wr : NULL; @@ -1879,7 +1974,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, } /* Complete corresponding request */ - if (ib_response_mad(mad_recv_wc->recv_buf.mad)) { + if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { @@ -1924,26 +2019,163 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, } } -static bool generate_unmatched_resp(struct ib_mad_private *recv, - struct ib_mad_private *response) +static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, + const struct ib_mad_qp_info *qp_info, + const struct ib_wc *wc, + int port_num, + struct ib_mad_private *recv, + struct ib_mad_private *response) +{ + enum smi_forward_action retsmi; + struct ib_smp *smp = (struct ib_smp *)recv->mad; + + if (smi_handle_dr_smp_recv(smp, + port_priv->device->node_type, + port_num, + port_priv->device->phys_port_cnt) == + IB_SMI_DISCARD) + return IB_SMI_DISCARD; + + retsmi = smi_check_forward_dr_smp(smp); + if (retsmi == IB_SMI_LOCAL) + return IB_SMI_HANDLE; + + if (retsmi == IB_SMI_SEND) { /* don't forward */ + if (smi_handle_dr_smp_send(smp, + port_priv->device->node_type, + port_num) == IB_SMI_DISCARD) + return IB_SMI_DISCARD; + + if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) + return IB_SMI_DISCARD; + } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { + /* forward case for switches */ + memcpy(response, recv, mad_priv_size(response)); + response->header.recv_wc.wc = &response->header.wc; + response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; + response->header.recv_wc.recv_buf.grh = &response->grh; + + agent_send_response((const struct ib_mad_hdr *)response->mad, + &response->grh, wc, + port_priv->device, + smi_get_fwd_port(smp), + qp_info->qp->qp_num, + response->mad_size, + false); + + return IB_SMI_DISCARD; + } + return IB_SMI_HANDLE; +} + +static bool generate_unmatched_resp(const struct ib_mad_private *recv, + struct ib_mad_private *response, + size_t *resp_len, bool opa) { - if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || - recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { - memcpy(response, recv, sizeof *response); + const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; + struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; + + if (recv_hdr->method == IB_MGMT_METHOD_GET || + recv_hdr->method == IB_MGMT_METHOD_SET) { + memcpy(response, recv, mad_priv_size(response)); response->header.recv_wc.wc = &response->header.wc; - response->header.recv_wc.recv_buf.mad = &response->mad.mad; + response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; response->header.recv_wc.recv_buf.grh = &response->grh; - response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; - response->mad.mad.mad_hdr.status = - cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); - if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) - response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; + resp_hdr->method = IB_MGMT_METHOD_GET_RESP; + resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); + if (recv_hdr->mgmt_class == 
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + resp_hdr->status |= IB_SMP_DIRECTION; + + if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { + if (recv_hdr->mgmt_class == + IB_MGMT_CLASS_SUBN_LID_ROUTED || + recv_hdr->mgmt_class == + IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + *resp_len = opa_get_smp_header_size( + (struct opa_smp *)recv->mad); + else + *resp_len = sizeof(struct ib_mad_hdr); + } return true; } else { return false; } } + +static enum smi_action +handle_opa_smi(struct ib_mad_port_private *port_priv, + struct ib_mad_qp_info *qp_info, + struct ib_wc *wc, + int port_num, + struct ib_mad_private *recv, + struct ib_mad_private *response) +{ + enum smi_forward_action retsmi; + struct opa_smp *smp = (struct opa_smp *)recv->mad; + + if (opa_smi_handle_dr_smp_recv(smp, + port_priv->device->node_type, + port_num, + port_priv->device->phys_port_cnt) == + IB_SMI_DISCARD) + return IB_SMI_DISCARD; + + retsmi = opa_smi_check_forward_dr_smp(smp); + if (retsmi == IB_SMI_LOCAL) + return IB_SMI_HANDLE; + + if (retsmi == IB_SMI_SEND) { /* don't forward */ + if (opa_smi_handle_dr_smp_send(smp, + port_priv->device->node_type, + port_num) == IB_SMI_DISCARD) + return IB_SMI_DISCARD; + + if (opa_smi_check_local_smp(smp, port_priv->device) == + IB_SMI_DISCARD) + return IB_SMI_DISCARD; + + } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { + /* forward case for switches */ + memcpy(response, recv, mad_priv_size(response)); + response->header.recv_wc.wc = &response->header.wc; + response->header.recv_wc.recv_buf.opa_mad = + (struct opa_mad *)response->mad; + response->header.recv_wc.recv_buf.grh = &response->grh; + + agent_send_response((const struct ib_mad_hdr *)response->mad, + &response->grh, wc, + port_priv->device, + opa_smi_get_fwd_port(smp), + qp_info->qp->qp_num, + recv->header.wc.byte_len, + true); + + return IB_SMI_DISCARD; + } + + return IB_SMI_HANDLE; +} + +static enum smi_action +handle_smi(struct ib_mad_port_private *port_priv, + struct ib_mad_qp_info *qp_info, + struct ib_wc *wc, + int port_num, + struct ib_mad_private *recv, + struct ib_mad_private *response, + bool opa) +{ + struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; + + if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && + mad_hdr->class_version == OPA_SMI_CLASS_VERSION) + return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, + response); + + return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); +} + static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, struct ib_wc *wc) { @@ -1954,35 +2186,49 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, struct ib_mad_agent_private *mad_agent; int port_num; int ret = IB_MAD_RESULT_SUCCESS; + size_t mad_size; + u16 resp_mad_pkey_index = 0; + bool opa; mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; qp_info = mad_list->mad_queue->qp_info; dequeue_mad(mad_list); + opa = rdma_cap_opa_mad(qp_info->port_priv->device, + qp_info->port_priv->port_num); + mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); recv = container_of(mad_priv_hdr, struct ib_mad_private, header); ib_dma_unmap_single(port_priv->device, recv->header.mapping, - sizeof(struct ib_mad_private) - - sizeof(struct ib_mad_private_header), + mad_priv_dma_size(recv), DMA_FROM_DEVICE); /* Setup MAD receive work completion from "normal" work completion */ recv->header.wc = *wc; recv->header.recv_wc.wc = &recv->header.wc; - recv->header.recv_wc.mad_len = sizeof(struct ib_mad); - 
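
handle_smi() is the single dispatch point between the two fabrics: a port that carries OPA MADs can still see plain IB SMPs, so the choice is made per packet from the MAD header, not per port alone. A sketch of the predicate (OPA_MGMT_BASE_VERSION is 0x80 in the matching headers):

    /* Sketch: per-packet OPA vs. IB dispatch, as used by handle_smi(). */
    static bool is_opa_smp(const struct ib_mad_hdr *hdr, bool port_has_opa)
    {
            return port_has_opa &&
                   hdr->base_version == OPA_MGMT_BASE_VERSION &&
                   hdr->class_version == OPA_SMI_CLASS_VERSION;
    }
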
recv->header.recv_wc.recv_buf.mad = &recv->mad.mad; + + if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { + recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); + recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); + } else { + recv->header.recv_wc.mad_len = sizeof(struct ib_mad); + recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); + } + + recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; recv->header.recv_wc.recv_buf.grh = &recv->grh; if (atomic_read(&qp_info->snoop_count)) snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); /* Validate MAD */ - if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) + if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) goto out; - response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); + mad_size = recv->mad_size; + response = alloc_mad_private(mad_size, GFP_KERNEL); if (!response) { dev_err(&port_priv->device->dev, "ib_mad_recv_done_handler no memory for response buffer\n"); @@ -1994,69 +2240,43 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, else port_num = port_priv->port_num; - if (recv->mad.mad.mad_hdr.mgmt_class == + if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { - enum smi_forward_action retsmi; - - if (smi_handle_dr_smp_recv(&recv->mad.smp, - port_priv->device->node_type, - port_num, - port_priv->device->phys_port_cnt) == - IB_SMI_DISCARD) + if (handle_smi(port_priv, qp_info, wc, port_num, recv, + response, opa) + == IB_SMI_DISCARD) goto out; - - retsmi = smi_check_forward_dr_smp(&recv->mad.smp); - if (retsmi == IB_SMI_LOCAL) - goto local; - - if (retsmi == IB_SMI_SEND) { /* don't forward */ - if (smi_handle_dr_smp_send(&recv->mad.smp, - port_priv->device->node_type, - port_num) == IB_SMI_DISCARD) - goto out; - - if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD) - goto out; - } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { - /* forward case for switches */ - memcpy(response, recv, sizeof(*response)); - response->header.recv_wc.wc = &response->header.wc; - response->header.recv_wc.recv_buf.mad = &response->mad.mad; - response->header.recv_wc.recv_buf.grh = &response->grh; - - agent_send_response(&response->mad.mad, - &response->grh, wc, - port_priv->device, - smi_get_fwd_port(&recv->mad.smp), - qp_info->qp->qp_num); - - goto out; - } } -local: /* Give driver "right of first refusal" on incoming MAD */ if (port_priv->device->process_mad) { ret = port_priv->device->process_mad(port_priv->device, 0, port_priv->port_num, wc, &recv->grh, - &recv->mad.mad, - &response->mad.mad); + (const struct ib_mad_hdr *)recv->mad, + recv->mad_size, + (struct ib_mad_hdr *)response->mad, + &mad_size, &resp_mad_pkey_index); + + if (opa) + wc->pkey_index = resp_mad_pkey_index; + if (ret & IB_MAD_RESULT_SUCCESS) { if (ret & IB_MAD_RESULT_CONSUMED) goto out; if (ret & IB_MAD_RESULT_REPLY) { - agent_send_response(&response->mad.mad, + agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, port_priv->device, port_num, - qp_info->qp->qp_num); + qp_info->qp->qp_num, + mad_size, opa); goto out; } } } - mad_agent = find_mad_agent(port_priv, &recv->mad.mad); + mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); if (mad_agent) { ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); /* @@ -2065,17 +2285,17 @@ local: */ recv = NULL; } else if ((ret & IB_MAD_RESULT_SUCCESS) && - 
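
The process_mad() call above uses the widened driver hook: const header pointers plus explicit in/out sizes, so one entry point can serve 256-byte IB MADs and larger OPA MADs, and OPA responders can also return a P_Key index. A minimal driver-side stub, sketched against the new signature (my_process_mad is a hypothetical name):

    /* Sketch: a driver honoring the widened process_mad() contract. */
    static int my_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                              const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                              const struct ib_mad_hdr *in_mad, size_t in_mad_size,
                              struct ib_mad_hdr *out_mad, size_t *out_mad_size,
                              u16 *out_mad_pkey_index)
    {
            if (in_mad_size < sizeof(struct ib_mad_hdr))
                    return IB_MAD_RESULT_FAILURE;
            /* ... build the reply in out_mad, set *out_mad_size ... */
            return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
    }
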
generate_unmatched_resp(recv, response)) { - agent_send_response(&response->mad.mad, &recv->grh, wc, - port_priv->device, port_num, qp_info->qp->qp_num); + generate_unmatched_resp(recv, response, &mad_size, opa)) { + agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, + port_priv->device, port_num, + qp_info->qp->qp_num, mad_size, opa); } out: /* Post another receive request for this QP */ if (response) { ib_mad_post_receive_mads(qp_info, response); - if (recv) - kmem_cache_free(ib_mad_cache, recv); + kfree(recv); } else ib_mad_post_receive_mads(qp_info, recv); } @@ -2411,7 +2631,8 @@ find_send_wr(struct ib_mad_agent_private *mad_agent_priv, list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, agent_list) { - if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && + if (is_rmpp_data_mad(mad_agent_priv, + mad_send_wr->send_buf.mad) && &mad_send_wr->send_buf == send_buf) return mad_send_wr; } @@ -2468,10 +2689,14 @@ static void local_completions(struct work_struct *work) int free_mad; struct ib_wc wc; struct ib_mad_send_wc mad_send_wc; + bool opa; mad_agent_priv = container_of(work, struct ib_mad_agent_private, local_work); + opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, + mad_agent_priv->qp_info->port_priv->port_num); + spin_lock_irqsave(&mad_agent_priv->lock, flags); while (!list_empty(&mad_agent_priv->local_list)) { local = list_entry(mad_agent_priv->local_list.next, @@ -2481,6 +2706,7 @@ static void local_completions(struct work_struct *work) spin_unlock_irqrestore(&mad_agent_priv->lock, flags); free_mad = 0; if (local->mad_priv) { + u8 base_version; recv_mad_agent = local->recv_mad_agent; if (!recv_mad_agent) { dev_err(&mad_agent_priv->agent.device->dev, @@ -2496,17 +2722,26 @@ static void local_completions(struct work_struct *work) build_smp_wc(recv_mad_agent->agent.qp, (unsigned long) local->mad_send_wr, be16_to_cpu(IB_LID_PERMISSIVE), - 0, recv_mad_agent->agent.port_num, &wc); + local->mad_send_wr->send_wr.wr.ud.pkey_index, + recv_mad_agent->agent.port_num, &wc); local->mad_priv->header.recv_wc.wc = &wc; - local->mad_priv->header.recv_wc.mad_len = - sizeof(struct ib_mad); + + base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; + if (opa && base_version == OPA_MGMT_BASE_VERSION) { + local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; + local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); + } else { + local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); + local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); + } + INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); list_add(&local->mad_priv->header.recv_wc.recv_buf.list, &local->mad_priv->header.recv_wc.rmpp_list); local->mad_priv->header.recv_wc.recv_buf.grh = NULL; local->mad_priv->header.recv_wc.recv_buf.mad = - &local->mad_priv->mad.mad; + (struct ib_mad *)local->mad_priv->mad; if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) snoop_recv(recv_mad_agent->qp_info, &local->mad_priv->header.recv_wc, @@ -2534,7 +2769,7 @@ local_send_completion: spin_lock_irqsave(&mad_agent_priv->lock, flags); atomic_dec(&mad_agent_priv->refcount); if (free_mad) - kmem_cache_free(ib_mad_cache, local->mad_priv); + kfree(local->mad_priv); kfree(local); } spin_unlock_irqrestore(&mad_agent_priv->lock, flags); @@ -2649,7 +2884,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_queue *recv_queue = &qp_info->recv_queue; /* Initialize common scatter list fields */ - 
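
Both receive paths, the interrupt-driven handler above and local_completions() here, now size the work completion the same way: IB MADs stay a fixed 256 bytes, while an OPA MAD's length comes from the completed byte count. The shared shape, sketched (wc_byte_len stands in for the source of the length):

    /* Sketch: sizing the receive work completion per base version. */
    if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION) {
            recv_wc->mad_len      = wc_byte_len - sizeof(struct ib_grh);
            recv_wc->mad_seg_size = sizeof(struct opa_mad);  /* 2048 */
    } else {
            recv_wc->mad_len      = sizeof(struct ib_mad);   /* 256 */
            recv_wc->mad_seg_size = sizeof(struct ib_mad);
    }
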
sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; sg_list.lkey = (*qp_info->port_priv->mr).lkey; /* Initialize common receive WR fields */ @@ -2663,7 +2897,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, mad_priv = mad; mad = NULL; } else { - mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL); + mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), + GFP_ATOMIC); if (!mad_priv) { dev_err(&qp_info->port_priv->device->dev, "No memory for receive buffer\n"); @@ -2671,10 +2906,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, break; } } + sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, &mad_priv->grh, - sizeof *mad_priv - - sizeof mad_priv->header, + mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { @@ -2698,10 +2933,9 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, spin_unlock_irqrestore(&recv_queue->lock, flags); ib_dma_unmap_single(qp_info->port_priv->device, mad_priv->header.mapping, - sizeof *mad_priv - - sizeof mad_priv->header, + mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); - kmem_cache_free(ib_mad_cache, mad_priv); + kfree(mad_priv); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; @@ -2738,10 +2972,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) ib_dma_unmap_single(qp_info->port_priv->device, recv->header.mapping, - sizeof(struct ib_mad_private) - - sizeof(struct ib_mad_private_header), + mad_priv_dma_size(recv), DMA_FROM_DEVICE); - kmem_cache_free(ib_mad_cache, recv); + kfree(recv); } qp_info->recv_queue.count = 0; @@ -2922,6 +3155,14 @@ static int ib_mad_port_open(struct ib_device *device, unsigned long flags; char name[sizeof "ib_mad123"]; int has_smi; + struct ib_cq_init_attr cq_attr = {}; + + if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE)) + return -EFAULT; + + if (WARN_ON(rdma_cap_opa_mad(device, port_num) && + rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE)) + return -EFAULT; /* Create new device info */ port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); @@ -2938,13 +3179,14 @@ static int ib_mad_port_open(struct ib_device *device, init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; - has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND; + has_smi = rdma_cap_ib_smi(device, port_num); if (has_smi) cq_size *= 2; + cq_attr.cqe = cq_size; port_priv->cq = ib_create_cq(port_priv->device, ib_mad_thread_completion_handler, - NULL, port_priv, cq_size, 0); + NULL, port_priv, &cq_attr); if (IS_ERR(port_priv->cq)) { dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); ret = PTR_ERR(port_priv->cq); @@ -3057,9 +3299,6 @@ static void ib_mad_init_device(struct ib_device *device) { int start, end, i; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - if (device->node_type == RDMA_NODE_IB_SWITCH) { start = 0; end = 0; @@ -3069,6 +3308,9 @@ static void ib_mad_init_device(struct ib_device *device) } for (i = start; i <= end; i++) { + if (!rdma_cap_ib_mad(device, i)) + continue; + if (ib_mad_port_open(device, i)) { dev_err(&device->dev, "Couldn't open port %d\n", i); goto error; @@ -3086,40 +3328,39 @@ error_agent: dev_err(&device->dev, "Couldn't close port %d\n", i); error: - i--; + while (--i >= start) { + if (!rdma_cap_ib_mad(device, i)) + continue; - while (i >= start) { if 
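
ib_mad_init_device() now probes each port with rdma_cap_ib_mad() instead of bailing out on the whole device by transport type, and the error unwind mirrors the forward loop so only ports that were actually opened get closed. The pattern, sketched with hypothetical open_port()/close_port() helpers:

    /* Sketch: capability-gated per-port setup with a symmetric unwind. */
    for (i = start; i <= end; i++) {
            if (!rdma_cap_ib_mad(device, i))
                    continue;
            if (open_port(device, i))
                    goto error;
    }
    return;
    error:
    while (--i >= start) {
            if (!rdma_cap_ib_mad(device, i))
                    continue;
            close_port(device, i);
    }
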
(ib_agent_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d for agents\n", i); if (ib_mad_port_close(device, i)) dev_err(&device->dev, "Couldn't close port %d\n", i); - i--; } } static void ib_mad_remove_device(struct ib_device *device) { - int i, num_ports, cur_port; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int start, end, i; if (device->node_type == RDMA_NODE_IB_SWITCH) { - num_ports = 1; - cur_port = 0; + start = 0; + end = 0; } else { - num_ports = device->phys_port_cnt; - cur_port = 1; + start = 1; + end = device->phys_port_cnt; } - for (i = 0; i < num_ports; i++, cur_port++) { - if (ib_agent_port_close(device, cur_port)) + + for (i = start; i <= end; i++) { + if (!rdma_cap_ib_mad(device, i)) + continue; + + if (ib_agent_port_close(device, i)) dev_err(&device->dev, - "Couldn't close port %d for agents\n", - cur_port); - if (ib_mad_port_close(device, cur_port)) - dev_err(&device->dev, "Couldn't close port %d\n", - cur_port); + "Couldn't close port %d for agents\n", i); + if (ib_mad_port_close(device, i)) + dev_err(&device->dev, "Couldn't close port %d\n", i); } } @@ -3131,45 +3372,25 @@ static struct ib_client mad_client = { static int __init ib_mad_init_module(void) { - int ret; - mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE); - ib_mad_cache = kmem_cache_create("ib_mad", - sizeof(struct ib_mad_private), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!ib_mad_cache) { - pr_err("Couldn't create ib_mad cache\n"); - ret = -ENOMEM; - goto error1; - } - INIT_LIST_HEAD(&ib_mad_port_list); if (ib_register_client(&mad_client)) { pr_err("Couldn't register ib_mad client\n"); - ret = -EINVAL; - goto error2; + return -EINVAL; } return 0; - -error2: - kmem_cache_destroy(ib_mad_cache); -error1: - return ret; } static void __exit ib_mad_cleanup_module(void) { ib_unregister_client(&mad_client); - kmem_cache_destroy(ib_mad_cache); } module_init(ib_mad_init_module); diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d1a0b0ee9444..5be89f98928f 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -41,6 +41,7 @@ #include <linux/workqueue.h> #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> +#include <rdma/opa_smi.h> #define IB_MAD_QPS_CORE 2 /* Always QP0 and QP1 as a minimum */ @@ -56,7 +57,7 @@ /* Registration table sizes */ #define MAX_MGMT_CLASS 80 -#define MAX_MGMT_VERSION 8 +#define MAX_MGMT_VERSION 0x83 #define MAX_MGMT_OUI 8 #define MAX_MGMT_VENDOR_RANGE2 (IB_MGMT_CLASS_VENDOR_RANGE2_END - \ IB_MGMT_CLASS_VENDOR_RANGE2_START + 1) @@ -75,12 +76,9 @@ struct ib_mad_private_header { struct ib_mad_private { struct ib_mad_private_header header; + size_t mad_size; struct ib_grh grh; - union { - struct ib_mad mad; - struct ib_rmpp_mad rmpp_mad; - struct ib_smp smp; - } mad; + u8 mad[0]; } __attribute__ ((packed)); struct ib_rmpp_segment { @@ -150,6 +148,7 @@ struct ib_mad_local_private { struct ib_mad_private *mad_priv; struct ib_mad_agent_private *recv_mad_agent; struct ib_mad_send_wr_private *mad_send_wr; + size_t return_wc_byte_len; }; struct ib_mad_mgmt_method_table { @@ -213,8 +212,8 @@ struct ib_mad_port_private { int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); struct ib_mad_send_wr_private * -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct 
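
With the union in struct ib_mad_private replaced by a trailing u8 mad[0], receive buffers are sized per port (256 bytes for IB, 2048 for OPA) and plain kzalloc()/kfree() replace the old fixed-size ib_mad slab cache. alloc_mad_private() itself is not shown in these hunks; a plausible sketch consistent with the new layout, an assumption rather than the patch's code:

    /* Sketch (assumed implementation): variable-size MAD allocation. */
    static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
    {
            struct ib_mad_private *m;

            m = kzalloc(sizeof(*m) + mad_size, flags); /* header + grh + mad[] */
            if (m)
                    m->mad_size = mad_size;
            return m;
    }
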
ib_mad_recv_wc *mad_recv_wc); +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_recv_wc *mad_recv_wc); void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc); diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index f37878c9c06e..382941b46e43 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005 Intel Inc. All rights reserved. * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -67,6 +68,7 @@ struct mad_rmpp_recv { u8 mgmt_class; u8 class_version; u8 method; + u8 base_version; }; static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) @@ -139,7 +141,8 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv, hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, hdr_len, - 0, GFP_KERNEL); + 0, GFP_KERNEL, + IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) return; @@ -165,7 +168,8 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, recv_wc->wc->pkey_index, 1, - hdr_len, 0, GFP_KERNEL); + hdr_len, 0, GFP_KERNEL, + IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) ib_destroy_ah(ah); else { @@ -316,6 +320,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, rmpp_recv->mgmt_class = mad_hdr->mgmt_class; rmpp_recv->class_version = mad_hdr->class_version; rmpp_recv->method = mad_hdr->method; + rmpp_recv->base_version = mad_hdr->base_version; return rmpp_recv; error: kfree(rmpp_recv); @@ -431,14 +436,23 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv) { struct ib_rmpp_mad *rmpp_mad; int hdr_size, data_size, pad; + bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device, + rmpp_recv->agent->qp_info->port_priv->port_num); rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); - data_size = sizeof(struct ib_rmpp_mad) - hdr_size; - pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); - if (pad > IB_MGMT_RMPP_DATA || pad < 0) - pad = 0; + if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) { + data_size = sizeof(struct opa_rmpp_mad) - hdr_size; + pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); + if (pad > OPA_MGMT_RMPP_DATA || pad < 0) + pad = 0; + } else { + data_size = sizeof(struct ib_rmpp_mad) - hdr_size; + pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); + if (pad > IB_MGMT_RMPP_DATA || pad < 0) + pad = 0; + } return hdr_size + rmpp_recv->seg_num * data_size - pad; } @@ -570,13 +584,14 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr) if (mad_send_wr->seg_num == 1) { rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; - paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA - - mad_send_wr->pad; + paylen = (mad_send_wr->send_buf.seg_count * + mad_send_wr->send_buf.seg_rmpp_size) - + mad_send_wr->pad; } if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) { 
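
get_mad_len() now derives the per-segment payload from the base version: IB RMPP segments carry IB_MGMT_RMPP_DATA (220) bytes of data, OPA segments OPA_MGMT_RMPP_DATA (2012, given the 2048-byte MAD and 36-byte RMPP header). The reassembled length is then a simple sum; for example, three IB segments with 100 pad bytes give hdr_size + 3 * 220 - 100:

    /* Sketch: total reassembled RMPP length. */
    len = hdr_size + rmpp_recv->seg_num * data_size - pad;
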
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; - paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad; + paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad; } rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen); diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index fa17b552ff78..1244f02a5c6d 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -780,8 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler, int index; dev = container_of(handler, struct mcast_device, event_handler); - if (rdma_port_get_link_layer(dev->device, event->element.port_num) != - IB_LINK_LAYER_INFINIBAND) + if (!rdma_cap_ib_mcast(dev->device, event->element.port_num)) return; index = event->element.port_num - dev->start_port; @@ -808,9 +807,6 @@ static void mcast_add_one(struct ib_device *device) int i; int count = 0; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port, GFP_KERNEL); if (!dev) @@ -824,8 +820,7 @@ static void mcast_add_one(struct ib_device *device) } for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (rdma_port_get_link_layer(device, dev->start_port + i) != - IB_LINK_LAYER_INFINIBAND) + if (!rdma_cap_ib_mcast(device, dev->start_port + i)) continue; port = &dev->port[i]; port->dev = dev; @@ -863,8 +858,7 @@ static void mcast_remove_one(struct ib_device *device) flush_workqueue(mcast_wq); for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (rdma_port_get_link_layer(device, dev->start_port + i) == - IB_LINK_LAYER_INFINIBAND) { + if (rdma_cap_ib_mcast(device, dev->start_port + i)) { port = &dev->port[i]; deref_port(port); wait_for_completion(&port->comp); diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h new file mode 100644 index 000000000000..62d91bfa4cb7 --- /dev/null +++ b/drivers/infiniband/core/opa_smi.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2014 Intel Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
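
send_next_seg() advertises paylen_newwin from the send buffer's own seg_rmpp_size rather than the IB constant, which is what lets OPA-sized segments reuse the same RMPP engine. The two cases, sketched:

    /* Sketch: RMPP payload advertisement per segment. */
    if (seg_num == 1)                       /* first: total remaining data */
            paylen = seg_count * seg_rmpp_size - pad;
    if (seg_num == seg_count)               /* last: final segment only */
            paylen = seg_rmpp_size - pad;
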
+ * + */ + +#ifndef __OPA_SMI_H_ +#define __OPA_SMI_H_ + +#include <rdma/ib_smi.h> +#include <rdma/opa_smi.h> + +#include "smi.h" + +enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, + int port_num, int phys_port_cnt); +int opa_smi_get_fwd_port(struct opa_smp *smp); +extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); +extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, + u8 node_type, int port_num); + +/* + * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM + * via process_mad + */ +static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp, + struct ib_device *device) +{ + /* C14-9:3 -- We're at the end of the DR segment of path */ + /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */ + return (device->process_mad && + !opa_get_smp_direction(smp) && + (smp->hop_ptr == smp->hop_cnt + 1)) ? + IB_SMI_HANDLE : IB_SMI_DISCARD; +} + +/* + * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM + * via process_mad + */ +static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp *smp, + struct ib_device *device) +{ + /* C14-13:3 -- We're at the end of the DR segment of path */ + /* C14-13:4 -- Hop Pointer == 0 -> give to SM */ + return (device->process_mad && + opa_get_smp_direction(smp) && + !smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD; +} + +#endif /* __OPA_SMI_H_ */ diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index c38f030f0dc9..0fae85062a65 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event struct ib_sa_port *port = &sa_dev->port[event->element.port_num - sa_dev->start_port]; - if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND) + if (!rdma_cap_ib_sa(handler->device, port->port_num)) return; spin_lock_irqsave(&port->ah_lock, flags); @@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, ah_attr->port_num = port_num; ah_attr->static_rate = rec->rate; - force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET; + force_grh = rdma_cap_eth_ah(device, port_num); if (rec->hop_limit > 1 || force_grh) { ah_attr->ah_flags = IB_AH_GRH; @@ -583,7 +583,8 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) query->mad_buf = ib_create_send_mad(query->port->agent, 1, query->sm_ah->pkey_index, 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, - gfp_mask); + gfp_mask, + IB_MGMT_BASE_VERSION); if (IS_ERR(query->mad_buf)) { kref_put(&query->sm_ah->ref, free_sm_ah); return -ENOMEM; @@ -1153,9 +1154,7 @@ static void ib_sa_add_one(struct ib_device *device) { struct ib_sa_device *sa_dev; int s, e, i; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int count = 0; if (device->node_type == RDMA_NODE_IB_SWITCH) s = e = 0; @@ -1175,7 +1174,7 @@ static void ib_sa_add_one(struct ib_device *device) for (i = 0; i <= e - s; ++i) { spin_lock_init(&sa_dev->port[i].ah_lock); - if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND) + if (!rdma_cap_ib_sa(device, i + 1)) continue; sa_dev->port[i].sm_ah = NULL; @@ -1189,8 +1188,13 @@ static void ib_sa_add_one(struct ib_device *device) goto err; INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); + + count++; } + if (!count) + goto free; + ib_set_client_data(device, &sa_client, sa_dev); /* @@ 
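
Every ib_create_send_mad() caller gains a trailing base_version argument so OPA-sized send buffers can be requested; pure IB paths, like alloc_mad() above, pass IB_MGMT_BASE_VERSION (1). A sketch of the updated call shape:

    /* Sketch: allocating a plain IB send buffer under the new API. */
    msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
                             0 /* rmpp_active */, hdr_len, data_len,
                             GFP_KERNEL, IB_MGMT_BASE_VERSION);
    if (IS_ERR(msg))
            return PTR_ERR(msg);
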
-1204,19 +1208,20 @@ static void ib_sa_add_one(struct ib_device *device) if (ib_register_event_handler(&sa_dev->event_handler)) goto err; - for (i = 0; i <= e - s; ++i) - if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) + for (i = 0; i <= e - s; ++i) { + if (rdma_cap_ib_sa(device, i + 1)) update_sm_ah(&sa_dev->port[i].update_task); + } return; err: - while (--i >= 0) - if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) + while (--i >= 0) { + if (rdma_cap_ib_sa(device, i + 1)) ib_unregister_mad_agent(sa_dev->port[i].agent); - + } +free: kfree(sa_dev); - return; } @@ -1233,7 +1238,7 @@ static void ib_sa_remove_one(struct ib_device *device) flush_workqueue(ib_wq); for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { - if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) { + if (rdma_cap_ib_sa(device, i + 1)) { ib_unregister_mad_agent(sa_dev->port[i].agent); if (sa_dev->port[i].sm_ah) kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 5855e4405d9b..368a561d1a5d 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -5,6 +5,7 @@ * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. * Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2014 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -38,85 +39,82 @@ #include <rdma/ib_smi.h> #include "smi.h" - -/* - * Fixup a directed route SMP for sending - * Return 0 if the SMP should be discarded - */ -enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, - u8 node_type, int port_num) +#include "opa_smi.h" + +static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, + u8 *hop_ptr, u8 hop_cnt, + const u8 *initial_path, + const u8 *return_path, + u8 direction, + bool dr_dlid_is_permissive, + bool dr_slid_is_permissive) { - u8 hop_ptr, hop_cnt; - - hop_ptr = smp->hop_ptr; - hop_cnt = smp->hop_cnt; - /* See section 14.2.2.2, Vol 1 IB spec */ /* C14-6 -- valid hop_cnt values are from 0 to 63 */ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) return IB_SMI_DISCARD; - if (!ib_get_smp_direction(smp)) { + if (!direction) { /* C14-9:1 */ - if (hop_cnt && hop_ptr == 0) { - smp->hop_ptr++; - return (smp->initial_path[smp->hop_ptr] == + if (hop_cnt && *hop_ptr == 0) { + (*hop_ptr)++; + return (initial_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:2 */ - if (hop_ptr && hop_ptr < hop_cnt) { + if (*hop_ptr && *hop_ptr < hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) return IB_SMI_DISCARD; - /* smp->return_path set when received */ - smp->hop_ptr++; - return (smp->initial_path[smp->hop_ptr] == + /* return_path set when received */ + (*hop_ptr)++; + return (initial_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:3 -- We're at the end of the DR segment of path */ - if (hop_ptr == hop_cnt) { - /* smp->return_path set when received */ - smp->hop_ptr++; + if (*hop_ptr == hop_cnt) { + /* return_path set when received */ + (*hop_ptr)++; return (node_type == RDMA_NODE_IB_SWITCH || - smp->dr_dlid == IB_LID_PERMISSIVE ? + dr_dlid_is_permissive ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ /* C14-9:5 -- Fail unreasonable hop pointer */ - return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); + return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); } else { /* C14-13:1 */ - if (hop_cnt && hop_ptr == hop_cnt + 1) { - smp->hop_ptr--; - return (smp->return_path[smp->hop_ptr] == + if (hop_cnt && *hop_ptr == hop_cnt + 1) { + (*hop_ptr)--; + return (return_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:2 */ - if (2 <= hop_ptr && hop_ptr <= hop_cnt) { + if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) return IB_SMI_DISCARD; - smp->hop_ptr--; - return (smp->return_path[smp->hop_ptr] == + (*hop_ptr)--; + return (return_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:3 -- at the end of the DR segment of path */ - if (hop_ptr == 1) { - smp->hop_ptr--; + if (*hop_ptr == 1) { + (*hop_ptr)--; /* C14-13:3 -- SMPs destined for SM shouldn't be here */ return (node_type == RDMA_NODE_IB_SWITCH || - smp->dr_slid == IB_LID_PERMISSIVE ? + dr_slid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */ - if (hop_ptr == 0) + if (*hop_ptr == 0) return IB_SMI_HANDLE; /* C14-13:5 -- Check for unreasonable hop pointer */ @@ -125,105 +123,164 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, } /* - * Adjust information for a received SMP - * Return 0 if the SMP should be dropped + * Fixup a directed route SMP for sending + * Return IB_SMI_DISCARD if the SMP should be discarded */ -enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, - int port_num, int phys_port_cnt) +enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, + u8 node_type, int port_num) { - u8 hop_ptr, hop_cnt; + return __smi_handle_dr_smp_send(node_type, port_num, + &smp->hop_ptr, smp->hop_cnt, + smp->initial_path, + smp->return_path, + ib_get_smp_direction(smp), + smp->dr_dlid == IB_LID_PERMISSIVE, + smp->dr_slid == IB_LID_PERMISSIVE); +} - hop_ptr = smp->hop_ptr; - hop_cnt = smp->hop_cnt; +enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, + u8 node_type, int port_num) +{ + return __smi_handle_dr_smp_send(node_type, port_num, + &smp->hop_ptr, smp->hop_cnt, + smp->route.dr.initial_path, + smp->route.dr.return_path, + opa_get_smp_direction(smp), + smp->route.dr.dr_dlid == + OPA_LID_PERMISSIVE, + smp->route.dr.dr_slid == + OPA_LID_PERMISSIVE); +} +static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, + int phys_port_cnt, + u8 *hop_ptr, u8 hop_cnt, + const u8 *initial_path, + u8 *return_path, + u8 direction, + bool dr_dlid_is_permissive, + bool dr_slid_is_permissive) +{ /* See section 14.2.2.2, Vol 1 IB spec */ /* C14-6 -- valid hop_cnt values are from 0 to 63 */ if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) return IB_SMI_DISCARD; - if (!ib_get_smp_direction(smp)) { + if (!direction) { /* C14-9:1 -- sender should have incremented hop_ptr */ - if (hop_cnt && hop_ptr == 0) + if (hop_cnt && *hop_ptr == 0) return IB_SMI_DISCARD; /* C14-9:2 -- intermediate hop */ - if (hop_ptr && hop_ptr < hop_cnt) { + if (*hop_ptr && *hop_ptr < hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) return IB_SMI_DISCARD; - smp->return_path[hop_ptr] = port_num; - /* smp->hop_ptr updated when sending */ - return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ? 
+ return_path[*hop_ptr] = port_num; + /* hop_ptr updated when sending */ + return (initial_path[*hop_ptr+1] <= phys_port_cnt ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:3 -- We're at the end of the DR segment of path */ - if (hop_ptr == hop_cnt) { + if (*hop_ptr == hop_cnt) { if (hop_cnt) - smp->return_path[hop_ptr] = port_num; - /* smp->hop_ptr updated when sending */ + return_path[*hop_ptr] = port_num; + /* hop_ptr updated when sending */ return (node_type == RDMA_NODE_IB_SWITCH || - smp->dr_dlid == IB_LID_PERMISSIVE ? + dr_dlid_is_permissive ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ /* C14-9:5 -- fail unreasonable hop pointer */ - return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); + return (*hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD); } else { /* C14-13:1 */ - if (hop_cnt && hop_ptr == hop_cnt + 1) { - smp->hop_ptr--; - return (smp->return_path[smp->hop_ptr] == + if (hop_cnt && *hop_ptr == hop_cnt + 1) { + (*hop_ptr)--; + return (return_path[*hop_ptr] == port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:2 */ - if (2 <= hop_ptr && hop_ptr <= hop_cnt) { + if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { if (node_type != RDMA_NODE_IB_SWITCH) return IB_SMI_DISCARD; - /* smp->hop_ptr updated when sending */ - return (smp->return_path[hop_ptr-1] <= phys_port_cnt ? + /* hop_ptr updated when sending */ + return (return_path[*hop_ptr-1] <= phys_port_cnt ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:3 -- We're at the end of the DR segment of path */ - if (hop_ptr == 1) { - if (smp->dr_slid == IB_LID_PERMISSIVE) { + if (*hop_ptr == 1) { + if (dr_slid_is_permissive) { /* giving SMP to SM - update hop_ptr */ - smp->hop_ptr--; + (*hop_ptr)--; return IB_SMI_HANDLE; } - /* smp->hop_ptr updated when sending */ + /* hop_ptr updated when sending */ return (node_type == RDMA_NODE_IB_SWITCH ? IB_SMI_HANDLE : IB_SMI_DISCARD); } /* C14-13:4 -- hop_ptr = 0 -> give to SM */ /* C14-13:5 -- Check for unreasonable hop pointer */ - return (hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD); + return (*hop_ptr == 0 ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); } } -enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp) +/* + * Adjust information for a received SMP + * Return IB_SMI_DISCARD if the SMP should be dropped + */ +enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, + int port_num, int phys_port_cnt) { - u8 hop_ptr, hop_cnt; + return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, + &smp->hop_ptr, smp->hop_cnt, + smp->initial_path, + smp->return_path, + ib_get_smp_direction(smp), + smp->dr_dlid == IB_LID_PERMISSIVE, + smp->dr_slid == IB_LID_PERMISSIVE); +} - hop_ptr = smp->hop_ptr; - hop_cnt = smp->hop_cnt; +/* + * Adjust information for a received SMP + * Return IB_SMI_DISCARD if the SMP should be dropped + */ +enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, + int port_num, int phys_port_cnt) +{ + return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, + &smp->hop_ptr, smp->hop_cnt, + smp->route.dr.initial_path, + smp->route.dr.return_path, + opa_get_smp_direction(smp), + smp->route.dr.dr_dlid == + OPA_LID_PERMISSIVE, + smp->route.dr.dr_slid == + OPA_LID_PERMISSIVE); +} - if (!ib_get_smp_direction(smp)) { +static enum smi_forward_action __smi_check_forward_dr_smp(u8 hop_ptr, u8 hop_cnt, + u8 direction, + bool dr_dlid_is_permissive, + bool dr_slid_is_permissive) +{ + if (!direction) { /* C14-9:2 -- intermediate hop */ if (hop_ptr && hop_ptr < hop_cnt) return IB_SMI_FORWARD; /* C14-9:3 -- at the end of the DR segment of path */ if (hop_ptr == hop_cnt) - return (smp->dr_dlid == IB_LID_PERMISSIVE ? + return (dr_dlid_is_permissive ? IB_SMI_SEND : IB_SMI_LOCAL); /* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */ @@ -236,10 +293,29 @@ enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp) /* C14-13:3 -- at the end of the DR segment of path */ if (hop_ptr == 1) - return (smp->dr_slid != IB_LID_PERMISSIVE ? + return (!dr_slid_is_permissive ? IB_SMI_SEND : IB_SMI_LOCAL); } return IB_SMI_LOCAL; + +} + +enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp) +{ + return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt, + ib_get_smp_direction(smp), + smp->dr_dlid == IB_LID_PERMISSIVE, + smp->dr_slid == IB_LID_PERMISSIVE); +} + +enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp) +{ + return __smi_check_forward_dr_smp(smp->hop_ptr, smp->hop_cnt, + opa_get_smp_direction(smp), + smp->route.dr.dr_dlid == + OPA_LID_PERMISSIVE, + smp->route.dr.dr_slid == + OPA_LID_PERMISSIVE); } /* @@ -251,3 +327,13 @@ int smi_get_fwd_port(struct ib_smp *smp) return (!ib_get_smp_direction(smp) ? smp->initial_path[smp->hop_ptr+1] : smp->return_path[smp->hop_ptr-1]); } + +/* + * Return the forwarding port number from initial_path for outgoing SMP and + * from return_path for returning SMP + */ +int opa_smi_get_fwd_port(struct opa_smp *smp) +{ + return !opa_get_smp_direction(smp) ? 
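
The refactor threads hop_ptr by pointer and reduces the permissive-LID tests to booleans, so one directed-route state machine serves both header layouts (smp->dr_dlid vs. smp->route.dr.dr_dlid). The end-of-path decisions it preserves, collapsed into a simplified predicate (the real code also checks node type, direction, and process_mad separately):

    /* Simplified: when a DR SMP is handed to the local SMA/SM.
     * Outbound (C14-9:4):   *hop_ptr == hop_cnt + 1.
     * Returning (C14-13:4): *hop_ptr == 0.
     */
    static bool dr_smp_at_end_of_path(u8 hop_ptr, u8 hop_cnt, u8 direction)
    {
            return direction ? (hop_ptr == 0) : (hop_ptr == hop_cnt + 1);
    }
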
smp->route.dr.initial_path[smp->hop_ptr+1] : + smp->route.dr.return_path[smp->hop_ptr-1]; +} diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index cbd0383f622e..ed6b6c85c334 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -326,6 +326,8 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, int width = (tab_attr->index >> 16) & 0xff; struct ib_mad *in_mad = NULL; struct ib_mad *out_mad = NULL; + size_t mad_size = sizeof(*out_mad); + u16 out_mad_pkey_index = 0; ssize_t ret; if (!p->ibdev->process_mad) @@ -347,7 +349,10 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, in_mad->data[41] = p->port_num; /* PortSelect field */ if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY, - p->port_num, NULL, NULL, in_mad, out_mad) & + p->port_num, NULL, NULL, + (const struct ib_mad_hdr *)in_mad, mad_size, + (struct ib_mad_hdr *)out_mad, &mad_size, + &out_mad_pkey_index) & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) != (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) { ret = -EINVAL; @@ -456,6 +461,7 @@ static void ib_device_release(struct device *device) { struct ib_device *dev = container_of(device, struct ib_device, dev); + kfree(dev->port_immutable); kfree(dev); } diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index f2f63933e8a9..62c24b1452b8 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1253,8 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device) dev_t base; struct ib_ucm_device *ucm_dev; - if (!device->alloc_ucontext || - rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1)) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 45d67e9228d7..ad45469f7582 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -722,26 +722,13 @@ static ssize_t ucma_query_route(struct ucma_file *file, resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; - switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { - case RDMA_TRANSPORT_IB: - switch (rdma_port_get_link_layer(ctx->cm_id->device, - ctx->cm_id->port_num)) { - case IB_LINK_LAYER_INFINIBAND: - ucma_copy_ib_route(&resp, &ctx->cm_id->route); - break; - case IB_LINK_LAYER_ETHERNET: - ucma_copy_iboe_route(&resp, &ctx->cm_id->route); - break; - default: - break; - } - break; - case RDMA_TRANSPORT_IWARP: + + if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) + ucma_copy_ib_route(&resp, &ctx->cm_id->route); + else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) + ucma_copy_iboe_route(&resp, &ctx->cm_id->route); + else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); - break; - default: - break; - } out: if (copy_to_user((void __user *)(unsigned long)cmd.response, diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 928cdd20e2d1..35567fffaa4e 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -99,7 +99,6 @@ struct ib_umad_port { }; struct ib_umad_device { - int start_port, end_port; struct kobject kobj; struct ib_umad_port port[0]; }; @@ -263,20 +262,23 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, { struct ib_mad_recv_buf 
*recv_buf; int left, seg_payload, offset, max_seg_payload; + size_t seg_size; - /* We need enough room to copy the first (or only) MAD segment. */ recv_buf = &packet->recv_wc->recv_buf; - if ((packet->length <= sizeof (*recv_buf->mad) && + seg_size = packet->recv_wc->mad_seg_size; + + /* We need enough room to copy the first (or only) MAD segment. */ + if ((packet->length <= seg_size && count < hdr_size(file) + packet->length) || - (packet->length > sizeof (*recv_buf->mad) && - count < hdr_size(file) + sizeof (*recv_buf->mad))) + (packet->length > seg_size && + count < hdr_size(file) + seg_size)) return -EINVAL; if (copy_to_user(buf, &packet->mad, hdr_size(file))) return -EFAULT; buf += hdr_size(file); - seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad)); + seg_payload = min_t(int, packet->length, seg_size); if (copy_to_user(buf, recv_buf->mad, seg_payload)) return -EFAULT; @@ -293,7 +295,7 @@ static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, return -ENOSPC; } offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); - max_seg_payload = sizeof (struct ib_mad) - offset; + max_seg_payload = seg_size - offset; for (left = packet->length - seg_payload, buf += seg_payload; left; left -= seg_payload, buf += seg_payload) { @@ -426,11 +428,11 @@ static int is_duplicate(struct ib_umad_file *file, * the same TID, reject the second as a duplicate. This is more * restrictive than required by the spec. */ - if (!ib_response_mad((struct ib_mad *) hdr)) { - if (!ib_response_mad((struct ib_mad *) sent_hdr)) + if (!ib_response_mad(hdr)) { + if (!ib_response_mad(sent_hdr)) return 1; continue; - } else if (!ib_response_mad((struct ib_mad *) sent_hdr)) + } else if (!ib_response_mad(sent_hdr)) continue; if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) @@ -451,6 +453,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, struct ib_rmpp_mad *rmpp_mad; __be64 *tid; int ret, data_len, hdr_len, copy_offset, rmpp_active; + u8 base_version; if (count < hdr_size(file) + IB_MGMT_RMPP_HDR) return -EINVAL; @@ -517,11 +520,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, rmpp_active = 0; } + base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version; data_len = count - hdr_size(file) - hdr_len; packet->msg = ib_create_send_mad(agent, be32_to_cpu(packet->mad.hdr.qpn), packet->mad.hdr.pkey_index, rmpp_active, - hdr_len, data_len, GFP_KERNEL); + hdr_len, data_len, GFP_KERNEL, + base_version); if (IS_ERR(packet->msg)) { ret = PTR_ERR(packet->msg); goto err_ah; @@ -1273,16 +1278,10 @@ static void ib_umad_add_one(struct ib_device *device) { struct ib_umad_device *umad_dev; int s, e, i; + int count = 0; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - - if (device->node_type == RDMA_NODE_IB_SWITCH) - s = e = 0; - else { - s = 1; - e = device->phys_port_cnt; - } + s = rdma_start_port(device); + e = rdma_end_port(device); umad_dev = kzalloc(sizeof *umad_dev + (e - s + 1) * sizeof (struct ib_umad_port), @@ -1292,25 +1291,34 @@ static void ib_umad_add_one(struct ib_device *device) kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); - umad_dev->start_port = s; - umad_dev->end_port = e; - for (i = s; i <= e; ++i) { + if (!rdma_cap_ib_mad(device, i)) + continue; + umad_dev->port[i - s].umad_dev = umad_dev; if (ib_umad_init_port(device, i, umad_dev, &umad_dev->port[i - s])) goto err; + + count++; } + if (!count) + goto free; + ib_set_client_data(device, &umad_client, umad_dev); 
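
copy_recv_mad() measures segments with recv_wc->mad_seg_size instead of a hard-coded sizeof(struct ib_mad), so OPA's 2048-byte segments copy out intact; segments after the first omit the class header, hence the smaller per-segment payload. The arithmetic, sketched (packet_len and mgmt_class are stand-in names):

    /* Sketch: copy-out sizes for a multi-segment RMPP receive. */
    size_t seg_size = recv_wc->mad_seg_size;          /* 256 or 2048 */
    int first   = min_t(int, packet_len, seg_size);   /* whole first segment */
    int offset  = ib_get_mad_data_offset(mgmt_class); /* class header bytes */
    int per_seg = seg_size - offset;                  /* data in later segments */
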
return; err: - while (--i >= s) - ib_umad_kill_port(&umad_dev->port[i - s]); + while (--i >= s) { + if (!rdma_cap_ib_mad(device, i)) + continue; + ib_umad_kill_port(&umad_dev->port[i - s]); + } +free: kobject_put(&umad_dev->kobj); } @@ -1322,8 +1330,10 @@ static void ib_umad_remove_one(struct ib_device *device) if (!umad_dev) return; - for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) - ib_umad_kill_port(&umad_dev->port[i]); + for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) { + if (rdma_cap_ib_mad(device, i + rdma_start_port(device))) + ib_umad_kill_port(&umad_dev->port[i]); + } kobject_put(&umad_dev->kobj); } diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index b716b0815644..ba365b6d1e8d 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -259,5 +259,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd); IB_UVERBS_DECLARE_EX_CMD(create_flow); IB_UVERBS_DECLARE_EX_CMD(destroy_flow); IB_UVERBS_DECLARE_EX_CMD(query_device); +IB_UVERBS_DECLARE_EX_CMD(create_cq); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index a9f048990dfc..bbb02ffe87df 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1330,40 +1330,37 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, return in_len; } -ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw, + struct ib_uverbs_ex_create_cq *cmd, + size_t cmd_sz, + int (*cb)(struct ib_uverbs_file *file, + struct ib_ucq_object *obj, + struct ib_uverbs_ex_create_cq_resp *resp, + struct ib_udata *udata, + void *context), + void *context) { - struct ib_uverbs_create_cq cmd; - struct ib_uverbs_create_cq_resp resp; - struct ib_udata udata; struct ib_ucq_object *obj; struct ib_uverbs_event_file *ev_file = NULL; struct ib_cq *cq; int ret; + struct ib_uverbs_ex_create_cq_resp resp; + struct ib_cq_init_attr attr = {}; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - INIT_UDATA(&udata, buf + sizeof cmd, - (unsigned long) cmd.response + sizeof resp, - in_len - sizeof cmd, out_len - sizeof resp); - - if (cmd.comp_vector >= file->device->num_comp_vectors) - return -EINVAL; + if (cmd->comp_vector >= file->device->num_comp_vectors) + return ERR_PTR(-EINVAL); obj = kmalloc(sizeof *obj, GFP_KERNEL); if (!obj) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class); + init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class); down_write(&obj->uobject.mutex); - if (cmd.comp_channel >= 0) { - ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel); + if (cmd->comp_channel >= 0) { + ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel); if (!ev_file) { ret = -EINVAL; goto err; @@ -1376,9 +1373,14 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, INIT_LIST_HEAD(&obj->comp_list); INIT_LIST_HEAD(&obj->async_list); - cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, - cmd.comp_vector, - file->ucontext, &udata); + attr.cqe = cmd->cqe; + attr.comp_vector = cmd->comp_vector; + + if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags)) + attr.flags = cmd->flags; + + cq = file->device->ib_dev->create_cq(file->device->ib_dev, &attr, 
+ file->ucontext, uhw); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_file; @@ -1397,14 +1399,15 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, goto err_free; memset(&resp, 0, sizeof resp); - resp.cq_handle = obj->uobject.id; - resp.cqe = cq->cqe; + resp.base.cq_handle = obj->uobject.id; + resp.base.cqe = cq->cqe; - if (copy_to_user((void __user *) (unsigned long) cmd.response, - &resp, sizeof resp)) { - ret = -EFAULT; - goto err_copy; - } + resp.response_length = offsetof(typeof(resp), response_length) + + sizeof(resp.response_length); + + ret = cb(file, obj, &resp, ucore, context); + if (ret) + goto err_cb; mutex_lock(&file->mutex); list_add_tail(&obj->uobject.list, &file->ucontext->cq_list); @@ -1414,9 +1417,9 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, up_write(&obj->uobject.mutex); - return in_len; + return obj; -err_copy: +err_cb: idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); err_free: @@ -1428,7 +1431,106 @@ err_file: err: put_uobj_write(&obj->uobject); - return ret; + + return ERR_PTR(ret); +} + +static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file, + struct ib_ucq_object *obj, + struct ib_uverbs_ex_create_cq_resp *resp, + struct ib_udata *ucore, void *context) +{ + if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base))) + return -EFAULT; + + return 0; +} + +ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, + const char __user *buf, int in_len, + int out_len) +{ + struct ib_uverbs_create_cq cmd; + struct ib_uverbs_ex_create_cq cmd_ex; + struct ib_uverbs_create_cq_resp resp; + struct ib_udata ucore; + struct ib_udata uhw; + struct ib_ucq_object *obj; + + if (out_len < sizeof(resp)) + return -ENOSPC; + + if (copy_from_user(&cmd, buf, sizeof(cmd))) + return -EFAULT; + + INIT_UDATA(&ucore, buf, cmd.response, sizeof(cmd), sizeof(resp)); + + INIT_UDATA(&uhw, buf + sizeof(cmd), + (unsigned long)cmd.response + sizeof(resp), + in_len - sizeof(cmd), out_len - sizeof(resp)); + + memset(&cmd_ex, 0, sizeof(cmd_ex)); + cmd_ex.user_handle = cmd.user_handle; + cmd_ex.cqe = cmd.cqe; + cmd_ex.comp_vector = cmd.comp_vector; + cmd_ex.comp_channel = cmd.comp_channel; + + obj = create_cq(file, &ucore, &uhw, &cmd_ex, + offsetof(typeof(cmd_ex), comp_channel) + + sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb, + NULL); + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + return in_len; +} + +static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file, + struct ib_ucq_object *obj, + struct ib_uverbs_ex_create_cq_resp *resp, + struct ib_udata *ucore, void *context) +{ + if (ib_copy_to_udata(ucore, resp, resp->response_length)) + return -EFAULT; + + return 0; +} + +int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) +{ + struct ib_uverbs_ex_create_cq_resp resp; + struct ib_uverbs_ex_create_cq cmd; + struct ib_ucq_object *obj; + int err; + + if (ucore->inlen < sizeof(cmd)) + return -EINVAL; + + err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (err) + return err; + + if (cmd.comp_mask) + return -EINVAL; + + if (cmd.reserved) + return -EINVAL; + + if (ucore->outlen < (offsetof(typeof(resp), response_length) + + sizeof(resp.response_length))) + return -ENOSPC; + + obj = create_cq(file, ucore, uhw, &cmd, + min(ucore->inlen, sizeof(cmd)), + ib_uverbs_ex_create_cq_cb, NULL); + + if (IS_ERR(obj)) + return PTR_ERR(obj); + + return 0; } ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, @@ -3324,7 +3426,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, if (ucore->outlen < 
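
create_cq() now backs both the legacy write() path and the extended command; cmd_sz records how much of struct ib_uverbs_ex_create_cq the caller actually supplied, so new fields are read only when present and old binaries keep working. The guard, sketched:

    /* Sketch: read an extended field only if the caller provided it. */
    if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
            attr.flags = cmd->flags;        /* extended caller */
    else
            attr.flags = 0;                 /* legacy caller: field absent */
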
resp.response_length) return -ENOSPC; - err = device->query_device(device, &attr); + memset(&attr, 0, sizeof(attr)); + + err = device->query_device(device, &attr, uhw); if (err) return err; @@ -3348,6 +3452,18 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, #endif resp.response_length += sizeof(resp.odp_caps); + if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) + goto end; + + resp.timestamp_mask = attr.timestamp_mask; + resp.response_length += sizeof(resp.timestamp_mask); + + if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) + goto end; + + resp.hca_core_clock = attr.hca_core_clock; + resp.response_length += sizeof(resp.hca_core_clock); + end: err = ib_copy_to_udata(ucore, &resp, resp.response_length); if (err) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 88cce9bb72fe..f6eef2da7097 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -124,6 +124,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, + [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq, }; static void ib_uverbs_add_one(struct ib_device *device); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index f93eb8da7b5a..bac3fb406a74 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -48,6 +48,71 @@ #include "core_priv.h" +static const char * const ib_events[] = { + [IB_EVENT_CQ_ERR] = "CQ error", + [IB_EVENT_QP_FATAL] = "QP fatal error", + [IB_EVENT_QP_REQ_ERR] = "QP request error", + [IB_EVENT_QP_ACCESS_ERR] = "QP access error", + [IB_EVENT_COMM_EST] = "communication established", + [IB_EVENT_SQ_DRAINED] = "send queue drained", + [IB_EVENT_PATH_MIG] = "path migration successful", + [IB_EVENT_PATH_MIG_ERR] = "path migration error", + [IB_EVENT_DEVICE_FATAL] = "device fatal error", + [IB_EVENT_PORT_ACTIVE] = "port active", + [IB_EVENT_PORT_ERR] = "port error", + [IB_EVENT_LID_CHANGE] = "LID change", + [IB_EVENT_PKEY_CHANGE] = "P_key change", + [IB_EVENT_SM_CHANGE] = "SM change", + [IB_EVENT_SRQ_ERR] = "SRQ error", + [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", + [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", + [IB_EVENT_CLIENT_REREGISTER] = "client reregister", + [IB_EVENT_GID_CHANGE] = "GID changed", +}; + +const char *ib_event_msg(enum ib_event_type event) +{ + size_t index = event; + + return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? 
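
ib_uverbs_ex_query_device() grows its reply one optional field at a time, bumping resp.response_length only when the user buffer can hold the next field; older userspace simply receives a shorter, prefix-compatible response. The idiom, sketched:

    /* Sketch: the extensible-response idiom. */
    resp.response_length = offsetof(typeof(resp), response_length) +
                           sizeof(resp.response_length);
    if (ucore->outlen >= resp.response_length + sizeof(resp.timestamp_mask)) {
            resp.timestamp_mask = attr.timestamp_mask;
            resp.response_length += sizeof(resp.timestamp_mask);
    }
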
+ ib_events[index] : "unrecognized event"; +} +EXPORT_SYMBOL(ib_event_msg); + +static const char * const wc_statuses[] = { + [IB_WC_SUCCESS] = "success", + [IB_WC_LOC_LEN_ERR] = "local length error", + [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", + [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", + [IB_WC_LOC_PROT_ERR] = "local protection error", + [IB_WC_WR_FLUSH_ERR] = "WR flushed", + [IB_WC_MW_BIND_ERR] = "memory management operation error", + [IB_WC_BAD_RESP_ERR] = "bad response error", + [IB_WC_LOC_ACCESS_ERR] = "local access error", + [IB_WC_REM_INV_REQ_ERR] = "invalid request error", + [IB_WC_REM_ACCESS_ERR] = "remote access error", + [IB_WC_REM_OP_ERR] = "remote operation error", + [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", + [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", + [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", + [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", + [IB_WC_REM_ABORT_ERR] = "operation aborted", + [IB_WC_INV_EECN_ERR] = "invalid EE context number", + [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", + [IB_WC_FATAL_ERR] = "fatal error", + [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", + [IB_WC_GENERAL_ERR] = "general error", +}; + +const char *ib_wc_status_msg(enum ib_wc_status status) +{ + size_t index = status; + + return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? + wc_statuses[index] : "unrecognized status"; +} +EXPORT_SYMBOL(ib_wc_status_msg); + __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) { switch (rate) { @@ -192,17 +257,16 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) } EXPORT_SYMBOL(ib_create_ah); -int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, - struct ib_grh *grh, struct ib_ah_attr *ah_attr) +int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, + const struct ib_wc *wc, const struct ib_grh *grh, + struct ib_ah_attr *ah_attr) { u32 flow_class; u16 gid_index; int ret; - int is_eth = (rdma_port_get_link_layer(device, port_num) == - IB_LINK_LAYER_ETHERNET); memset(ah_attr, 0, sizeof *ah_attr); - if (is_eth) { + if (rdma_cap_eth_ah(device, port_num)) { if (!(wc->wc_flags & IB_WC_GRH)) return -EPROTOTYPE; @@ -244,8 +308,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, } EXPORT_SYMBOL(ib_init_ah_from_wc); -struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, - struct ib_grh *grh, u8 port_num) +struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, + const struct ib_grh *grh, u8 port_num) { struct ib_ah_attr ah_attr; int ret; @@ -871,7 +935,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp, union ib_gid sgid; if ((*qp_attr_mask & IB_QP_AV) && - (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) { + (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) { ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num, qp_attr->ah_attr.grh.sgid_index, &sgid); if (ret) @@ -1012,11 +1076,12 @@ EXPORT_SYMBOL(ib_destroy_qp); struct ib_cq *ib_create_cq(struct ib_device *device, ib_comp_handler comp_handler, void (*event_handler)(struct ib_event *, void *), - void *cq_context, int cqe, int comp_vector) + void *cq_context, + const struct ib_cq_init_attr *cq_attr) { struct ib_cq *cq; - cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); + cq = device->create_cq(device, cq_attr, NULL, NULL); if (!IS_ERR(cq)) { cq->device = device; diff --git 
a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index bdf3507810cb..25c3f0085563 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -63,13 +63,16 @@ #include "c2_provider.h" #include "c2_user.h" -static int c2_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) +static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct c2_dev *c2dev = to_c2dev(ibdev); pr_debug("%s:%u\n", __func__, __LINE__); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + *props = c2dev->props; return 0; } @@ -286,13 +289,18 @@ static int c2_destroy_qp(struct ib_qp *ib_qp) return 0; } -static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector, +static struct ib_cq *c2_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; struct c2_cq *cq; int err; + if (attr->flags) + return ERR_PTR(-EINVAL); + cq = kmalloc(sizeof(*cq), GFP_KERNEL); if (!cq) { pr_debug("%s: Unable to allocate CQ\n", __func__); @@ -582,9 +590,13 @@ static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) static int c2_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, + size_t in_mad_size, + struct ib_mad_hdr *out_mad, + size_t *out_mad_size, + u16 *out_mad_pkey_index) { pr_debug("%s:%u\n", __func__, __LINE__); return -ENOSYS; @@ -757,6 +769,23 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) return netdev; } +static int c2_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = c2_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + + return 0; +} + int c2_register_device(struct c2_dev *dev) { int ret = -ENOMEM; @@ -820,6 +849,7 @@ int c2_register_device(struct c2_dev *dev) dev->ibdev.reg_phys_mr = c2_reg_phys_mr; dev->ibdev.reg_user_mr = c2_reg_user_mr; dev->ibdev.dereg_mr = c2_dereg_mr; + dev->ibdev.get_port_immutable = c2_port_immutable; dev->ibdev.alloc_fmr = NULL; dev->ibdev.unmap_fmr = NULL; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 811b24a539c0..b1b73232f217 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -85,9 +85,13 @@ static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) static int iwch_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, + size_t in_mad_size, + struct ib_mad_hdr *out_mad, + size_t *out_mad_size, + u16 *out_mad_pkey_index) { return -ENOSYS; } @@ -138,10 +142,12 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq) return 0; } -static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector, - struct ib_ucontext *ib_context, - struct ib_udata *udata) +static struct 
ib_cq *iwch_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_context, + struct ib_udata *udata) { + int entries = attr->cqe; struct iwch_dev *rhp; struct iwch_cq *chp; struct iwch_create_cq_resp uresp; @@ -151,6 +157,9 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve size_t resplen; PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); + if (attr->flags) + return ERR_PTR(-EINVAL); + rhp = to_iwch_dev(ibdev); chp = kzalloc(sizeof(*chp), GFP_KERNEL); if (!chp) @@ -1145,13 +1154,17 @@ static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev) (fw_mic & 0xffff); } -static int iwch_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) +static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct iwch_dev *dev; + PDBG("%s ibdev %p\n", __func__, ibdev); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + dev = to_iwch_dev(ibdev); memset(props, 0, sizeof *props); memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); @@ -1343,6 +1356,23 @@ static struct device_attribute *iwch_class_attributes[] = { &dev_attr_board_id, }; +static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = iwch_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + + return 0; +} + int iwch_register_device(struct iwch_dev *dev) { int ret; @@ -1420,6 +1450,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.post_recv = iwch_post_receive; dev->ibdev.get_protocol_stats = iwch_get_mib; dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; + dev->ibdev.get_port_immutable = iwch_port_immutable; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 68ddb3710215..c7aab48f07cd 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -156,19 +156,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, goto err4; cq->gen = 1; + cq->gts = rdev->lldi.gts_reg; cq->rdev = rdev; - if (user) { - u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK; - cq->ugts = (u64)rdev->bar2_pa + off; - } else if (is_t4(rdev->lldi.adapter_type)) { - cq->gts = rdev->lldi.gts_reg; - cq->qid_mask = -1U; - } else { - u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12; - - cq->gts = rdev->bar2_kva + off; - cq->qid_mask = rdev->qpmask; + cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, + &cq->bar2_qid, + user ? 
&cq->bar2_pa : NULL); + if (user && !cq->bar2_va) { + pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", + pci_name(rdev->lldi.pdev), cq->cqid); + ret = -EINVAL; + goto err4; } return 0; err4: @@ -866,10 +864,13 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq) return 0; } -struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, - int vector, struct ib_ucontext *ib_context, +struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_context, struct ib_udata *udata) { + int entries = attr->cqe; + int vector = attr->comp_vector; struct c4iw_dev *rhp; struct c4iw_cq *chp; struct c4iw_create_cq_resp uresp; @@ -879,6 +880,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, struct c4iw_mm_entry *mm, *mm2; PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); + if (attr->flags) + return ERR_PTR(-EINVAL); rhp = to_c4iw_dev(ibdev); @@ -971,7 +974,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, insert_mmap(ucontext, mm); mm2->key = uresp.gts_key; - mm2->addr = chp->cq.ugts; + mm2->addr = chp->cq.bar2_pa; mm2->len = PAGE_SIZE; insert_mmap(ucontext, mm2); } diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 7e895d714b19..1a297391b54c 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -795,13 +795,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) goto err1; } - /* - * qpshift is the number of bits to shift the qpid left in order - * to get the correct address of the doorbell for that qp. - */ - rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density); rdev->qpmask = rdev->lldi.udb_density - 1; - rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density); rdev->cqmask = rdev->lldi.ucq_density - 1; PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d " "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x " @@ -815,14 +809,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size); - PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " - "qpmask 0x%x cqshift %lu cqmask 0x%x\n", + PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p " + "qpmask 0x%x cqmask 0x%x\n", (unsigned)pci_resource_len(rdev->lldi.pdev, 2), (void *)pci_resource_start(rdev->lldi.pdev, 2), - rdev->lldi.db_reg, - rdev->lldi.gts_reg, - rdev->qpshift, rdev->qpmask, - rdev->cqshift, rdev->cqmask); + rdev->lldi.db_reg, rdev->lldi.gts_reg, + rdev->qpmask, rdev->cqmask); if (c4iw_num_stags(rdev) == 0) { err = -EINVAL; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 97bb5550a6cf..cc77844fada3 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -165,9 +165,7 @@ struct wr_log_entry { struct c4iw_rdev { struct c4iw_resource resource; - unsigned long qpshift; u32 qpmask; - unsigned long cqshift; u32 cqmask; struct c4iw_dev_ucontext uctx; struct gen_pool *pbl_pool; @@ -992,10 +990,10 @@ int c4iw_reregister_phys_mem(struct ib_mr *mr, int acc, u64 *iova_start); int c4iw_dereg_mr(struct ib_mr *ib_mr); int c4iw_destroy_cq(struct ib_cq *ib_cq); -struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, - int vector, - struct ib_ucontext *ib_context, - struct ib_udata *udata); +struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_context, + struct ib_udata *udata); int c4iw_resize_cq(struct ib_cq 
*cq, int cqe, struct ib_udata *udata); int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int c4iw_destroy_qp(struct ib_qp *ib_qp); @@ -1032,6 +1030,9 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe); extern struct cxgb4_client t4c_client; extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; +void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, + enum cxgb4_bar2_qtype qtype, + unsigned int *pbar2_qid, u64 *pbar2_pa); extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe); extern int c4iw_wr_log; extern int db_fc_threshold; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 66bd6a2ad83b..62c816af46e4 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -80,9 +80,13 @@ static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) } static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, - u8 port_num, struct ib_wc *in_wc, - struct ib_grh *in_grh, struct ib_mad *in_mad, - struct ib_mad *out_mad) + u8 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, + size_t in_mad_size, + struct ib_mad_hdr *out_mad, + size_t *out_mad_size, + u16 *out_mad_pkey_index) { return -ENOSYS; } @@ -301,13 +305,17 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, return 0; } -static int c4iw_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) +static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct c4iw_dev *dev; + PDBG("%s ibdev %p\n", __func__, ibdev); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + dev = to_c4iw_dev(ibdev); memset(props, 0, sizeof *props); memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); @@ -465,6 +473,23 @@ static struct device_attribute *c4iw_class_attributes[] = { &dev_attr_board_id, }; +static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = c4iw_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + + return 0; +} + int c4iw_register_device(struct c4iw_dev *dev) { int ret; @@ -542,6 +567,7 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.post_recv = c4iw_post_receive; dev->ibdev.get_protocol_stats = c4iw_get_mib; dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; + dev->ibdev.get_port_immutable = c4iw_port_immutable; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 389ced335bc5..6517e1208ccb 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -165,6 +165,29 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, return 0; } +/* + * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL, + * then this is a user mapping so compute the page-aligned physical address + * for mapping. + */ +void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, + enum cxgb4_bar2_qtype qtype, + unsigned int *pbar2_qid, u64 *pbar2_pa) +{ + u64 bar2_qoffset; + int ret; + + ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype, + pbar2_pa ? 
1 : 0, + &bar2_qoffset, pbar2_qid); + if (ret) + return NULL; + + if (pbar2_pa) + *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; + return rdev->bar2_kva + bar2_qoffset; +} + static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx) @@ -236,25 +259,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); wq->db = rdev->lldi.db_reg; - wq->gts = rdev->lldi.gts_reg; - if (user || is_t5(rdev->lldi.adapter_type)) { - u32 off; - off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK; - if (user) { - wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off); - } else { - off += 128 * (wq->sq.qid & rdev->qpmask) + 8; - wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off); - } - off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK; - if (user) { - wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off); - } else { - off += 128 * (wq->rq.qid & rdev->qpmask) + 8; - wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off); - } + wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, + &wq->sq.bar2_qid, + user ? &wq->sq.bar2_pa : NULL); + wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, + &wq->rq.bar2_qid, + user ? &wq->rq.bar2_pa : NULL); + + /* + * User mode must have bar2 access. + */ + if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { + pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", + pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); + goto free_dma; } + wq->rdev = rdev; wq->rq.msn = 1; @@ -336,10 +357,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, if (ret) goto free_dma; - PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n", + PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n", __func__, wq->sq.qid, wq->rq.qid, wq->db, - (__force unsigned long) wq->sq.udb, - (__force unsigned long) wq->rq.udb); + wq->sq.bar2_va, wq->rq.bar2_va); return 0; free_dma: @@ -1766,11 +1786,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); insert_mmap(ucontext, mm2); mm3->key = uresp.sq_db_gts_key; - mm3->addr = (__force unsigned long)qhp->wq.sq.udb; + mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa; mm3->len = PAGE_SIZE; insert_mmap(ucontext, mm3); mm4->key = uresp.rq_db_gts_key; - mm4->addr = (__force unsigned long)qhp->wq.rq.udb; + mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa; mm4->len = PAGE_SIZE; insert_mmap(ucontext, mm4); if (mm5) { diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 7f2a6c244d25..274a7ab13bef 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -33,6 +33,7 @@ #include "t4_hw.h" #include "t4_regs.h" +#include "t4_values.h" #include "t4_msg.h" #include "t4fw_ri_api.h" @@ -290,8 +291,10 @@ struct t4_sq { unsigned long phys_addr; struct t4_swsqe *sw_sq; struct t4_swsqe *oldest_read; - u64 __iomem *udb; + void __iomem *bar2_va; + u64 bar2_pa; size_t memsize; + u32 bar2_qid; u32 qid; u16 in_use; u16 size; @@ -314,8 +317,10 @@ struct t4_rq { dma_addr_t dma_addr; DEFINE_DMA_UNMAP_ADDR(mapping); struct t4_swrqe *sw_rq; - u64 __iomem *udb; + void __iomem *bar2_va; + u64 bar2_pa; size_t memsize; + u32 bar2_qid; u32 qid; u32 msn; u32 rqt_hwaddr; @@ -332,7 +337,6 @@ struct t4_wq { struct t4_sq sq; struct t4_rq rq; void __iomem *db; - void __iomem *gts; struct c4iw_rdev *rdev; int flushed; }; @@ -457,15 +461,18 @@ static 
inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, /* Flush host queue memory writes. */ wmb(); - if (t5) { - if (inc == 1 && wqe) { + if (wq->sq.bar2_va) { + if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) { PDBG("%s: WC wq->sq.pidx = %d\n", __func__, wq->sq.pidx); - pio_copy(wq->sq.udb + 7, (void *)wqe); + pio_copy((u64 __iomem *) + (wq->sq.bar2_va + SGE_UDB_WCDOORBELL), + (u64 *)wqe); } else { PDBG("%s: DB wq->sq.pidx = %d\n", __func__, wq->sq.pidx); - writel(PIDX_T5_V(inc), wq->sq.udb); + writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid), + wq->sq.bar2_va + SGE_UDB_KDOORBELL); } /* Flush user doorbell area writes. */ @@ -481,15 +488,18 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, /* Flush host queue memory writes. */ wmb(); - if (t5) { - if (inc == 1 && wqe) { + if (wq->rq.bar2_va) { + if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) { PDBG("%s: WC wq->rq.pidx = %d\n", __func__, wq->rq.pidx); - pio_copy(wq->rq.udb + 7, (void *)wqe); + pio_copy((u64 __iomem *) + (wq->rq.bar2_va + SGE_UDB_WCDOORBELL), + (void *)wqe); } else { PDBG("%s: DB wq->rq.pidx = %d\n", __func__, wq->rq.pidx); - writel(PIDX_T5_V(inc), wq->rq.udb); + writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid), + wq->rq.bar2_va + SGE_UDB_KDOORBELL); } /* Flush user doorbell area writes. */ @@ -534,8 +544,10 @@ struct t4_cq { DEFINE_DMA_UNMAP_ADDR(mapping); struct t4_cqe *sw_queue; void __iomem *gts; + void __iomem *bar2_va; + u64 bar2_pa; + u32 bar2_qid; struct c4iw_rdev *rdev; - u64 ugts; size_t memsize; __be64 bits_type_ts; u32 cqid; @@ -552,6 +564,15 @@ struct t4_cq { unsigned long flags; }; +static inline void write_gts(struct t4_cq *cq, u32 val) +{ + if (cq->bar2_va) + writel(val | INGRESSQID_V(cq->bar2_qid), + cq->bar2_va + SGE_UDB_GTS); + else + writel(val | INGRESSQID_V(cq->cqid), cq->gts); +} + static inline int t4_clear_cq_armed(struct t4_cq *cq) { return test_and_clear_bit(CQ_ARMED, &cq->flags); @@ -563,14 +584,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) set_bit(CQ_ARMED, &cq->flags); while (cq->cidx_inc > CIDXINC_M) { - val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | - INGRESSQID_V(cq->cqid & cq->qid_mask); - writel(val, cq->gts); + val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7); + write_gts(cq, val); cq->cidx_inc -= CIDXINC_M; } - val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | - INGRESSQID_V(cq->cqid & cq->qid_mask); - writel(val, cq->gts); + val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6); + write_gts(cq, val); cq->cidx_inc = 0; return 0; } @@ -601,9 +620,8 @@ static inline void t4_hwcq_consume(struct t4_cq *cq) if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) { u32 val; - val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | - INGRESSQID_V(cq->cqid & cq->qid_mask); - writel(val, cq->gts); + val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7); + write_gts(cq, val); cq->cidx_inc = 0; } if (++cq->cidx == cq->size) { diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index 8cc837537768..9b68b175069b 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c @@ -113,10 +113,12 @@ struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) return ret; } -struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, +struct ib_cq *ehca_create_cq(struct ib_device *device, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { + int cqe = attr->cqe; 
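/*
 * [Editorial illustration, not part of the patch] Every create_cq
 * conversion in this series has the same shape: the old entries and
 * comp_vector arguments now arrive packed in struct ib_cq_init_attr, and
 * the driver unpacks them, as ehca does just above. A minimal caller
 * sketch under that convention; ibdev, my_comp_handler and ctx are
 * placeholder names, not identifiers from the patch. cqe is the minimum
 * number of CQ entries, comp_vector picks the completion vector:
 *
 *	struct ib_cq_init_attr cq_attr = {};
 *	struct ib_cq *cq;
 *
 *	cq_attr.cqe = 64;
 *	cq_attr.comp_vector = 0;
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */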
static const u32 additional_cqe = 20; struct ib_cq *cq; struct ehca_cq *my_cq; @@ -131,6 +133,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, int ipz_rc, i; unsigned long flags; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 9ed4d2588304..e8b1bb65797a 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c @@ -50,7 +50,8 @@ static unsigned int limit_uint(unsigned int value) return min_t(unsigned int, value, INT_MAX); } -int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) +int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { int i, ret = 0; struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, @@ -71,6 +72,9 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT, }; + if (uhw->inlen || uhw->outlen) + return -EINVAL; + rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); if (!rblock) { ehca_err(&shca->ib_device, "Can't allocate rblock memory."); diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 22f79afa7fc1..80e6a3d5df3e 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h @@ -44,11 +44,15 @@ #include "ehca_classes.h" -int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props); +int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw); int ehca_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props); +enum rdma_protocol_type +ehca_query_protocol(struct ib_device *device, u8 port_num); + int ehca_query_sma_attr(struct ehca_shca *shca, u8 port, struct ehca_sma_attr *attr); @@ -126,7 +130,8 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq); void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq); -struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, +struct ib_cq *ehca_create_cq(struct ib_device *device, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata); @@ -188,9 +193,10 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context); int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, - struct ib_mad *out_mad); + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); void ehca_poll_eqs(unsigned long data); diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index cd8d290a09fc..8246418cd4e0 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -46,6 +46,7 @@ #include <linux/notifier.h> #include <linux/memory.h> +#include <rdma/ib_mad.h> #include "ehca_classes.h" #include "ehca_iverbs.h" #include "ehca_mrmw.h" @@ -431,6 +432,24 @@ init_node_guid1: return ret; } +static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ehca_query_port(ibdev, port_num, &attr); + if 
(err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + static int ehca_init_device(struct ehca_shca *shca) { int ret; @@ -510,6 +529,7 @@ static int ehca_init_device(struct ehca_shca *shca) shca->ib_device.process_mad = ehca_process_mad; shca->ib_device.mmap = ehca_mmap; shca->ib_device.dma_ops = &ehca_dma_mapping_ops; + shca->ib_device.get_port_immutable = ehca_port_immutable; if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { shca->ib_device.uverbs_cmd_mask |= @@ -534,6 +554,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) struct ib_cq *ibcq; struct ib_qp *ibqp; struct ib_qp_init_attr qp_init_attr; + struct ib_cq_init_attr cq_attr = {}; int ret; if (sport->ibcq_aqp1) { @@ -541,7 +562,9 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) return -EPERM; } - ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0); + cq_attr.cqe = 10; + ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), + &cq_attr); if (IS_ERR(ibcq)) { ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); return PTR_ERR(ibcq); diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index dba8f9f8b996..12b5bc23832b 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -140,10 +140,10 @@ struct vertcfl { } __attribute__ ((packed)); static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { - struct ib_perf *in_perf = (struct ib_perf *)in_mad; + const struct ib_perf *in_perf = (const struct ib_perf *)in_mad; struct ib_perf *out_perf = (struct ib_perf *)out_mad; struct ib_class_port_info *poi = (struct ib_class_port_info *)out_perf->data; @@ -187,8 +187,8 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, /* if request was globally routed, copy route info */ if (in_grh) { - struct vertcfl *vertcfl = - (struct vertcfl *)&in_grh->version_tclass_flow; + const struct vertcfl *vertcfl = + (const struct vertcfl *)&in_grh->version_tclass_flow; memcpy(poi->redirect_gid, in_grh->dgid.raw, sizeof(poi->redirect_gid)); tcslfl->tc = vertcfl->tc; @@ -217,10 +217,17 @@ perf_reply: } int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { int ret; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) return IB_MAD_RESULT_FAILURE; diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig index 1d9bb115cbf6..8fe54ff00580 100644 --- a/drivers/infiniband/hw/ipath/Kconfig +++ b/drivers/infiniband/hw/ipath/Kconfig @@ -9,3 +9,6 @@ config INFINIBAND_IPATH as IP-over-InfiniBand as well as with userspace applications (in conjunction with InfiniBand userspace access). 
For QLogic PCIe QLE based cards, use the QIB driver instead. + + If you have this hardware you will need to boot with PAT disabled + on your x86-64 systems, use the nopat kernel parameter. diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c index 0416c6c0e126..e9dd9112e718 100644 --- a/drivers/infiniband/hw/ipath/ipath_cq.c +++ b/drivers/infiniband/hw/ipath/ipath_cq.c @@ -188,7 +188,7 @@ static void send_complete(unsigned long data) /** * ipath_create_cq - create a completion queue * @ibdev: the device this completion queue is attached to - * @entries: the minimum size of the completion queue + * @attr: creation attributes * @context: unused by the InfiniPath driver * @udata: unused by the InfiniPath driver * @@ -197,16 +197,21 @@ static void send_complete(unsigned long data) * * Called by ib_create_cq() in the generic verbs code. */ -struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, +struct ib_cq *ipath_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cq *cq; struct ipath_cq_wc *wc; struct ib_cq *ret; u32 sz; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (entries < 1 || entries > ib_ipath_max_cqes) { ret = ERR_PTR(-EINVAL); goto done; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index bd0caedafe99..2d7e503d13cb 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -42,6 +42,9 @@ #include <linux/bitmap.h> #include <linux/slab.h> #include <linux/module.h> +#ifdef CONFIG_X86_64 +#include <asm/pat.h> +#endif #include "ipath_kernel.h" #include "ipath_verbs.h" @@ -395,6 +398,14 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) unsigned long long addr; u32 bar0 = 0, bar1 = 0; +#ifdef CONFIG_X86_64 + if (WARN(pat_enabled(), + "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { + ret = -ENODEV; + goto bail; + } +#endif + dd = ipath_alloc_devdata(pdev); if (IS_ERR(dd)) { ret = PTR_ERR(dd); @@ -542,6 +553,7 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dd->ipath_kregbase = __ioremap(addr, len, (_PAGE_NO_CACHE|_PAGE_WRITETHRU)); #else + /* XXX: split this properly to enable on PAT */ dd->ipath_kregbase = ioremap_nocache(addr, len); #endif @@ -587,12 +599,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ret = ipath_enable_wc(dd); - if (ret) { - ipath_dev_err(dd, "Write combining not enabled " - "(err %d): performance may be poor\n", - -ret); + if (ret) ret = 0; - } ipath_verify_pioperf(dd); diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index e08db7020cd4..f0f947122779 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -463,9 +463,7 @@ struct ipath_devdata { /* offset in HT config space of slave/primary interface block */ u8 ipath_ht_slave_off; /* for write combining settings */ - unsigned long ipath_wc_cookie; - unsigned long ipath_wc_base; - unsigned long ipath_wc_len; + int wc_cookie; /* ref count for each pkey */ atomic_t ipath_pkeyrefs[4]; /* shadow copy of struct page *'s for exp tid pages */ diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index e890e5ba0e01..948188e37f95 
100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -1257,7 +1257,7 @@ static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp, } static int process_subn(struct ib_device *ibdev, int mad_flags, - u8 port_num, struct ib_mad *in_mad, + u8 port_num, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; @@ -1389,7 +1389,7 @@ bail: } static int process_perf(struct ib_device *ibdev, u8 port_num, - struct ib_mad *in_mad, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; @@ -1490,10 +1490,17 @@ bail: * This is called by the ib_mad module. */ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { int ret; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 44ea9390417c..48253b839a6f 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1495,11 +1495,14 @@ bail: return 0; } -static int ipath_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) +static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct ipath_ibdev *dev = to_idev(ibdev); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + memset(props, 0, sizeof(*props)); props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | @@ -1980,6 +1983,24 @@ static int disable_timer(struct ipath_devdata *dd) return 0; } +static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ipath_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + /** * ipath_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -2179,6 +2200,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) dev->process_mad = ipath_process_mad; dev->mmap = ipath_mmap; dev->dma_ops = &ipath_dma_mapping_ops; + dev->get_port_immutable = ipath_port_immutable; snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index ae6cff4abffc..ec167e545e15 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -701,9 +701,11 @@ static inline void ipath_schedule_send(struct ipath_qp *qp) int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t 
in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); /* * Compare the lower 24 bits of the two values. @@ -807,7 +809,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector, +struct ib_cq *ipath_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c index 4ad0b932df1f..7b6e4c843e19 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c @@ -37,7 +37,6 @@ */ #include <linux/pci.h> -#include <asm/mtrr.h> #include <asm/processor.h> #include "ipath_kernel.h" @@ -122,27 +121,14 @@ int ipath_enable_wc(struct ipath_devdata *dd) } if (!ret) { - int cookie; - ipath_cdbg(VERBOSE, "Setting mtrr for chip to WC " - "(addr %llx, len=0x%llx)\n", - (unsigned long long) pioaddr, - (unsigned long long) piolen); - cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); - if (cookie < 0) { - { - dev_info(&dd->pcidev->dev, - "mtrr_add() WC for PIO bufs " - "failed (%d)\n", - cookie); - ret = -EINVAL; - } - } else { - ipath_cdbg(VERBOSE, "Set mtrr for chip to WC, " - "cookie is %d\n", cookie); - dd->ipath_wc_cookie = cookie; - dd->ipath_wc_base = (unsigned long) pioaddr; - dd->ipath_wc_len = (unsigned long) piolen; - } + dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen); + if (dd->wc_cookie < 0) { + ipath_dev_err(dd, "Setting mtrr failed on PIO buffers\n"); + ret = -ENODEV; + } else if (dd->wc_cookie == 0) + ipath_cdbg(VERBOSE, "Set mtrr for chip to WC not needed\n"); + else + ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n"); } return ret; @@ -154,16 +140,5 @@ int ipath_enable_wc(struct ipath_devdata *dd) */ void ipath_disable_wc(struct ipath_devdata *dd) { - if (dd->ipath_wc_cookie) { - int r; - ipath_cdbg(VERBOSE, "undoing WCCOMB on pio buffers\n"); - r = mtrr_del(dd->ipath_wc_cookie, dd->ipath_wc_base, - dd->ipath_wc_len); - if (r < 0) - dev_info(&dd->pcidev->dev, - "mtrr_del(%lx, %lx, %lx) failed: %d\n", - dd->ipath_wc_cookie, dd->ipath_wc_base, - dd->ipath_wc_len, r); - dd->ipath_wc_cookie = 0; /* even on failure */ - } + arch_phys_wc_del(dd->wc_cookie); } diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 0176caa5792c..36eb3d012b6d 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -166,10 +166,14 @@ err_buf: return err; } -struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, +#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION +struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; + int vector = attr->comp_vector; struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_cq *cq; struct mlx4_uar *uar; @@ -178,6 +182,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector if (entries < 1 || entries > dev->dev->caps.max_cqes) return ERR_PTR(-EINVAL); + if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED) + return ERR_PTR(-EINVAL); + cq = kmalloc(sizeof *cq, GFP_KERNEL); if (!cq) return ERR_PTR(-ENOMEM); @@ -188,6 +195,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector 
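/*
 * [Editorial illustration, not part of the patch] mlx4 is the only driver
 * in this series that honors a CQ creation flag through the new attr:
 * IB_CQ_FLAGS_TIMESTAMP_COMPLETION requests completion timestamping
 * against the HCA core clock (compare the timestamp_mask and
 * hca_core_clock fields added to query_device below). A hypothetical
 * consumer would ask for it like this:
 *
 *	struct ib_cq_init_attr cq_attr = {
 *		.cqe		= 256,
 *		.comp_vector	= 0,
 *		.flags		= IB_CQ_FLAGS_TIMESTAMP_COMPLETION,
 *	};
 *	struct ib_cq *cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
 *
 * Any flag outside CQ_CREATE_FLAGS_SUPPORTED is rejected with -EINVAL by
 * the check just above.
 */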
spin_lock_init(&cq->lock); cq->resize_buf = NULL; cq->resize_umem = NULL; + cq->create_flags = attr->flags; INIT_LIST_HEAD(&cq->send_qp_list); INIT_LIST_HEAD(&cq->recv_qp_list); @@ -231,7 +239,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector vector = dev->eq_table[vector % ibdev->num_comp_vectors]; err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, - cq->db.dma, &cq->mcq, vector, 0, 0); + cq->db.dma, &cq->mcq, vector, 0, + !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION)); if (err) goto err_dbmap; diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 9cd2b002d7ae..3e2dee46caa2 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -111,8 +111,9 @@ __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) } int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad) + int port, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const void *in_mad, void *response_mad) { struct mlx4_cmd_mailbox *inmailbox, *outmailbox; void *inbox; @@ -220,7 +221,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can * synthesize LID change, Client-Rereg, GID change, and P_Key change events. */ -static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, +static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad, u16 prev_lid) { struct ib_port_info *pinfo; @@ -356,7 +357,7 @@ static void node_desc_override(struct ib_device *dev, } } -static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad) +static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad) { int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; struct ib_mad_send_buf *send_buf; @@ -366,7 +367,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, - IB_MGMT_MAD_DATA, GFP_ATOMIC); + IB_MGMT_MAD_DATA, GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; /* @@ -722,8 +724,8 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port, } static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { u16 slid, prev_lid = 0; int err; @@ -825,8 +827,8 @@ static void edit_counter(struct mlx4_counter *cnt, } static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_ib_dev *dev = to_mdev(ibdev); @@ -866,9 +868,17 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, } int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t 
*out_mad_size, + u16 *out_mad_pkey_index) { + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); + switch (rdma_port_get_link_layer(ibdev, port_num)) { case IB_LINK_LAYER_INFINIBAND: return ib_process_mad(ibdev, mad_flags, port_num, in_wc, @@ -1773,6 +1783,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) { int ret, cq_size; + struct ib_cq_init_attr cq_attr = {}; if (ctx->state != DEMUX_PV_STATE_DOWN) return -EEXIST; @@ -1801,8 +1812,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port, if (ctx->has_smi) cq_size *= 2; + cq_attr.cqe = cq_size; ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, - NULL, ctx, cq_size, 0); + NULL, ctx, &cq_attr); if (IS_ERR(ctx->cq)) { ret = PTR_ERR(ctx->cq); pr_err("Couldn't create tunnel CQ (%d)\n", ret); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index cc64400d41ac..166da787780c 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -132,14 +132,35 @@ static int num_ib_ports(struct mlx4_dev *dev) } static int mlx4_ib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) + struct ib_device_attr *props, + struct ib_udata *uhw) { struct mlx4_ib_dev *dev = to_mdev(ibdev); struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; int have_ib_ports; + struct mlx4_uverbs_ex_query_device cmd; + struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0}; + struct mlx4_clock_params clock_params; + if (uhw->inlen) { + if (uhw->inlen < sizeof(cmd)) + return -EINVAL; + + err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd)); + if (err) + return err; + + if (cmd.comp_mask) + return -EINVAL; + + if (cmd.reserved) + return -EINVAL; + } + + resp.response_length = offsetof(typeof(resp), response_length) + + sizeof(resp.response_length); in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) @@ -229,7 +250,24 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_map_per_fmr = dev->dev->caps.max_fmr_maps; + props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; + props->timestamp_mask = 0xFFFFFFFFFFFFULL; + + err = mlx4_get_internal_clock_params(dev->dev, &clock_params); + if (err) + goto out; + + if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { + resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE; + resp.response_length += sizeof(resp.hca_core_clock_offset); + resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; + } + if (uhw->outlen) { + err = ib_copy_to_udata(uhw, &resp, resp.response_length); + if (err) + goto out; + } out: kfree(in_mad); kfree(out_mad); @@ -712,8 +750,24 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) dev->dev->caps.num_uars, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; - } else + } else if (vma->vm_pgoff == 3) { + struct mlx4_clock_params params; + int ret = mlx4_get_internal_clock_params(dev->dev, &params); + + if (ret) + return ret; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, + (pci_resource_start(dev->dev->persist->pdev, + params.bar) + + params.offset) + >> PAGE_SHIFT, + 
PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + } else { return -EINVAL; + } return 0; } @@ -758,6 +812,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, struct ib_udata *udata) { struct mlx4_ib_xrcd *xrcd; + struct ib_cq_init_attr cq_attr = {}; int err; if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) @@ -777,7 +832,8 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, goto err2; } - xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0); + cq_attr.cqe = 1; + xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); if (IS_ERR(xrcd->cq)) { err = PTR_ERR(xrcd->cq); goto err3; @@ -1185,7 +1241,6 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, &mflow->reg_id[i].id); if (err) goto err_create_flow; - i++; if (is_bonded) { /* Application always sees one port so the mirror rule * must be on port #2 @@ -1200,6 +1255,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, j++; } + i++; } if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { @@ -1207,7 +1263,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, &mflow->reg_id[i].id); if (err) goto err_create_flow; - i++; + if (is_bonded) { flow_attr->port = 2; err = mlx4_ib_tunnel_steer_add(qp, flow_attr, @@ -1218,6 +1274,7 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, j++; } /* function to create mirror rule */ + i++; } return &mflow->ibflow; @@ -2114,6 +2171,29 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) kfree(ibdev->eq_table); } +static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx4_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + else + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + static void *mlx4_ib_add(struct mlx4_dev *dev) { struct mlx4_ib_dev *ibdev; @@ -2241,6 +2321,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; ibdev->ib_dev.process_mad = mlx4_ib_process_mad; + ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; if (!mlx4_is_slave(ibdev->dev)) { ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; @@ -2278,6 +2359,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); } + ibdev->ib_dev.uverbs_ex_cmd_mask |= + (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ); + mlx4_ib_alloc_eqs(dev, ibdev); spin_lock_init(&iboe->lock); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index fce3934372a1..7933adfff662 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -110,6 +110,7 @@ struct mlx4_ib_cq { struct mutex resize_mutex; struct ib_umem *umem; struct ib_umem *resize_umem; + int create_flags; /* List of qps that it serves.*/ struct list_head send_qp_list; struct list_head recv_qp_list; @@ -555,6 +556,21 @@ struct mlx4_ib_qp_tunnel_init_attr { u8 port; }; +struct mlx4_uverbs_ex_query_device { + __u32 comp_mask; + __u32 reserved; +}; + +enum query_device_resp_mask { + QUERY_DEVICE_RESP_MASK_TIMESTAMP = 
1UL << 0, +}; + +struct mlx4_uverbs_ex_query_device_resp { + __u32 comp_mask; + __u32 response_length; + __u64 hca_core_clock_offset; +}; + static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) { return container_of(ibdev, struct mlx4_ib_dev, ib_dev); @@ -668,7 +684,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list); int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata); -struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector, +struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata); int mlx4_ib_destroy_cq(struct ib_cq *cq); @@ -706,11 +723,13 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr); int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad); + int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const void *in_mad, void *response_mad); int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); int mlx4_ib_mad_init(struct mlx4_ib_dev *dev); void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 2ee6b1051975..09fbae618d35 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -736,10 +736,13 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) mlx5_db_free(dev->mdev, &cq->db); } -struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, - int vector, struct ib_ucontext *context, +struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; + int vector = attr->comp_vector; struct mlx5_create_cq_mbox_in *cqb = NULL; struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_cq *cq; @@ -750,6 +753,9 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, int eqn; int err; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (entries < 0) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 9cf9a37bb5ff..8e45714fa369 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -41,8 +41,8 @@ enum { }; int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, - u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad) + u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const void *in_mad, void *response_mad) { u8 op_modifier = 0; @@ -58,11 +58,18 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, } int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 
*out_mad_pkey_index) { u16 slid; int err; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 57c9809e8b87..c6cb26e0c866 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -63,7 +63,8 @@ static char mlx5_version[] = DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; static int mlx5_ib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) + struct ib_device_attr *props, + struct ib_udata *uhw) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct ib_smp *in_mad = NULL; @@ -74,6 +75,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, int max_sq_sg; u64 flags; + if (uhw->inlen || uhw->outlen) + return -EINVAL; + gen = &dev->mdev->caps.gen; in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); @@ -910,6 +914,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev) struct mlx5_general_caps *gen; int err = -ENOMEM; int port; + struct ib_udata uhw = {.inlen = 0, .outlen = 0}; gen = &dev->mdev->caps.gen; pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); @@ -920,7 +925,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev) if (!dprops) goto out; - err = mlx5_ib_query_device(&dev->ib_dev, dprops); + err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); if (err) { mlx5_ib_warn(dev, "query_device failed %d\n", err); goto out; @@ -971,6 +976,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev) struct ib_cq *cq; struct ib_qp *qp; struct ib_mr *mr; + struct ib_cq_init_attr cq_attr = {}; int ret; attr = kzalloc(sizeof(*attr), GFP_KERNEL); @@ -994,8 +1000,9 @@ static int create_umr_res(struct mlx5_ib_dev *dev) goto error_1; } - cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128, - 0); + cq_attr.cqe = 128; + cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, + &cq_attr); if (IS_ERR(cq)) { mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); ret = PTR_ERR(cq); @@ -1087,6 +1094,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) { struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; + struct ib_cq_init_attr cq_attr = {.cqe = 1}; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); @@ -1100,7 +1108,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) devr->p0->uobject = NULL; atomic_set(&devr->p0->usecnt, 0); - devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL); + devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); if (IS_ERR(devr->c0)) { ret = PTR_ERR(devr->c0); goto error1; @@ -1182,6 +1190,24 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr) mlx5_ib_dealloc_pd(devr->p0); } +static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx5_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + static void *mlx5_ib_add(struct mlx5_core_dev *mdev) { struct mlx5_ib_dev *dev; @@ -1285,6 +1311,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.alloc_fast_reg_page_list = 
mlx5_ib_alloc_fast_reg_page_list; dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; + dev->ib_dev.get_port_immutable = mlx5_port_immutable; mlx5_ib_internal_query_odp_caps(dev); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index dff1cfcdf476..178314e764da 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -525,8 +525,8 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, - u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad); + u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const void *in_mad, void *response_mad); struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, struct mlx5_ib_ah *ah); struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); @@ -556,8 +556,9 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, void *buffer, u32 length); -struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, - int vector, struct ib_ucontext *context, +struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, struct ib_udata *udata); int mlx5_ib_destroy_cq(struct ib_cq *cq); int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); @@ -586,8 +587,10 @@ int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int mlx5_ib_unmap_fmr(struct list_head *fmr_list); int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr); int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 9d3e5c1ac60e..c7f49bbb0c72 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -1858,8 +1858,8 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) } int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad) + int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const void *in_mad, void *response_mad) { struct mthca_mailbox *inmailbox, *outmailbox; void *inbox; diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index f952244c54de..d2e5b194b938 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -312,8 +312,8 @@ int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, struct mthca_mailbox *mailbox); int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn); int mthca_MAD_IFC(struct mthca_dev *dev, int 
ignore_mkey, int ignore_bkey, - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, - void *in_mad, void *response_mad); + int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const void *in_mad, void *response_mad); int mthca_READ_MGM(struct mthca_dev *dev, int index, struct mthca_mailbox *mailbox); int mthca_WRITE_MGM(struct mthca_dev *dev, int index, diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index 7e6a6d64ad4e..4393a022867b 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -576,10 +576,11 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, - struct ib_mad *out_mad); + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); int mthca_create_agents(struct mthca_dev *dev); void mthca_free_agents(struct mthca_dev *dev); diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 8881fa376e06..6b2418b74c99 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -104,7 +104,7 @@ static void update_sm_ah(struct mthca_dev *dev, */ static void smp_snoop(struct ib_device *ibdev, u8 port_num, - struct ib_mad *mad, + const struct ib_mad *mad, u16 prev_lid) { struct ib_event event; @@ -160,7 +160,7 @@ static void node_desc_override(struct ib_device *dev, static void forward_trap(struct mthca_dev *dev, u8 port_num, - struct ib_mad *mad) + const struct ib_mad *mad) { int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; struct ib_mad_send_buf *send_buf; @@ -170,7 +170,8 @@ static void forward_trap(struct mthca_dev *dev, if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, - IB_MGMT_MAD_DATA, GFP_ATOMIC); + IB_MGMT_MAD_DATA, GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; /* @@ -195,15 +196,21 @@ static void forward_trap(struct mthca_dev *dev, int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, - struct ib_mad *out_mad) + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { int err; u16 slid = in_wc ? 
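/*
 * process_mad now receives a bare struct ib_mad_hdr plus explicit
 * buffer sizes instead of fixed struct ib_mad pointers. Drivers that
 * only handle fixed-size MADs recover the old view exactly as in the
 * mlx4/mlx5/mthca conversions; foo_process_mad is an illustrative
 * name and the class dispatch is elided:
 */
static int foo_process_mad(struct ib_device *ibdev, int mad_flags,
			   u8 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad_hdr *in, size_t in_mad_size,
			   struct ib_mad_hdr *out, size_t *out_mad_size,
			   u16 *out_mad_pkey_index)
{
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	/* Fixed-size-only hardware: any other size is a core bug. */
	BUG_ON(in_mad_size != sizeof(*in_mad) ||
	       *out_mad_size != sizeof(*out_mad));

	/* ... dispatch on in_mad->mad_hdr.mgmt_class as before,
	 * filling out_mad and *out_mad_pkey_index ... */
	return IB_MAD_RESULT_SUCCESS;
}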
in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); u16 prev_lid = 0; struct ib_port_attr pattr; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); /* Forward locally generated traps to the SM */ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c index 8edb28a9a0e7..15d064479ef6 100644 --- a/drivers/infiniband/hw/mthca/mthca_profile.c +++ b/drivers/infiniband/hw/mthca/mthca_profile.c @@ -77,7 +77,6 @@ s64 mthca_make_profile(struct mthca_dev *dev, u64 mem_base, mem_avail; s64 total_size = 0; struct mthca_resource *profile; - struct mthca_resource tmp; int i, j; profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL); @@ -136,11 +135,8 @@ s64 mthca_make_profile(struct mthca_dev *dev, */ for (i = MTHCA_RES_NUM; i > 0; --i) for (j = 1; j < i; ++j) { - if (profile[j].size > profile[j - 1].size) { - tmp = profile[j]; - profile[j] = profile[j - 1]; - profile[j - 1] = tmp; - } + if (profile[j].size > profile[j - 1].size) + swap(profile[j], profile[j - 1]); } for (i = 0; i < MTHCA_RES_NUM; ++i) { diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 415f8e1a54db..93ae51dcf2ff 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -57,14 +57,17 @@ static void init_query_mad(struct ib_smp *mad) mad->method = IB_MGMT_METHOD_GET; } -static int mthca_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) +static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; struct mthca_dev *mdev = to_mdev(ibdev); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) @@ -641,16 +644,20 @@ static int mthca_destroy_qp(struct ib_qp *qp) return 0; } -static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, - int comp_vector, +static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; struct mthca_create_cq ucmd; struct mthca_cq *cq; int nent; int err; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) return ERR_PTR(-EINVAL); @@ -1244,6 +1251,24 @@ out: return err; } +static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mthca_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + int mthca_register_device(struct mthca_dev *dev) { int ret; @@ -1323,6 +1348,7 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; dev->ib_dev.reg_user_mr = mthca_reg_user_mr; dev->ib_dev.dereg_mr = mthca_dereg_mr; + dev->ib_dev.get_port_immutable = mthca_port_immutable; if (dev->mthca_flags & MTHCA_FLAG_FMR) { dev->ib_dev.alloc_fmr = mthca_alloc_fmr; diff --git 
a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 72b43417cbe3..9047af429906 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1616,6 +1616,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, &cm_node->loc_addr, cm_node->loc_port, &cm_node->rem_addr, cm_node->rem_port); cm_node->listener = listener; + if (listener) + cm_node->tos = listener->tos; cm_node->netdev = nesvnic->netdev; cm_node->cm_id = cm_info->cm_id; memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN); @@ -2938,6 +2940,9 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT); + nesqp->nesqp_context->misc2 |= cpu_to_le32( + cm_node->tos << NES_QPCONTEXT_MISC2_TOS_SHIFT); + nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16); nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32( @@ -3612,6 +3617,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_node->ord_size = 1; cm_node->apbvt_set = apbvt_set; + cm_node->tos = cm_id->tos; nesqp->cm_node = cm_node; cm_node->nesqp = nesqp; nes_add_ref(&nesqp->ibqp); @@ -3666,6 +3672,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) } cm_id->provider_data = cm_node; + cm_node->tos = cm_id->tos; if (!cm_node->reused_node) { if (nes_create_mapinfo(&cm_info)) diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index f522cf639789..32a6420c2940 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h @@ -303,6 +303,7 @@ struct nes_cm_listener { int backlog; enum nes_cm_listener_state listener_state; u32 reused_node; + u8 tos; }; /* per connection node and node state information */ @@ -352,6 +353,7 @@ struct nes_cm_node { struct list_head reset_entry; struct nes_qp *nesqp; atomic_t passive_state; + u8 tos; }; /* structure for client or CM to fill when making CM api calls. 
*/ diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index c0d0296e7a00..fbc43e5f717b 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -512,12 +512,16 @@ static void nes_free_fast_reg_page_list(struct ib_fast_reg_page_list *pifrpl) /** * nes_query_device */ -static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props) +static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct nes_vnic *nesvnic = to_nesvnic(ibdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_ib_device *nesibdev = nesvnic->nesibdev; + if (uhw->inlen || uhw->outlen) + return -EINVAL; + memset(props, 0, sizeof(*props)); memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6); @@ -606,7 +610,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr return 0; } - /** * nes_query_pkey */ @@ -1527,10 +1530,12 @@ static int nes_destroy_qp(struct ib_qp *ibqp) /** * nes_create_cq */ -static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, - int comp_vector, - struct ib_ucontext *context, struct ib_udata *udata) +static struct ib_cq *nes_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata) { + int entries = attr->cqe; u64 u64temp; struct nes_vnic *nesvnic = to_nesvnic(ibdev); struct nes_device *nesdev = nesvnic->nesdev; @@ -1550,6 +1555,9 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, unsigned long flags; int ret; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (entries > nesadapter->max_cqe) return ERR_PTR(-EINVAL); @@ -3222,8 +3230,10 @@ static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) * nes_process_mad */ static int nes_process_mad(struct ib_device *ibdev, int mad_flags, - u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { nes_debug(NES_DBG_INIT, "\n"); return -ENOSYS; @@ -3828,6 +3838,22 @@ static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_ return 0; } +static int nes_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = nes_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + + return 0; +} /** * nes_init_ofa_device @@ -3928,6 +3954,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.iwcm->reject = nes_reject; nesibdev->ibdev.iwcm->create_listen = nes_create_listen; nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen; + nesibdev->ibdev.get_port_immutable = nes_port_immutable; return nesibdev; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index f5a5ea836dbd..4bafa15708d0 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -204,12 +204,19 @@ int ocrdma_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr) int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags, u8 port_num, - struct 
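/*
 * query_device gains a struct ib_udata so the uverbs layer can grow
 * extended attributes later; every driver converted in this series
 * refuses any user payload for now. The shape of the check, with
 * foo_query_device illustrative:
 */
static int foo_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props,
			    struct ib_udata *uhw)
{
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;		/* no extended attrs supported */

	memset(props, 0, sizeof(*props));
	/* ... fill in hardware limits as before ... */
	return 0;
}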
ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { int status; struct ocrdma_dev *dev; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_PERF_MGMT: diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 726a87cf22dc..cf366fe03cb8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -42,7 +42,9 @@ int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *); int ocrdma_process_mad(struct ib_device *, int process_mad_flags, u8 port_num, - struct ib_wc *in_wc, - struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); #endif /* __OCRDMA_AH_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7a2b59aca004..8a1398b253a2 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -30,6 +30,7 @@ #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> +#include <rdma/ib_mad.h> #include <linux/netdevice.h> #include <net/addrconf.h> @@ -202,6 +203,24 @@ static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, return IB_LINK_LAYER_ETHERNET; } +static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ocrdma_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + static int ocrdma_register_device(struct ocrdma_dev *dev) { strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX); @@ -286,6 +305,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.dma_device = &dev->nic_info.pdev->dev; dev->ibdev.process_mad = ocrdma_process_mad; + dev->ibdev.get_port_immutable = ocrdma_port_immutable; if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { dev->ibdev.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 9dcb66077d6c..5bb61eb58f2c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -61,10 +61,14 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port, return 0; } -int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr) +int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, + struct ib_udata *uhw) { struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + memset(attr, 0, sizeof *attr); memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); @@ -375,7 +379,12 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, if 
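/*
 * Each driver in this series also grows a near-identical
 * get_port_immutable callback; only core_cap_flags varies by
 * transport (RDMA_CORE_PORT_IBA_IB here, RDMA_CORE_PORT_IBA_ROCE for
 * ocrdma, RDMA_CORE_PORT_IWARP for nes, none for usnic). Generic
 * shape, with foo_query_port standing in for the driver's own
 * query_port method:
 */
static int foo_port_immutable(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = foo_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}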
(dev->pd_mgr->pd_prealloc_valid) { status = ocrdma_get_pd_num(dev, pd); - return (status == 0) ? pd : ERR_PTR(status); + if (status == 0) { + return pd; + } else { + kfree(pd); + return ERR_PTR(status); + } } retry: @@ -679,7 +688,6 @@ err: ocrdma_release_ucontext_pd(uctx); } else { status = _ocrdma_dealloc_pd(dev, pd); - kfree(pd); } exit: return ERR_PTR(status); @@ -1000,10 +1008,12 @@ err: return status; } -struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, +struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, struct ib_ucontext *ib_ctx, struct ib_udata *udata) { + int entries = attr->cqe; struct ocrdma_cq *cq; struct ocrdma_dev *dev = get_ocrdma_dev(ibdev); struct ocrdma_ucontext *uctx = NULL; @@ -1011,6 +1021,9 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector, int status; struct ocrdma_create_cq_ureq ureq; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (udata) { if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) return ERR_PTR(-EFAULT); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b8f7853fd36c..b15c608efa7b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -36,11 +36,15 @@ int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc); int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags); -int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props); +int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props, + struct ib_udata *uhw); int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props); int ocrdma_modify_port(struct ib_device *, u8 port, int mask, struct ib_port_modify *props); +enum rdma_protocol_type +ocrdma_query_protocol(struct ib_device *device, u8 port_num); + void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); int ocrdma_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid); @@ -56,8 +60,10 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *, struct ib_ucontext *, struct ib_udata *); int ocrdma_dealloc_pd(struct ib_pd *pd); -struct ib_cq *ocrdma_create_cq(struct ib_device *, int entries, int vector, - struct ib_ucontext *, struct ib_udata *); +struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *ib_ctx, + struct ib_udata *udata); int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int ocrdma_destroy_cq(struct ib_cq *); diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c index ab4e11cfab15..2b45d0b02300 100644 --- a/drivers/infiniband/hw/qib/qib_cq.c +++ b/drivers/infiniband/hw/qib/qib_cq.c @@ -203,7 +203,7 @@ static void send_complete(struct kthread_work *work) /** * qib_create_cq - create a completion queue * @ibdev: the device this completion queue is attached to - * @entries: the minimum size of the completion queue + * @attr: creation attributes * @context: unused by the QLogic_IB driver * @udata: user data for libibverbs.so * @@ -212,16 +212,21 @@ static void send_complete(struct kthread_work *work) * * Called by ib_create_cq() in the generic verbs code. 
*/ -struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, - int comp_vector, struct ib_ucontext *context, +struct ib_cq *qib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, struct ib_udata *udata) { + int entries = attr->cqe; struct qib_ibdev *dev = to_idev(ibdev); struct qib_cq *cq; struct qib_cq_wc *wc; struct ib_cq *ret; u32 sz; + if (attr->flags) + return ERR_PTR(-EINVAL); + if (entries < 1 || entries > ib_qib_max_cqes) { ret = ERR_PTR(-EINVAL); goto done; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index f32b4628e991..6c8ff10101c0 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5502,7 +5502,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd) goto retry; send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, - IB_MGMT_MAD_DATA, GFP_ATOMIC); + IB_MGMT_MAD_DATA, GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) goto retry; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 395f4046dba2..05e3242d8442 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -83,7 +83,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) return; send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, - IB_MGMT_MAD_DATA, GFP_ATOMIC); + IB_MGMT_MAD_DATA, GFP_ATOMIC, + IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; @@ -1854,7 +1855,7 @@ static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, } static int process_subn(struct ib_device *ibdev, int mad_flags, - u8 port, struct ib_mad *in_mad, + u8 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; @@ -2006,7 +2007,7 @@ bail: } static int process_perf(struct ib_device *ibdev, u8 port, - struct ib_mad *in_mad, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; @@ -2299,7 +2300,7 @@ static int check_cc_key(struct qib_ibport *ibp, } static int process_cc(struct ib_device *ibdev, int mad_flags, - u8 port, struct ib_mad *in_mad, + u8 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad; @@ -2400,12 +2401,19 @@ bail: * This is called by the ib_mad module. 
*/ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad) + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) { int ret; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + BUG_ON(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad)); switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 4a3599890ea5..a05d1a372208 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1550,12 +1550,14 @@ full: } } -static int qib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) +static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, + struct ib_udata *uhw) { struct qib_devdata *dd = dd_from_ibdev(ibdev); struct qib_ibdev *dev = to_idev(ibdev); + if (uhw->inlen || uhw->outlen) + return -EINVAL; memset(props, 0, sizeof(*props)); props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | @@ -2040,6 +2042,24 @@ static void init_ibport(struct qib_pportdata *ppd) RCU_INIT_POINTER(ibp->qp1, NULL); } +static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = qib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + immutable->max_mad_size = IB_MGMT_MAD_SIZE; + + return 0; +} + /** * qib_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -2227,6 +2247,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->process_mad = qib_process_mad; ibdev->mmap = qib_mmap; ibdev->dma_ops = &qib_dma_mapping_ops; + ibdev->get_port_immutable = qib_port_immutable; snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), "Intel Infiniband HCA %s", init_utsname()->nodename); diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index bfc8948fdd35..1635572752ce 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -872,8 +872,10 @@ void qib_cap_mask_chg(struct qib_ibport *ibp); void qib_sys_guid_chg(struct qib_ibport *ibp); void qib_node_desc_chg(struct qib_ibport *ibp); int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - struct ib_wc *in_wc, struct ib_grh *in_grh, - struct ib_mad *in_mad, struct ib_mad *out_mad); + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index); int qib_create_agents(struct qib_ibdev *dev); void qib_free_agents(struct qib_ibdev *dev); @@ -1007,8 +1009,9 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); -struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries, - int comp_vector, struct ib_ucontext *context, +struct ib_cq *qib_create_cq(struct ib_device *ibdev, + const 
struct ib_cq_init_attr *attr, + struct ib_ucontext *context, struct ib_udata *udata); int qib_destroy_cq(struct ib_cq *ibcq); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 0d0f98695d53..34c49b8105fe 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -300,6 +300,22 @@ static struct notifier_block usnic_ib_inetaddr_notifier = { }; /* End of inet section*/ +static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = usnic_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /* Start of PF discovery section */ static void *usnic_ib_device_add(struct pci_dev *dev) { @@ -383,6 +399,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq; us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq; us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr; + us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable; if (ib_register_device(&us_ibdev->ib_dev, NULL)) diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 53bd6a2d9cdb..7df43827cb29 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -248,7 +248,8 @@ enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, } int usnic_ib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props) + struct ib_device_attr *props, + struct ib_udata *uhw) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); union ib_gid gid; @@ -257,6 +258,9 @@ int usnic_ib_query_device(struct ib_device *ibdev, int qp_per_vf; usnic_dbg("\n"); + if (uhw->inlen || uhw->outlen) + return -EINVAL; + mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); @@ -570,13 +574,17 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, return status; } -struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries, - int vector, struct ib_ucontext *context, - struct ib_udata *udata) +struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata) { struct ib_cq *cq; usnic_dbg("\n"); + if (attr->flags) + return ERR_PTR(-EINVAL); + cq = kzalloc(sizeof(*cq), GFP_KERNEL); if (!cq) return ERR_PTR(-EBUSY); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index bb864f5aed70..0bd04efa16f3 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -24,9 +24,12 @@ enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, u8 port_num); int usnic_ib_query_device(struct ib_device *ibdev, - struct ib_device_attr *props); + struct ib_device_attr *props, + struct ib_udata *uhw); int usnic_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props); +enum rdma_protocol_type +usnic_ib_query_protocol(struct ib_device *device, u8 port_num); int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); @@ -44,9 +47,10 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, 
int usnic_ib_destroy_qp(struct ib_qp *qp); int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata); -struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries, - int vector, struct ib_ucontext *context, - struct ib_udata *udata); +struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata); int usnic_ib_destroy_cq(struct ib_cq *cq); struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 417de1f32960..cb2337f0532b 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -472,11 +472,10 @@ struct usnic_uiom_pd *usnic_uiom_alloc_pd(void) return ERR_PTR(-ENOMEM); pd->domain = domain = iommu_domain_alloc(&pci_bus_type); - if (IS_ERR_OR_NULL(domain)) { - usnic_err("Failed to allocate IOMMU domain with err %ld\n", - PTR_ERR(pd->domain)); + if (!domain) { + usnic_err("Failed to allocate IOMMU domain"); kfree(pd); - return ERR_PTR(domain ? PTR_ERR(domain) : -ENOMEM); + return ERR_PTR(-ENOMEM); } iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 9e1b203d756d..da149c278cb8 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1128,7 +1128,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) { struct ipoib_neigh_table *ntbl = &priv->ntbl; struct ipoib_neigh_hash *htbl; - struct ipoib_neigh **buckets; + struct ipoib_neigh __rcu **buckets; u32 size; clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); @@ -1146,7 +1146,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) htbl->size = size; htbl->mask = (size - 1); htbl->buckets = buckets; - ntbl->htbl = htbl; + RCU_INIT_POINTER(ntbl->htbl, htbl); htbl->ntbl = ntbl; atomic_set(&ntbl->entries, 0); @@ -1685,9 +1685,7 @@ static void ipoib_add_one(struct ib_device *device) struct net_device *dev; struct ipoib_dev_priv *priv; int s, e, p; - - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; + int count = 0; dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); if (!dev_list) @@ -1704,15 +1702,21 @@ static void ipoib_add_one(struct ib_device *device) } for (p = s; p <= e; ++p) { - if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND) + if (!rdma_protocol_ib(device, p)) continue; dev = ipoib_add_port("ib%d", device, p); if (!IS_ERR(dev)) { priv = netdev_priv(dev); list_add_tail(&priv->list, dev_list); + count++; } } + if (!count) { + kfree(dev_list); + return; + } + ib_set_client_data(device, &ipoib_client, dev_list); } @@ -1721,9 +1725,6 @@ static void ipoib_remove_one(struct ib_device *device) struct ipoib_dev_priv *priv, *tmp; struct list_head *dev_list; - if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) - return; - dev_list = ib_get_client_data(device, &ipoib_client); if (!dev_list) return; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index e5cc43074196..9e6ee82a8fd7 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -141,6 +141,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) .sq_sig_type = IB_SIGNAL_ALL_WR, 
.qp_type = IB_QPT_UD }; + struct ib_cq_init_attr cq_attr = {}; int ret, size; int i; @@ -178,14 +179,17 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) } else goto out_free_wq; - priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); + cq_attr.cqe = size; + priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, + dev, &cq_attr); if (IS_ERR(priv->recv_cq)) { printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name); goto out_cm_dev_cleanup; } + cq_attr.cqe = ipoib_sendq_size; priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, - dev, ipoib_sendq_size, 0); + dev, &cq_attr); if (IS_ERR(priv->send_cq)) { printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name); goto out_free_recv_cq; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index cc2dd35ffbc0..5c9f565ea0e8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -51,19 +51,22 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context); static void iser_cq_event_callback(struct ib_event *cause, void *context) { - iser_err("got cq event %d \n", cause->event); + iser_err("cq event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_qp_event_callback(struct ib_event *cause, void *context) { - iser_err("got qp event %d\n",cause->event); + iser_err("qp event %s (%d)\n", + ib_event_msg(cause->event), cause->event); } static void iser_event_handler(struct ib_event_handler *handler, struct ib_event *event) { - iser_err("async event %d on device %s port %d\n", event->event, - event->device->name, event->element.port_num); + iser_err("async event %s (%d) on device %s port %d\n", + ib_event_msg(event->event), event->event, + event->device->name, event->element.port_num); } /** @@ -123,14 +126,17 @@ static int iser_create_device_ib_res(struct iser_device *device) goto pd_err; for (i = 0; i < device->comps_used; i++) { + struct ib_cq_init_attr cq_attr = {}; struct iser_comp *comp = &device->comps[i]; comp->device = device; + cq_attr.cqe = max_cqe; + cq_attr.comp_vector = i; comp->cq = ib_create_cq(device->ib_device, iser_cq_callback, iser_cq_event_callback, (void *)comp, - max_cqe, i); + &cq_attr); if (IS_ERR(comp->cq)) { comp->cq = NULL; goto cq_err; @@ -873,8 +879,9 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve int ret = 0; iser_conn = (struct iser_conn *)cma_id->context; - iser_info("event %d status %d conn %p id %p\n", - event->event, event->status, cma_id->context, cma_id); + iser_info("%s (%d): status %d conn %p id %p\n", + rdma_event_msg(event->event), event->event, + event->status, cma_id->context, cma_id); mutex_lock(&iser_conn->state_mutex); switch (event->event) { @@ -913,7 +920,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve } break; default: - iser_err("Unexpected RDMA CM event (%d)\n", event->event); + iser_err("Unexpected RDMA CM event: %s (%d)\n", + rdma_event_msg(event->event), event->event); break; } mutex_unlock(&iser_conn->state_mutex); @@ -1173,10 +1181,13 @@ static void iser_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - iser_err("wr id %llx status %d vend_err %x\n", - wc->wr_id, wc->status, wc->vendor_err); + iser_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); else - iser_dbg("flush error: wr id %llx\n", wc->wr_id); + iser_dbg("%s 
(%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); if (wc->wr_id == ISER_BEACON_WRID) /* all flush errors were consumed */ diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 575a072d765f..f3b7a34e10d8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -80,7 +80,9 @@ isert_qp_event_callback(struct ib_event *e, void *context) { struct isert_conn *isert_conn = context; - isert_err("conn %p event: %d\n", isert_conn, e->event); + isert_err("%s (%d): conn %p\n", + ib_event_msg(e->event), e->event, isert_conn); + switch (e->event) { case IB_EVENT_COMM_EST: rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); @@ -318,15 +320,18 @@ isert_alloc_comps(struct isert_device *device, max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe); for (i = 0; i < device->comps_used; i++) { + struct ib_cq_init_attr cq_attr = {}; struct isert_comp *comp = &device->comps[i]; comp->device = device; INIT_WORK(&comp->work, isert_cq_work); + cq_attr.cqe = max_cqe; + cq_attr.comp_vector = i; comp->cq = ib_create_cq(device->ib_device, isert_cq_callback, isert_cq_event_callback, (void *)comp, - max_cqe, i); + &cq_attr); if (IS_ERR(comp->cq)) { isert_err("Unable to allocate cq\n"); ret = PTR_ERR(comp->cq); @@ -900,7 +905,8 @@ static int isert_np_cma_handler(struct isert_np *isert_np, enum rdma_cm_event_type event) { - isert_dbg("isert np %p, handling event %d\n", isert_np, event); + isert_dbg("%s (%d): isert np %p\n", + rdma_event_msg(event), event, isert_np); switch (event) { case RDMA_CM_EVENT_DEVICE_REMOVAL: @@ -974,7 +980,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - isert_info("event %d status %d id %p np %p\n", event->event, + isert_info("%s (%d): status %d id %p np %p\n", + rdma_event_msg(event->event), event->event, event->status, cma_id, cma_id->context); switch (event->event) { @@ -2108,10 +2115,13 @@ isert_handle_wc(struct ib_wc *wc) } } else { if (wc->status != IB_WC_WR_FLUSH_ERR) - isert_err("wr id %llx status %d vend_err %x\n", - wc->wr_id, wc->status, wc->vendor_err); + isert_err("%s (%d): wr id %llx vend_err %x\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id, wc->vendor_err); else - isert_dbg("flush error: wr id %llx\n", wc->wr_id); + isert_dbg("%s (%d): wr id %llx\n", + ib_wc_status_msg(wc->status), wc->status, + wc->wr_id); if (wc->wr_id != ISER_FASTREG_LI_WRID) isert_cq_comp_err(isert_conn, wc); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 918814cd0f80..eada8f758ad4 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -59,9 +59,10 @@ #define DRV_RELDATE "July 1, 2013" MODULE_AUTHOR("Roland Dreier"); -MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator " - "v" DRV_VERSION " (" DRV_RELDATE ")"); +MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator"); MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_INFO(release_date, DRV_RELDATE); static unsigned int srp_sg_tablesize; static unsigned int cmd_sg_entries; @@ -253,7 +254,8 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) static void srp_qp_event(struct ib_event *event, void *context) { - pr_debug("QP event %d\n", event->event); + pr_debug("QP event %s (%d)\n", + ib_event_msg(event->event), event->event); } static int srp_init_qp(struct srp_target_port *target, @@ -465,14 +467,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct 
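/*
 * The ULP logging hunks replace bare numeric event codes with the
 * new string helpers while keeping the number for grepping. The same
 * pattern in one spot; my_async_event_handler is illustrative:
 */
static void my_async_event_handler(struct ib_event_handler *handler,
				   struct ib_event *event)
{
	pr_err("async event %s (%d) on device %s port %d\n",
	       ib_event_msg(event->event), event->event,
	       event->device->name, event->element.port_num);
	/* Completion status uses ib_wc_status_msg(wc->status) and CM
	 * events rdma_event_msg(event->event), with the same
	 * "%s (%d)" shape. */
}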
srp_target_port *target) */ static void srp_destroy_qp(struct srp_rdma_ch *ch) { - struct srp_target_port *target = ch->target; static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID }; struct ib_recv_wr *bad_wr; int ret; /* Destroying a QP and reusing ch->done is only safe if not connected */ - WARN_ON_ONCE(target->connected); + WARN_ON_ONCE(ch->connected); ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret); @@ -499,6 +500,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) struct ib_fmr_pool *fmr_pool = NULL; struct srp_fr_pool *fr_pool = NULL; const int m = 1 + dev->use_fast_reg; + struct ib_cq_init_attr cq_attr = {}; int ret; init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); @@ -506,15 +508,19 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) return -ENOMEM; /* + 1 for SRP_LAST_WR_ID */ + cq_attr.cqe = target->queue_size + 1; + cq_attr.comp_vector = ch->comp_vector; recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, - target->queue_size + 1, ch->comp_vector); + &cq_attr); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } + cq_attr.cqe = m * target->queue_size; + cq_attr.comp_vector = ch->comp_vector; send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, - m * target->queue_size, ch->comp_vector); + &cq_attr); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; @@ -781,7 +787,7 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich) shost_printk(KERN_DEBUG, target->scsi_host, PFX "Topspin/Cisco initiator port ID workaround " "activated for target GUID %016llx\n", - (unsigned long long) be64_to_cpu(target->ioc_guid)); + be64_to_cpu(target->ioc_guid)); memset(req->priv.initiator_port_id, 0, 8); memcpy(req->priv.initiator_port_id + 8, &target->srp_host->srp_dev->dev->node_guid, 8); @@ -811,35 +817,19 @@ static bool srp_queue_remove_work(struct srp_target_port *target) return changed; } -static bool srp_change_conn_state(struct srp_target_port *target, - bool connected) -{ - bool changed = false; - - spin_lock_irq(&target->lock); - if (target->connected != connected) { - target->connected = connected; - changed = true; - } - spin_unlock_irq(&target->lock); - - return changed; -} - static void srp_disconnect_target(struct srp_target_port *target) { struct srp_rdma_ch *ch; int i; - if (srp_change_conn_state(target, false)) { - /* XXX should send SRP_I_LOGOUT request */ + /* XXX should send SRP_I_LOGOUT request */ - for (i = 0; i < target->ch_count; i++) { - ch = &target->ch[i]; - if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { - shost_printk(KERN_DEBUG, target->scsi_host, - PFX "Sending CM DREQ failed\n"); - } + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + ch->connected = false; + if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { + shost_printk(KERN_DEBUG, target->scsi_host, + PFX "Sending CM DREQ failed\n"); } } } @@ -852,7 +842,7 @@ static void srp_free_req_data(struct srp_target_port *target, struct srp_request *req; int i; - if (!ch->target || !ch->req_ring) + if (!ch->req_ring) return; for (i = 0; i < target->req_ring_size; ++i) { @@ -986,14 +976,26 @@ static void srp_rport_delete(struct srp_rport *rport) srp_queue_remove_work(target); } +/** + * srp_connected_ch() - number of connected channels + * @target: SRP target port. 
+ */ +static int srp_connected_ch(struct srp_target_port *target) +{ + int i, c = 0; + + for (i = 0; i < target->ch_count; i++) + c += target->ch[i].connected; + + return c; +} + static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) { struct srp_target_port *target = ch->target; int ret; - WARN_ON_ONCE(!multich && target->connected); - - target->qp_in_error = false; + WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0); ret = srp_lookup_path(ch); if (ret) @@ -1016,7 +1018,7 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) */ switch (ch->status) { case 0: - srp_change_conn_state(target, true); + ch->connected = true; return 0; case SRP_PORT_REDIRECT: @@ -1214,14 +1216,10 @@ static int srp_rport_reconnect(struct srp_rport *rport) */ for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (!ch->target) - break; ret += srp_new_cm_id(ch); } for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (!ch->target) - break; for (j = 0; j < target->req_ring_size; ++j) { struct srp_request *req = &ch->req_ring[j]; @@ -1230,8 +1228,6 @@ static int srp_rport_reconnect(struct srp_rport *rport) } for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (!ch->target) - break; /* * Whether or not creating a new CM ID succeeded, create a new * QP. This guarantees that all completion callback function @@ -1243,13 +1239,13 @@ static int srp_rport_reconnect(struct srp_rport *rport) for (j = 0; j < target->queue_size; ++j) list_add(&ch->tx_ring[j]->list, &ch->free_tx); } + + target->qp_in_error = false; + for (i = 0; i < target->ch_count; i++) { ch = &target->ch[i]; - if (ret || !ch->target) { - if (i > 1) - ret = 0; + if (ret) break; - } ret = srp_connect_ch(ch, multich); multich = true; } @@ -1842,7 +1838,7 @@ static void srp_process_aer_req(struct srp_rdma_ch *ch, s32 delta = be32_to_cpu(req->req_lim_delta); shost_printk(KERN_ERR, target->scsi_host, PFX - "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); + "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) shost_printk(KERN_ERR, target->scsi_host, PFX @@ -1929,20 +1925,21 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, return; } - if (target->connected && !target->qp_in_error) { + if (ch->connected && !target->qp_in_error) { if (wr_id & LOCAL_INV_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX - "LOCAL_INV failed with status %d\n", - wc_status); + "LOCAL_INV failed with status %s (%d)\n", + ib_wc_status_msg(wc_status), wc_status); } else if (wr_id & FAST_REG_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX - "FAST_REG_MR failed status %d\n", - wc_status); + "FAST_REG_MR failed status %s (%d)\n", + ib_wc_status_msg(wc_status), wc_status); } else { shost_printk(KERN_ERR, target->scsi_host, - PFX "failed %s status %d for iu %p\n", + PFX "failed %s status %s (%d) for iu %p\n", send_err ? 
"send" : "receive", - wc_status, (void *)(uintptr_t)wr_id); + ib_wc_status_msg(wc_status), wc_status, + (void *)(uintptr_t)wr_id); } queue_work(system_long_wq, &target->tl_err_work); } @@ -2034,7 +2031,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) memset(cmd, 0, sizeof *cmd); cmd->opcode = SRP_CMD; - cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); + int_to_scsilun(scmnd->device->lun, &cmd->lun); cmd->tag = tag; memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); @@ -2367,7 +2364,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) case IB_CM_DREQ_RECEIVED: shost_printk(KERN_WARNING, target->scsi_host, PFX "DREQ received - connection closed\n"); - srp_change_conn_state(target, false); + ch->connected = false; if (ib_send_cm_drep(cm_id, NULL, 0)) shost_printk(KERN_ERR, target->scsi_host, PFX "Sending CM DREP failed\n"); @@ -2414,8 +2411,8 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth) return scsi_change_queue_depth(sdev, qdepth); } -static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, - unsigned int lun, u8 func) +static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun, + u8 func) { struct srp_target_port *target = ch->target; struct srp_rport *rport = target->rport; @@ -2423,7 +2420,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; - if (!target->connected || target->qp_in_error) + if (!ch->connected || target->qp_in_error) return -1; init_completion(&ch->tsk_mgmt_done); @@ -2449,7 +2446,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, memset(tsk_mgmt, 0, sizeof *tsk_mgmt); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64((u64) lun << 48); + int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; tsk_mgmt->tsk_mgmt_func = func; tsk_mgmt->task_tag = req_tag; @@ -2563,8 +2560,7 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%016llx\n", - (unsigned long long) be64_to_cpu(target->id_ext)); + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); } static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, @@ -2572,8 +2568,7 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%016llx\n", - (unsigned long long) be64_to_cpu(target->ioc_guid)); + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); } static ssize_t show_service_id(struct device *dev, @@ -2581,8 +2576,7 @@ static ssize_t show_service_id(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%016llx\n", - (unsigned long long) be64_to_cpu(target->service_id)); + return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id)); } static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, @@ -2773,7 +2767,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) target->state = SRP_TARGET_SCANNING; sprintf(target->target_name, "SRP.T10:%016llX", - (unsigned long long) be64_to_cpu(target->id_ext)); + be64_to_cpu(target->id_ext)); if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) return -ENODEV; @@ -2797,7 +2791,8 @@ static int srp_add_target(struct srp_host *host, 
struct srp_target_port *target) scsi_scan_target(&target->scsi_host->shost_gendev, 0, target->scsi_id, SCAN_WILD_CARD, 0); - if (!target->connected || target->qp_in_error) { + if (srp_connected_ch(target) < target->ch_count || + target->qp_in_error) { shost_printk(KERN_INFO, target->scsi_host, PFX "SCSI scan failed - removing SCSI host\n"); srp_queue_remove_work(target); @@ -3146,7 +3141,7 @@ static ssize_t srp_create_target(struct device *dev, target_host->transportt = ib_srp_transport_template; target_host->max_channel = 0; target_host->max_id = 1; - target_host->max_lun = SRP_MAX_LUN; + target_host->max_lun = -1LL; target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; target = host_to_target(target_host); @@ -3172,11 +3167,11 @@ static ssize_t srp_create_target(struct device *dev, ret = srp_parse_options(buf, target); if (ret) - goto err; + goto out; ret = scsi_init_shared_tag_map(target_host, target_host->can_queue); if (ret) - goto err; + goto out; target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; @@ -3187,7 +3182,7 @@ static ssize_t srp_create_target(struct device *dev, be64_to_cpu(target->ioc_guid), be64_to_cpu(target->initiator_ext)); ret = -EEXIST; - goto err; + goto out; } if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg && @@ -3208,7 +3203,7 @@ static ssize_t srp_create_target(struct device *dev, spin_lock_init(&target->lock); ret = ib_query_gid(ibdev, host->port, 0, &target->sgid); if (ret) - goto err; + goto out; ret = -ENOMEM; target->ch_count = max_t(unsigned, num_online_nodes(), @@ -3219,7 +3214,7 @@ static ssize_t srp_create_target(struct device *dev, target->ch = kcalloc(target->ch_count, sizeof(*target->ch), GFP_KERNEL); if (!target->ch) - goto err; + goto out; node_idx = 0; for_each_online_node(node) { @@ -3315,9 +3310,6 @@ err_disconnect: } kfree(target->ch); - -err: - scsi_host_put(target_host); goto out; } diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index a611556406ac..17ee3f80ba55 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -54,7 +54,6 @@ enum { SRP_DLID_REDIRECT = 2, SRP_STALE_CONN = 3, - SRP_MAX_LUN = 512, SRP_DEF_SG_TABLESIZE = 12, SRP_DEFAULT_QUEUE_SIZE = 1 << 6, @@ -170,6 +169,7 @@ struct srp_rdma_ch { struct completion tsk_mgmt_done; u8 tsk_mgmt_status; + bool connected; }; /** @@ -214,7 +214,6 @@ struct srp_target_port { __be16 pkey; u32 rq_tmo_jiffies; - bool connected; int zero_req_lim; diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 9b84b4c0a000..4556cd11288e 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -41,6 +41,7 @@ #include <linux/string.h> #include <linux/delay.h> #include <linux/atomic.h> +#include <scsi/scsi_proto.h> #include <scsi/scsi_tcq.h> #include <target/configfs_macros.h> #include <target/target_core_base.h> @@ -476,7 +477,8 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, mad_wc->wc->pkey_index, 0, IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA, - GFP_KERNEL); + GFP_KERNEL, + IB_MGMT_BASE_VERSION); if (IS_ERR(rsp)) goto err_rsp; @@ -2080,6 +2082,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) struct srpt_port *sport = ch->sport; struct srpt_device *sdev = sport->sdev; u32 srp_sq_size = sport->port_attrib.srp_sq_size; + struct ib_cq_init_attr cq_attr = {}; int ret; WARN_ON(ch->rq_size < 1); @@ -2090,8 +2093,9 @@ static 
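/*
 * ib_create_send_mad() now takes the MAD base version as a trailing
 * argument; every caller touched in this series passes
 * IB_MGMT_BASE_VERSION. A response-buffer sketch mirroring the srpt
 * hunk below, with foo_build_device_mgmt_rsp illustrative:
 */
static struct ib_mad_send_buf *
foo_build_device_mgmt_rsp(struct ib_mad_agent *agent,
			  struct ib_mad_recv_wc *mad_wc)
{
	return ib_create_send_mad(agent, mad_wc->wc->src_qp,
				  mad_wc->wc->pkey_index, 0 /* rmpp */,
				  IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				  GFP_KERNEL,
				  IB_MGMT_BASE_VERSION);	/* new arg */
}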
int srpt_create_ch_ib(struct srpt_rdma_ch *ch) goto out; retry: + cq_attr.cqe = ch->rq_size + srp_sq_size; ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, - ch->rq_size + srp_sq_size, 0); + &cq_attr); if (IS_ERR(ch->cq)) { ret = PTR_ERR(ch->cq); pr_err("failed to create CQ cqe= %d ret= %d\n", diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index 3dae156905de..d85c0c205625 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -245,7 +245,7 @@ struct srpt_send_ioctx { u8 n_rdma; u8 n_rbuf; bool queue_status_only; - u8 sense_data[SCSI_SENSE_BUFFERSIZE]; + u8 sense_data[TRANSPORT_SENSE_BUFFER]; }; /** diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e1c7e9e51045..fffea87a014f 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -34,6 +34,7 @@ #include <linux/irq.h> #include <linux/msi.h> #include <linux/dma-contiguous.h> +#include <linux/irqdomain.h> #include <asm/irq_remapping.h> #include <asm/io_apic.h> #include <asm/apic.h> @@ -3852,6 +3853,21 @@ union irte { } fields; }; +struct irq_2_irte { + u16 devid; /* Device ID for IRTE table */ + u16 index; /* Index into IRTE table*/ +}; + +struct amd_ir_data { + struct irq_2_irte irq_2_irte; + union irte irte_entry; + union { + struct msi_msg msi_entry; + }; +}; + +static struct irq_chip amd_ir_chip; + #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) #define DTE_IRQ_TABLE_LEN (8ULL << 1) @@ -3945,7 +3961,7 @@ out_unlock: return table; } -static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count) +static int alloc_irq_index(u16 devid, int count) { struct irq_remap_table *table; unsigned long flags; @@ -3967,18 +3983,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count) c = 0; if (c == count) { - struct irq_2_irte *irte_info; - for (; c != 0; --c) table->table[index - c + 1] = IRTE_ALLOCATED; index -= count - 1; - - cfg->remapped = 1; - irte_info = &cfg->irq_2_irte; - irte_info->devid = devid; - irte_info->index = index; - goto out; } } @@ -3991,22 +3999,6 @@ out: return index; } -static int get_irte(u16 devid, int index, union irte *irte) -{ - struct irq_remap_table *table; - unsigned long flags; - - table = get_irq_table(devid, false); - if (!table) - return -ENOMEM; - - spin_lock_irqsave(&table->lock, flags); - irte->val = table->table[index]; - spin_unlock_irqrestore(&table->lock, flags); - - return 0; -} - static int modify_irte(u16 devid, int index, union irte irte) { struct irq_remap_table *table; @@ -4053,243 +4045,316 @@ static void free_irte(u16 devid, int index) iommu_completion_wait(iommu); } -static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, - unsigned int destination, int vector, - struct io_apic_irq_attr *attr) +static int get_devid(struct irq_alloc_info *info) { - struct irq_remap_table *table; - struct irq_2_irte *irte_info; - struct irq_cfg *cfg; - union irte irte; - int ioapic_id; - int index; - int devid; - int ret; - - cfg = irq_cfg(irq); - if (!cfg) - return -EINVAL; - - irte_info = &cfg->irq_2_irte; - ioapic_id = mpc_ioapic_id(attr->ioapic); - devid = get_ioapic_devid(ioapic_id); - - if (devid < 0) - return devid; - - table = get_irq_table(devid, true); - if (table == NULL) - return -ENOMEM; - - index = attr->ioapic_pin; + int devid = -1; - /* Setup IRQ remapping info */ - cfg->remapped = 1; - irte_info->devid = devid; - irte_info->index = index; + switch (info->type) { + case 
X86_IRQ_ALLOC_TYPE_IOAPIC: + devid = get_ioapic_devid(info->ioapic_id); + break; + case X86_IRQ_ALLOC_TYPE_HPET: + devid = get_hpet_devid(info->hpet_id); + break; + case X86_IRQ_ALLOC_TYPE_MSI: + case X86_IRQ_ALLOC_TYPE_MSIX: + devid = get_device_id(&info->msi_dev->dev); + break; + default: + BUG_ON(1); + break; + } - /* Setup IRTE for IOMMU */ - irte.val = 0; - irte.fields.vector = vector; - irte.fields.int_type = apic->irq_delivery_mode; - irte.fields.destination = destination; - irte.fields.dm = apic->irq_dest_mode; - irte.fields.valid = 1; - - ret = modify_irte(devid, index, irte); - if (ret) - return ret; + return devid; +} - /* Setup IOAPIC entry */ - memset(entry, 0, sizeof(*entry)); +static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info) +{ + struct amd_iommu *iommu; + int devid; - entry->vector = index; - entry->mask = 0; - entry->trigger = attr->trigger; - entry->polarity = attr->polarity; + if (!info) + return NULL; - /* - * Mask level triggered irqs. - */ - if (attr->trigger) - entry->mask = 1; + devid = get_devid(info); + if (devid >= 0) { + iommu = amd_iommu_rlookup_table[devid]; + if (iommu) + return iommu->ir_domain; + } - return 0; + return NULL; } -static int set_affinity(struct irq_data *data, const struct cpumask *mask, - bool force) +static struct irq_domain *get_irq_domain(struct irq_alloc_info *info) { - struct irq_2_irte *irte_info; - unsigned int dest, irq; - struct irq_cfg *cfg; - union irte irte; - int err; - - if (!config_enabled(CONFIG_SMP)) - return -1; - - cfg = irqd_cfg(data); - irq = data->irq; - irte_info = &cfg->irq_2_irte; + struct amd_iommu *iommu; + int devid; - if (!cpumask_intersects(mask, cpu_online_mask)) - return -EINVAL; + if (!info) + return NULL; - if (get_irte(irte_info->devid, irte_info->index, &irte)) - return -EBUSY; + switch (info->type) { + case X86_IRQ_ALLOC_TYPE_MSI: + case X86_IRQ_ALLOC_TYPE_MSIX: + devid = get_device_id(&info->msi_dev->dev); + if (devid >= 0) { + iommu = amd_iommu_rlookup_table[devid]; + if (iommu) + return iommu->msi_domain; + } + break; + default: + break; + } - if (assign_irq_vector(irq, cfg, mask)) - return -EBUSY; + return NULL; +} - err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest); - if (err) { - if (assign_irq_vector(irq, cfg, data->affinity)) - pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq); - return err; - } +struct irq_remap_ops amd_iommu_irq_ops = { + .prepare = amd_iommu_prepare, + .enable = amd_iommu_enable, + .disable = amd_iommu_disable, + .reenable = amd_iommu_reenable, + .enable_faulting = amd_iommu_enable_faulting, + .get_ir_irq_domain = get_ir_irq_domain, + .get_irq_domain = get_irq_domain, +}; - irte.fields.vector = cfg->vector; - irte.fields.destination = dest; +static void irq_remapping_prepare_irte(struct amd_ir_data *data, + struct irq_cfg *irq_cfg, + struct irq_alloc_info *info, + int devid, int index, int sub_handle) +{ + struct irq_2_irte *irte_info = &data->irq_2_irte; + struct msi_msg *msg = &data->msi_entry; + union irte *irte = &data->irte_entry; + struct IO_APIC_route_entry *entry; - modify_irte(irte_info->devid, irte_info->index, irte); + data->irq_2_irte.devid = devid; + data->irq_2_irte.index = index + sub_handle; - if (cfg->move_in_progress) - send_cleanup_vector(cfg); + /* Setup IRTE for IOMMU */ + irte->val = 0; + irte->fields.vector = irq_cfg->vector; + irte->fields.int_type = apic->irq_delivery_mode; + irte->fields.destination = irq_cfg->dest_apicid; + irte->fields.dm = apic->irq_dest_mode; + irte->fields.valid = 1; + + switch 
(info->type) { + case X86_IRQ_ALLOC_TYPE_IOAPIC: + /* Setup IOAPIC entry */ + entry = info->ioapic_entry; + info->ioapic_entry = NULL; + memset(entry, 0, sizeof(*entry)); + entry->vector = index; + entry->mask = 0; + entry->trigger = info->ioapic_trigger; + entry->polarity = info->ioapic_polarity; + /* Mask level triggered irqs. */ + if (info->ioapic_trigger) + entry->mask = 1; + break; - cpumask_copy(data->affinity, mask); + case X86_IRQ_ALLOC_TYPE_HPET: + case X86_IRQ_ALLOC_TYPE_MSI: + case X86_IRQ_ALLOC_TYPE_MSIX: + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO; + msg->data = irte_info->index; + break; - return 0; + default: + BUG_ON(1); + break; + } } -static int free_irq(int irq) +static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) { - struct irq_2_irte *irte_info; + struct irq_alloc_info *info = arg; + struct irq_data *irq_data; + struct amd_ir_data *data; struct irq_cfg *cfg; + int i, ret, devid; + int index = -1; - cfg = irq_cfg(irq); - if (!cfg) + if (!info) + return -EINVAL; + if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI && + info->type != X86_IRQ_ALLOC_TYPE_MSIX) return -EINVAL; - irte_info = &cfg->irq_2_irte; - - free_irte(irte_info->devid, irte_info->index); + /* + * With IRQ remapping enabled, don't need contiguous CPU vectors + * to support multiple MSI interrupts. + */ + if (info->type == X86_IRQ_ALLOC_TYPE_MSI) + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; - return 0; -} + devid = get_devid(info); + if (devid < 0) + return -EINVAL; -static void compose_msi_msg(struct pci_dev *pdev, - unsigned int irq, unsigned int dest, - struct msi_msg *msg, u8 hpet_id) -{ - struct irq_2_irte *irte_info; - struct irq_cfg *cfg; - union irte irte; + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); + if (ret < 0) + return ret; - cfg = irq_cfg(irq); - if (!cfg) - return; + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto out_free_parent; - irte_info = &cfg->irq_2_irte; + if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) { + if (get_irq_table(devid, true)) + index = info->ioapic_pin; + else + ret = -ENOMEM; + } else { + index = alloc_irq_index(devid, nr_irqs); + } + if (index < 0) { + pr_warn("Failed to allocate IRTE\n"); + kfree(data); + goto out_free_parent; + } - irte.val = 0; - irte.fields.vector = cfg->vector; - irte.fields.int_type = apic->irq_delivery_mode; - irte.fields.destination = dest; - irte.fields.dm = apic->irq_dest_mode; - irte.fields.valid = 1; + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + cfg = irqd_cfg(irq_data); + if (!irq_data || !cfg) { + ret = -EINVAL; + goto out_free_data; + } - modify_irte(irte_info->devid, irte_info->index, irte); + if (i > 0) { + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto out_free_data; + } + irq_data->hwirq = (devid << 16) + i; + irq_data->chip_data = data; + irq_data->chip = &amd_ir_chip; + irq_remapping_prepare_irte(data, cfg, info, devid, index, i); + irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT); + } + return 0; - msg->address_hi = MSI_ADDR_BASE_HI; - msg->address_lo = MSI_ADDR_BASE_LO; - msg->data = irte_info->index; +out_free_data: + for (i--; i >= 0; i--) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data) + kfree(irq_data->chip_data); + } + for (i = 0; i < nr_irqs; i++) + free_irte(devid, index + i); +out_free_parent: + irq_domain_free_irqs_common(domain, virq, nr_irqs); + return ret; } -static int 
msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec) +static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { - struct irq_cfg *cfg; - int index; - u16 devid; - - if (!pdev) - return -EINVAL; + struct irq_2_irte *irte_info; + struct irq_data *irq_data; + struct amd_ir_data *data; + int i; - cfg = irq_cfg(irq); - if (!cfg) - return -EINVAL; + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + data = irq_data->chip_data; + irte_info = &data->irq_2_irte; + free_irte(irte_info->devid, irte_info->index); + kfree(data); + } + } + irq_domain_free_irqs_common(domain, virq, nr_irqs); +} - devid = get_device_id(&pdev->dev); - index = alloc_irq_index(cfg, devid, nvec); +static void irq_remapping_activate(struct irq_domain *domain, + struct irq_data *irq_data) +{ + struct amd_ir_data *data = irq_data->chip_data; + struct irq_2_irte *irte_info = &data->irq_2_irte; - return index < 0 ? MAX_IRQS_PER_TABLE : index; + modify_irte(irte_info->devid, irte_info->index, data->irte_entry); } -static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, - int index, int offset) +static void irq_remapping_deactivate(struct irq_domain *domain, + struct irq_data *irq_data) { - struct irq_2_irte *irte_info; - struct irq_cfg *cfg; - u16 devid; + struct amd_ir_data *data = irq_data->chip_data; + struct irq_2_irte *irte_info = &data->irq_2_irte; + union irte entry; - if (!pdev) - return -EINVAL; + entry.val = 0; + modify_irte(irte_info->devid, irte_info->index, data->irte_entry); +} - cfg = irq_cfg(irq); - if (!cfg) - return -EINVAL; +static struct irq_domain_ops amd_ir_domain_ops = { + .alloc = irq_remapping_alloc, + .free = irq_remapping_free, + .activate = irq_remapping_activate, + .deactivate = irq_remapping_deactivate, +}; - if (index >= MAX_IRQS_PER_TABLE) - return 0; +static int amd_ir_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct amd_ir_data *ir_data = data->chip_data; + struct irq_2_irte *irte_info = &ir_data->irq_2_irte; + struct irq_cfg *cfg = irqd_cfg(data); + struct irq_data *parent = data->parent_data; + int ret; - devid = get_device_id(&pdev->dev); - irte_info = &cfg->irq_2_irte; + ret = parent->chip->irq_set_affinity(parent, mask, force); + if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) + return ret; - cfg->remapped = 1; - irte_info->devid = devid; - irte_info->index = index + offset; + /* + * Atomically updates the IRTE with the new destination, vector + * and flushes the interrupt entry cache. + */ + ir_data->irte_entry.fields.vector = cfg->vector; + ir_data->irte_entry.fields.destination = cfg->dest_apicid; + modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry); - return 0; + /* + * After this point, all the interrupts will start arriving + * at the new destination. So, time to cleanup the previous + * vector allocation. 
+ */ + send_cleanup_vector(cfg); + + return IRQ_SET_MASK_OK_DONE; } -static int alloc_hpet_msi(unsigned int irq, unsigned int id) +static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) { - struct irq_2_irte *irte_info; - struct irq_cfg *cfg; - int index, devid; + struct amd_ir_data *ir_data = irq_data->chip_data; - cfg = irq_cfg(irq); - if (!cfg) - return -EINVAL; + *msg = ir_data->msi_entry; +} - irte_info = &cfg->irq_2_irte; - devid = get_hpet_devid(id); - if (devid < 0) - return devid; +static struct irq_chip amd_ir_chip = { + .irq_ack = ir_ack_apic_edge, + .irq_set_affinity = amd_ir_set_affinity, + .irq_compose_msi_msg = ir_compose_msi_msg, +}; - index = alloc_irq_index(cfg, devid, 1); - if (index < 0) - return index; +int amd_iommu_create_irq_domain(struct amd_iommu *iommu) +{ + iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu); + if (!iommu->ir_domain) + return -ENOMEM; - cfg->remapped = 1; - irte_info->devid = devid; - irte_info->index = index; + iommu->ir_domain->parent = arch_get_ir_parent_domain(); + iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain); return 0; } - -struct irq_remap_ops amd_iommu_irq_ops = { - .prepare = amd_iommu_prepare, - .enable = amd_iommu_enable, - .disable = amd_iommu_disable, - .reenable = amd_iommu_reenable, - .enable_faulting = amd_iommu_enable_faulting, - .setup_ioapic_entry = setup_ioapic_entry, - .set_affinity = set_affinity, - .free_irq = free_irq, - .compose_msi_msg = compose_msi_msg, - .msi_alloc_irq = msi_alloc_irq, - .msi_setup_irq = msi_setup_irq, - .alloc_hpet_msi = alloc_hpet_msi, -}; #endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 450ef5001a65..c17df04d7a7f 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1124,6 +1124,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (ret) return ret; + ret = amd_iommu_create_irq_domain(iommu); + if (ret) + return ret; + /* * Make sure IOMMU is not considered to translate itself. The IVRS * table tells us so, but this is a lie! 
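
The AMD conversion above and the Intel conversion that follows share one shape: each IOMMU now registers a child irqdomain whose .alloc callback first delegates CPU-vector allocation to the parent x86 vector domain through irq_domain_alloc_irqs_parent() and then attaches per-interrupt remap-table state to each irq_data, replacing the old setup_ioapic_entry()/set_affinity()/msi_alloc_irq() callback set. Below is a minimal sketch of that pattern, not code from this patch; the demo_* names are hypothetical, and only the generic irqdomain helpers are real kernel API.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>

/* Per-interrupt state a remapping driver hangs off irq_data
 * (compare irq_2_irte above and irq_2_iommu below). */
struct demo_ir_data {
	u16 devid;	/* which remap table (device id); unused in this sketch */
	u16 index;	/* slot within that table */
};

static struct irq_chip demo_ir_chip = {
	.name			= "DEMO-IR",
	.irq_ack		= irq_chip_ack_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};

static int demo_ir_alloc(struct irq_domain *domain, unsigned int virq,
			 unsigned int nr_irqs, void *arg)
{
	int i, ret;

	/* Let the parent (x86 vector) domain allocate CPU vectors first. */
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	/* Attach remapping state; a real driver would also program
	 * one IRTE per interrupt here. */
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
		struct demo_ir_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

		if (!data) {
			while (--i >= 0)
				kfree(irq_domain_get_irq_data(domain,
						virq + i)->chip_data);
			irq_domain_free_irqs_common(domain, virq, nr_irqs);
			return -ENOMEM;
		}
		data->index = i;
		irqd->hwirq = i;	/* the real drivers encode devid here */
		irqd->chip = &demo_ir_chip;
		irqd->chip_data = data;
	}
	return 0;
}

static void demo_ir_free(struct irq_domain *domain, unsigned int virq,
			 unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		kfree(irq_domain_get_irq_data(domain, virq + i)->chip_data);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static const struct irq_domain_ops demo_ir_ops = {
	.alloc	= demo_ir_alloc,
	.free	= demo_ir_free,
};

Both drivers then hook their child domain into the hierarchy, the AMD side with irq_domain_add_tree() plus ir_domain->parent = arch_get_ir_parent_domain(), the Intel side with irq_domain_add_hierarchy(), so a later irq_domain_alloc_irqs() on the remapping domain allocates the underlying CPU vectors automatically.
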
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 72b0fd455e24..0a21142d3639 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h @@ -62,6 +62,15 @@ extern u8 amd_iommu_pc_get_max_counters(u16 devid); extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, u64 *value, bool is_write); +#ifdef CONFIG_IRQ_REMAP +extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu); +#else +static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu) +{ + return 0; +} +#endif + #define PPR_SUCCESS 0x0 #define PPR_INVALID 0x1 #define PPR_FAILURE 0xf diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 05030e523771..6533e874c9d7 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -398,6 +398,7 @@ struct amd_iommu_fault { struct iommu_domain; +struct irq_domain; /* * This structure contains generic data for IOMMU protection domains @@ -579,6 +580,10 @@ struct amd_iommu { /* The maximum PC banks and counters/bank (PCSup=1) */ u8 max_banks; u8 max_counters; +#ifdef CONFIG_IRQ_REMAP + struct irq_domain *ir_domain; + struct irq_domain *msi_domain; +#endif }; struct devid_map { diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 9847613085e1..536f2d8ea41a 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1087,8 +1087,8 @@ static void free_iommu(struct intel_iommu *iommu) if (iommu->irq) { free_irq(iommu->irq, iommu); - irq_set_handler_data(iommu->irq, NULL); dmar_free_hwirq(iommu->irq); + iommu->irq = 0; } if (iommu->qi) { @@ -1642,23 +1642,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu) if (iommu->irq) return 0; - irq = dmar_alloc_hwirq(); - if (irq <= 0) { + irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu); + if (irq > 0) { + iommu->irq = irq; + } else { pr_err("IOMMU: no free vectors\n"); return -EINVAL; } - irq_set_handler_data(irq, iommu); - iommu->irq = irq; - - ret = arch_setup_dmar_msi(irq); - if (ret) { - irq_set_handler_data(irq, NULL); - iommu->irq = 0; - dmar_free_hwirq(irq); - return ret; - } - ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu); if (ret) pr_err("IOMMU: can't request irq\n"); diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 5709ae9c3e77..80f1d1486247 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -8,6 +8,7 @@ #include <linux/irq.h> #include <linux/intel-iommu.h> #include <linux/acpi.h> +#include <linux/irqdomain.h> #include <asm/io_apic.h> #include <asm/smp.h> #include <asm/cpu.h> @@ -17,6 +18,11 @@ #include "irq_remapping.h" +enum irq_mode { + IRQ_REMAPPING, + IRQ_POSTING, +}; + struct ioapic_scope { struct intel_iommu *iommu; unsigned int id; @@ -31,6 +37,22 @@ struct hpet_scope { unsigned int devfn; }; +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; + enum irq_mode mode; +}; + +struct intel_ir_data { + struct irq_2_iommu irq_2_iommu; + struct irte irte_entry; + union { + struct msi_msg msi_entry; + }; +}; + #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) #define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8) @@ -50,43 +72,14 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS]; * the dmar_global_lock. 
*/ static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); +static struct irq_domain_ops intel_ir_domain_ops; static int __init parse_ioapics_under_ir(void); -static struct irq_2_iommu *irq_2_iommu(unsigned int irq) -{ - struct irq_cfg *cfg = irq_cfg(irq); - return cfg ? &cfg->irq_2_iommu : NULL; -} - -static int get_irte(int irq, struct irte *entry) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - int index; - - if (!entry || !irq_iommu) - return -1; - - raw_spin_lock_irqsave(&irq_2_ir_lock, flags); - - if (unlikely(!irq_iommu->iommu)) { - raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } - - index = irq_iommu->irte_index + irq_iommu->sub_handle; - *entry = *(irq_iommu->iommu->ir_table->base + index); - - raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return 0; -} - -static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +static int alloc_irte(struct intel_iommu *iommu, int irq, + struct irq_2_iommu *irq_iommu, u16 count) { struct ir_table *table = iommu->ir_table; - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - struct irq_cfg *cfg = irq_cfg(irq); unsigned int mask = 0; unsigned long flags; int index; @@ -113,11 +106,11 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) if (index < 0) { pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); } else { - cfg->remapped = 1; irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = 0; irq_iommu->irte_mask = mask; + irq_iommu->mode = IRQ_REMAPPING; } raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); @@ -135,47 +128,9 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) return qi_submit_sync(&desc, iommu); } -static int map_irq_to_irte_handle(int irq, u16 *sub_handle) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - int index; - - if (!irq_iommu) - return -1; - - raw_spin_lock_irqsave(&irq_2_ir_lock, flags); - *sub_handle = irq_iommu->sub_handle; - index = irq_iommu->irte_index; - raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return index; -} - -static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) +static int modify_irte(struct irq_2_iommu *irq_iommu, + struct irte *irte_modified) { - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - struct irq_cfg *cfg = irq_cfg(irq); - unsigned long flags; - - if (!irq_iommu) - return -1; - - raw_spin_lock_irqsave(&irq_2_ir_lock, flags); - - cfg->remapped = 1; - irq_iommu->iommu = iommu; - irq_iommu->irte_index = index; - irq_iommu->sub_handle = subhandle; - irq_iommu->irte_mask = 0; - - raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return 0; -} - -static int modify_irte(int irq, struct irte *irte_modified) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); struct intel_iommu *iommu; unsigned long flags; struct irte *irte; @@ -196,6 +151,9 @@ static int modify_irte(int irq, struct irte *irte_modified) __iommu_flush_cache(iommu, irte, sizeof(*irte)); rc = qi_flush_iec(iommu, index, 0); + + /* Update iommu mode according to the IRTE mode */ + irq_iommu->mode = irte->pst ? 
IRQ_POSTING : IRQ_REMAPPING; raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); return rc; @@ -242,7 +200,7 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) return 0; iommu = irq_iommu->iommu; - index = irq_iommu->irte_index + irq_iommu->sub_handle; + index = irq_iommu->irte_index; start = iommu->ir_table->base + index; end = start + (1 << irq_iommu->irte_mask); @@ -257,29 +215,6 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) return qi_flush_iec(iommu, index, irq_iommu->irte_mask); } -static int free_irte(int irq) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - int rc; - - if (!irq_iommu) - return -1; - - raw_spin_lock_irqsave(&irq_2_ir_lock, flags); - - rc = clear_entries(irq_iommu); - - irq_iommu->iommu = NULL; - irq_iommu->irte_index = 0; - irq_iommu->sub_handle = 0; - irq_iommu->irte_mask = 0; - - raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return rc; -} - /* * source validation type */ @@ -488,7 +423,6 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); - if (!pages) { pr_err("IR%d: failed to allocate pages of order %d\n", iommu->seq_id, INTR_REMAP_PAGE_ORDER); @@ -502,11 +436,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) goto out_free_pages; } + iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(), + 0, INTR_REMAP_TABLE_ENTRIES, + NULL, &intel_ir_domain_ops, + iommu); + if (!iommu->ir_domain) { + pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); + goto out_free_bitmap; + } + iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain); + ir_table->base = page_address(pages); ir_table->bitmap = bitmap; iommu->ir_table = ir_table; return 0; +out_free_bitmap: + kfree(bitmap); out_free_pages: __free_pages(pages, INTR_REMAP_PAGE_ORDER); out_free_table: @@ -517,6 +463,14 @@ out_free_table: static void intel_teardown_irq_remapping(struct intel_iommu *iommu) { if (iommu && iommu->ir_table) { + if (iommu->ir_msi_domain) { + irq_domain_remove(iommu->ir_msi_domain); + iommu->ir_msi_domain = NULL; + } + if (iommu->ir_domain) { + irq_domain_remove(iommu->ir_domain); + iommu->ir_domain = NULL; + } free_pages((unsigned long)iommu->ir_table->base, INTR_REMAP_PAGE_ORDER); kfree(iommu->ir_table->bitmap); @@ -627,6 +581,26 @@ error: return -ENODEV; } +/* + * Set Posted-Interrupts capability. + */ +static inline void set_irq_posting_cap(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + + if (!disable_irq_post) { + intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP; + + for_each_iommu(iommu, drhd) + if (!cap_pi_support(iommu->cap)) { + intel_irq_remap_ops.capability &= + ~(1 << IRQ_POSTING_CAP); + break; + } + } +} + static int __init intel_enable_irq_remapping(void) { struct dmar_drhd_unit *drhd; @@ -702,12 +676,7 @@ static int __init intel_enable_irq_remapping(void) irq_remapping_enabled = 1; - /* - * VT-d has a different layout for IO-APIC entries when - * interrupt remapping is enabled. So it needs a special routine - * to print IO-APIC entries for debugging purposes too. - */ - x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries; + set_irq_posting_cap(); pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic"); @@ -909,6 +878,12 @@ static void disable_irq_remapping(void) iommu_disable_irq_remapping(iommu); } + + /* + * Clear Posted-Interrupts capability. 
+ */ + if (!disable_irq_post) + intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP); } static int reenable_irq_remapping(int eim) @@ -936,6 +911,8 @@ static int reenable_irq_remapping(int eim) if (!setup) goto error; + set_irq_posting_cap(); + return 0; error: @@ -945,8 +922,7 @@ error: return -1; } -static void prepare_irte(struct irte *irte, int vector, - unsigned int dest) +static void prepare_irte(struct irte *irte, int vector, unsigned int dest) { memset(irte, 0, sizeof(*irte)); @@ -966,76 +942,63 @@ static void prepare_irte(struct irte *irte, int vector, irte->redir_hint = 1; } -static int intel_setup_ioapic_entry(int irq, - struct IO_APIC_route_entry *route_entry, - unsigned int destination, int vector, - struct io_apic_irq_attr *attr) +static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info) { - int ioapic_id = mpc_ioapic_id(attr->ioapic); - struct intel_iommu *iommu; - struct IR_IO_APIC_route_entry *entry; - struct irte irte; - int index; - - down_read(&dmar_global_lock); - iommu = map_ioapic_to_ir(ioapic_id); - if (!iommu) { - pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); - index = -ENODEV; - } else { - index = alloc_irte(iommu, irq, 1); - if (index < 0) { - pr_warn("Failed to allocate IRTE for ioapic %d\n", - ioapic_id); - index = -ENOMEM; - } - } - up_read(&dmar_global_lock); - if (index < 0) - return index; - - prepare_irte(&irte, vector, destination); + struct intel_iommu *iommu = NULL; - /* Set source-id of interrupt request */ - set_ioapic_sid(&irte, ioapic_id); + if (!info) + return NULL; - modify_irte(irq, &irte); + switch (info->type) { + case X86_IRQ_ALLOC_TYPE_IOAPIC: + iommu = map_ioapic_to_ir(info->ioapic_id); + break; + case X86_IRQ_ALLOC_TYPE_HPET: + iommu = map_hpet_to_ir(info->hpet_id); + break; + case X86_IRQ_ALLOC_TYPE_MSI: + case X86_IRQ_ALLOC_TYPE_MSIX: + iommu = map_dev_to_ir(info->msi_dev); + break; + default: + BUG_ON(1); + break; + } - apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: " - "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d " - "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X " - "Avail:%X Vector:%02X Dest:%08X " - "SID:%04X SQ:%X SVT:%X)\n", - attr->ioapic, irte.present, irte.fpd, irte.dst_mode, - irte.redir_hint, irte.trigger_mode, irte.dlvry_mode, - irte.avail, irte.vector, irte.dest_id, - irte.sid, irte.sq, irte.svt); + return iommu ? iommu->ir_domain : NULL; +} - entry = (struct IR_IO_APIC_route_entry *)route_entry; - memset(entry, 0, sizeof(*entry)); +static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info) +{ + struct intel_iommu *iommu; - entry->index2 = (index >> 15) & 0x1; - entry->zero = 0; - entry->format = 1; - entry->index = (index & 0x7fff); - /* - * IO-APIC RTE will be configured with virtual vector. - * irq handler will do the explicit EOI to the io-apic. - */ - entry->vector = attr->ioapic_pin; - entry->mask = 0; /* enable IRQ */ - entry->trigger = attr->trigger; - entry->polarity = attr->polarity; + if (!info) + return NULL; - /* Mask level triggered irqs. - * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 
- */ - if (attr->trigger) - entry->mask = 1; + switch (info->type) { + case X86_IRQ_ALLOC_TYPE_MSI: + case X86_IRQ_ALLOC_TYPE_MSIX: + iommu = map_dev_to_ir(info->msi_dev); + if (iommu) + return iommu->ir_msi_domain; + break; + default: + break; + } - return 0; + return NULL; } +struct irq_remap_ops intel_irq_remap_ops = { + .prepare = intel_prepare_irq_remapping, + .enable = intel_enable_irq_remapping, + .disable = disable_irq_remapping, + .reenable = reenable_irq_remapping, + .enable_faulting = enable_drhd_fault_handling, + .get_ir_irq_domain = intel_get_ir_irq_domain, + .get_irq_domain = intel_get_irq_domain, +}; + /* * Migrate the IO-APIC irq in the presence of intr-remapping. * @@ -1051,170 +1014,282 @@ static int intel_setup_ioapic_entry(int irq, * is used to migrate MSI irq's in the presence of interrupt-remapping. */ static int -intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, - bool force) +intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) { + struct intel_ir_data *ir_data = data->chip_data; + struct irte *irte = &ir_data->irte_entry; struct irq_cfg *cfg = irqd_cfg(data); - unsigned int dest, irq = data->irq; - struct irte irte; - int err; - - if (!config_enabled(CONFIG_SMP)) - return -EINVAL; - - if (!cpumask_intersects(mask, cpu_online_mask)) - return -EINVAL; - - if (get_irte(irq, &irte)) - return -EBUSY; - - err = assign_irq_vector(irq, cfg, mask); - if (err) - return err; - - err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest); - if (err) { - if (assign_irq_vector(irq, cfg, data->affinity)) - pr_err("Failed to recover vector for irq %d\n", irq); - return err; - } + struct irq_data *parent = data->parent_data; + int ret; - irte.vector = cfg->vector; - irte.dest_id = IRTE_DEST(dest); + ret = parent->chip->irq_set_affinity(parent, mask, force); + if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) + return ret; /* * Atomically updates the IRTE with the new destination, vector * and flushes the interrupt entry cache. */ - modify_irte(irq, &irte); + irte->vector = cfg->vector; + irte->dest_id = IRTE_DEST(cfg->dest_apicid); + + /* Update the hardware only if the interrupt is in remapped mode. */ + if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING) + modify_irte(&ir_data->irq_2_iommu, irte); /* * After this point, all the interrupts will start arriving * at the new destination. So, time to cleanup the previous * vector allocation. 
*/ - if (cfg->move_in_progress) - send_cleanup_vector(cfg); + send_cleanup_vector(cfg); - cpumask_copy(data->affinity, mask); - return 0; + return IRQ_SET_MASK_OK_DONE; } -static void intel_compose_msi_msg(struct pci_dev *pdev, - unsigned int irq, unsigned int dest, - struct msi_msg *msg, u8 hpet_id) +static void intel_ir_compose_msi_msg(struct irq_data *irq_data, + struct msi_msg *msg) { - struct irq_cfg *cfg; - struct irte irte; - u16 sub_handle = 0; - int ir_index; + struct intel_ir_data *ir_data = irq_data->chip_data; - cfg = irq_cfg(irq); + *msg = ir_data->msi_entry; +} - ir_index = map_irq_to_irte_handle(irq, &sub_handle); - BUG_ON(ir_index == -1); +static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info) +{ + struct intel_ir_data *ir_data = data->chip_data; + struct vcpu_data *vcpu_pi_info = info; - prepare_irte(&irte, cfg->vector, dest); + /* stop posting interrupts, back to remapping mode */ + if (!vcpu_pi_info) { + modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry); + } else { + struct irte irte_pi; - /* Set source-id of interrupt request */ - if (pdev) - set_msi_sid(&irte, pdev); - else - set_hpet_sid(&irte, hpet_id); + /* + * We are not caching the posted interrupt entry. We + * copy the data from the remapped entry and modify + * the fields which are relevant for posted mode. The + * cached remapped entry is used for switching back to + * remapped mode. + */ + memset(&irte_pi, 0, sizeof(irte_pi)); + dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry); + + /* Update the posted mode fields */ + irte_pi.p_pst = 1; + irte_pi.p_urgent = 0; + irte_pi.p_vector = vcpu_pi_info->vector; + irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >> + (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT); + irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) & + ~(-1UL << PDA_HIGH_BIT); + + modify_irte(&ir_data->irq_2_iommu, &irte_pi); + } - modify_irte(irq, &irte); + return 0; +} + +static struct irq_chip intel_ir_chip = { + .irq_ack = ir_ack_apic_edge, + .irq_set_affinity = intel_ir_set_affinity, + .irq_compose_msi_msg = intel_ir_compose_msi_msg, + .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, +}; - msg->address_hi = MSI_ADDR_BASE_HI; - msg->data = sub_handle; - msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | - MSI_ADDR_IR_SHV | - MSI_ADDR_IR_INDEX1(ir_index) | - MSI_ADDR_IR_INDEX2(ir_index); +static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data, + struct irq_cfg *irq_cfg, + struct irq_alloc_info *info, + int index, int sub_handle) +{ + struct IR_IO_APIC_route_entry *entry; + struct irte *irte = &data->irte_entry; + struct msi_msg *msg = &data->msi_entry; + + prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid); + switch (info->type) { + case X86_IRQ_ALLOC_TYPE_IOAPIC: + /* Set source-id of interrupt request */ + set_ioapic_sid(irte, info->ioapic_id); + apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n", + info->ioapic_id, irte->present, irte->fpd, + irte->dst_mode, irte->redir_hint, + irte->trigger_mode, irte->dlvry_mode, + irte->avail, irte->vector, irte->dest_id, + irte->sid, irte->sq, irte->svt); + + entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry; + info->ioapic_entry = NULL; + memset(entry, 0, sizeof(*entry)); + entry->index2 = (index >> 15) & 0x1; + entry->zero = 0; + entry->format = 1; + entry->index = (index & 0x7fff); + /* + * IO-APIC RTE will be configured with virtual vector. 
+ * irq handler will do the explicit EOI to the io-apic. + */ + entry->vector = info->ioapic_pin; + entry->mask = 0; /* enable IRQ */ + entry->trigger = info->ioapic_trigger; + entry->polarity = info->ioapic_polarity; + if (info->ioapic_trigger) + entry->mask = 1; /* Mask level triggered irqs. */ + break; + + case X86_IRQ_ALLOC_TYPE_HPET: + case X86_IRQ_ALLOC_TYPE_MSI: + case X86_IRQ_ALLOC_TYPE_MSIX: + if (info->type == X86_IRQ_ALLOC_TYPE_HPET) + set_hpet_sid(irte, info->hpet_id); + else + set_msi_sid(irte, info->msi_dev); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->data = sub_handle; + msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | + MSI_ADDR_IR_SHV | + MSI_ADDR_IR_INDEX1(index) | + MSI_ADDR_IR_INDEX2(index); + break; + + default: + BUG_ON(1); + break; + } } -/* - * Map the PCI dev to the corresponding remapping hardware unit - * and allocate 'nvec' consecutive interrupt-remapping table entries - * in it. - */ -static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec) +static void intel_free_irq_resources(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) { - struct intel_iommu *iommu; - int index; + struct irq_data *irq_data; + struct intel_ir_data *data; + struct irq_2_iommu *irq_iommu; + unsigned long flags; + int i; - down_read(&dmar_global_lock); - iommu = map_dev_to_ir(dev); - if (!iommu) { - printk(KERN_ERR - "Unable to map PCI %s to iommu\n", pci_name(dev)); - index = -ENOENT; - } else { - index = alloc_irte(iommu, irq, nvec); - if (index < 0) { - printk(KERN_ERR - "Unable to allocate %d IRTE for PCI %s\n", - nvec, pci_name(dev)); - index = -ENOSPC; + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + data = irq_data->chip_data; + irq_iommu = &data->irq_2_iommu; + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); + clear_entries(irq_iommu); + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); + irq_domain_reset_irq_data(irq_data); + kfree(data); } } - up_read(&dmar_global_lock); - - return index; } -static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq, - int index, int sub_handle) +static int intel_irq_remapping_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *arg) { - struct intel_iommu *iommu; - int ret = -ENOENT; + struct intel_iommu *iommu = domain->host_data; + struct irq_alloc_info *info = arg; + struct intel_ir_data *data, *ird; + struct irq_data *irq_data; + struct irq_cfg *irq_cfg; + int i, ret, index; + + if (!info || !iommu) + return -EINVAL; + if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI && + info->type != X86_IRQ_ALLOC_TYPE_MSIX) + return -EINVAL; + + /* + * With IRQ remapping enabled, don't need contiguous CPU vectors + * to support multiple MSI interrupts. + */ + if (info->type == X86_IRQ_ALLOC_TYPE_MSI) + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); + if (ret < 0) + return ret; + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto out_free_parent; down_read(&dmar_global_lock); - iommu = map_dev_to_ir(pdev); - if (iommu) { - /* - * setup the mapping between the irq and the IRTE - * base index, the sub_handle pointing to the - * appropriate interrupt remap table entry. 
- */ - set_irte_irq(irq, iommu, index, sub_handle); - ret = 0; - } + index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs); up_read(&dmar_global_lock); + if (index < 0) { + pr_warn("Failed to allocate IRTE\n"); + kfree(data); + goto out_free_parent; + } + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_cfg = irqd_cfg(irq_data); + if (!irq_data || !irq_cfg) { + ret = -EINVAL; + goto out_free_data; + } + + if (i > 0) { + ird = kzalloc(sizeof(*ird), GFP_KERNEL); + if (!ird) + goto out_free_data; + /* Initialize the common data */ + ird->irq_2_iommu = data->irq_2_iommu; + ird->irq_2_iommu.sub_handle = i; + } else { + ird = data; + } + + irq_data->hwirq = (index << 16) + i; + irq_data->chip_data = ird; + irq_data->chip = &intel_ir_chip; + intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i); + irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT); + } + return 0; + +out_free_data: + intel_free_irq_resources(domain, virq, i); +out_free_parent: + irq_domain_free_irqs_common(domain, virq, nr_irqs); return ret; } -static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id) +static void intel_irq_remapping_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) { - int ret = -1; - struct intel_iommu *iommu; - int index; + intel_free_irq_resources(domain, virq, nr_irqs); + irq_domain_free_irqs_common(domain, virq, nr_irqs); +} - down_read(&dmar_global_lock); - iommu = map_hpet_to_ir(id); - if (iommu) { - index = alloc_irte(iommu, irq, 1); - if (index >= 0) - ret = 0; - } - up_read(&dmar_global_lock); +static void intel_irq_remapping_activate(struct irq_domain *domain, + struct irq_data *irq_data) +{ + struct intel_ir_data *data = irq_data->chip_data; - return ret; + modify_irte(&data->irq_2_iommu, &data->irte_entry); } -struct irq_remap_ops intel_irq_remap_ops = { - .prepare = intel_prepare_irq_remapping, - .enable = intel_enable_irq_remapping, - .disable = disable_irq_remapping, - .reenable = reenable_irq_remapping, - .enable_faulting = enable_drhd_fault_handling, - .setup_ioapic_entry = intel_setup_ioapic_entry, - .set_affinity = intel_ioapic_set_affinity, - .free_irq = free_irte, - .compose_msi_msg = intel_compose_msi_msg, - .msi_alloc_irq = intel_msi_alloc_irq, - .msi_setup_irq = intel_msi_setup_irq, - .alloc_hpet_msi = intel_alloc_hpet_msi, +static void intel_irq_remapping_deactivate(struct irq_domain *domain, + struct irq_data *irq_data) +{ + struct intel_ir_data *data = irq_data->chip_data; + struct irte entry; + + memset(&entry, 0, sizeof(entry)); + modify_irte(&data->irq_2_iommu, &entry); +} + +static struct irq_domain_ops intel_ir_domain_ops = { + .alloc = intel_irq_remapping_alloc, + .free = intel_irq_remapping_free, + .activate = intel_irq_remapping_activate, + .deactivate = intel_irq_remapping_deactivate, }; /* @@ -1280,6 +1355,9 @@ int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) return -EINVAL; if (!ecap_ir_support(iommu->ecap)) return 0; + if (irq_remapping_cap(IRQ_POSTING_CAP) && + !cap_pi_support(iommu->cap)) + return -EBUSY; if (insert) { if (!iommu->ir_table) diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 390079ee1350..2d9993062ded 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -6,6 +6,7 @@ #include <linux/msi.h> #include <linux/irq.h> #include <linux/pci.h> +#include <linux/irqdomain.h> #include <asm/hw_irq.h> #include <asm/irq_remapping.h> @@ -21,21 +22,11 @@ int irq_remap_broken; int disable_sourceid_checking; int 
no_x2apic_optout; +int disable_irq_post = 1; + static int disable_irq_remap; static struct irq_remap_ops *remap_ops; -static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec); -static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, - int index, int sub_handle); -static int set_remapped_irq_affinity(struct irq_data *data, - const struct cpumask *mask, - bool force); - -static bool irq_remapped(struct irq_cfg *cfg) -{ - return (cfg->remapped == 1); -} - static void irq_remapping_disable_io_apic(void) { /* @@ -49,117 +40,9 @@ static void irq_remapping_disable_io_apic(void) disconnect_bsp_APIC(0); } -static int do_setup_msi_irqs(struct pci_dev *dev, int nvec) -{ - int ret, sub_handle, nvec_pow2, index = 0; - unsigned int irq; - struct msi_desc *msidesc; - - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); - - irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); - if (irq == 0) - return -ENOSPC; - - nvec_pow2 = __roundup_pow_of_two(nvec); - for (sub_handle = 0; sub_handle < nvec; sub_handle++) { - if (!sub_handle) { - index = msi_alloc_remapped_irq(dev, irq, nvec_pow2); - if (index < 0) { - ret = index; - goto error; - } - } else { - ret = msi_setup_remapped_irq(dev, irq + sub_handle, - index, sub_handle); - if (ret < 0) - goto error; - } - ret = setup_msi_irq(dev, msidesc, irq, sub_handle); - if (ret < 0) - goto error; - } - return 0; - -error: - irq_free_hwirqs(irq, nvec); - - /* - * Restore altered MSI descriptor fields and prevent just destroyed - * IRQs from tearing down again in default_teardown_msi_irqs() - */ - msidesc->irq = 0; - - return ret; -} - -static int do_setup_msix_irqs(struct pci_dev *dev, int nvec) -{ - int node, ret, sub_handle, index = 0; - struct msi_desc *msidesc; - unsigned int irq; - - node = dev_to_node(&dev->dev); - sub_handle = 0; - - list_for_each_entry(msidesc, &dev->msi_list, list) { - - irq = irq_alloc_hwirq(node); - if (irq == 0) - return -1; - - if (sub_handle == 0) - ret = index = msi_alloc_remapped_irq(dev, irq, nvec); - else - ret = msi_setup_remapped_irq(dev, irq, index, sub_handle); - - if (ret < 0) - goto error; - - ret = setup_msi_irq(dev, msidesc, irq, 0); - if (ret < 0) - goto error; - - sub_handle += 1; - irq += 1; - } - - return 0; - -error: - irq_free_hwirq(irq); - return ret; -} - -static int irq_remapping_setup_msi_irqs(struct pci_dev *dev, - int nvec, int type) -{ - if (type == PCI_CAP_ID_MSI) - return do_setup_msi_irqs(dev, nvec); - else - return do_setup_msix_irqs(dev, nvec); -} - -static void eoi_ioapic_pin_remapped(int apic, int pin, int vector) -{ - /* - * Intr-remapping uses pin number as the virtual vector - * in the RTE. Actual vector is programmed in - * intr-remapping table entry. Hence for the io-apic - * EOI we use the pin number. 
- */ - io_apic_eoi(apic, pin); -} - static void __init irq_remapping_modify_x86_ops(void) { x86_io_apic_ops.disable = irq_remapping_disable_io_apic; - x86_io_apic_ops.set_affinity = set_remapped_irq_affinity; - x86_io_apic_ops.setup_entry = setup_ioapic_remapped_entry; - x86_io_apic_ops.eoi_ioapic_pin = eoi_ioapic_pin_remapped; - x86_msi.setup_msi_irqs = irq_remapping_setup_msi_irqs; - x86_msi.setup_hpet_msi = setup_hpet_msi_remapped; - x86_msi.compose_msi_msg = compose_remapped_msi_msg; } static __init int setup_nointremap(char *str) @@ -198,6 +81,15 @@ void set_irq_remapping_broken(void) irq_remap_broken = 1; } +bool irq_remapping_cap(enum irq_remap_cap cap) +{ + if (!remap_ops || disable_irq_post) + return 0; + + return (remap_ops->capability & (1 << cap)); +} +EXPORT_SYMBOL_GPL(irq_remapping_cap); + int __init irq_remapping_prepare(void) { if (disable_irq_remap) @@ -254,113 +146,48 @@ int __init irq_remap_enable_fault_handling(void) return remap_ops->enable_faulting(); } -int setup_ioapic_remapped_entry(int irq, - struct IO_APIC_route_entry *entry, - unsigned int destination, int vector, - struct io_apic_irq_attr *attr) -{ - if (!remap_ops->setup_ioapic_entry) - return -ENODEV; - - return remap_ops->setup_ioapic_entry(irq, entry, destination, - vector, attr); -} - -static int set_remapped_irq_affinity(struct irq_data *data, - const struct cpumask *mask, bool force) -{ - if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity) - return 0; - - return remap_ops->set_affinity(data, mask, force); -} - -void free_remapped_irq(int irq) -{ - struct irq_cfg *cfg = irq_cfg(irq); - - if (irq_remapped(cfg) && remap_ops->free_irq) - remap_ops->free_irq(irq); -} - -void compose_remapped_msi_msg(struct pci_dev *pdev, - unsigned int irq, unsigned int dest, - struct msi_msg *msg, u8 hpet_id) -{ - struct irq_cfg *cfg = irq_cfg(irq); - - if (!irq_remapped(cfg)) - native_compose_msi_msg(pdev, irq, dest, msg, hpet_id); - else if (remap_ops->compose_msi_msg) - remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id); -} - -static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec) -{ - if (!remap_ops->msi_alloc_irq) - return -ENODEV; - - return remap_ops->msi_alloc_irq(pdev, irq, nvec); -} - -static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq, - int index, int sub_handle) -{ - if (!remap_ops->msi_setup_irq) - return -ENODEV; - - return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle); -} - -int setup_hpet_msi_remapped(unsigned int irq, unsigned int id) -{ - int ret; - - if (!remap_ops->alloc_hpet_msi) - return -ENODEV; - - ret = remap_ops->alloc_hpet_msi(irq, id); - if (ret) - return -EINVAL; - - return default_setup_hpet_msi(irq, id); -} - void panic_if_irq_remap(const char *msg) { if (irq_remapping_enabled) panic(msg); } -static void ir_ack_apic_edge(struct irq_data *data) +void ir_ack_apic_edge(struct irq_data *data) { ack_APIC_irq(); } -static void ir_ack_apic_level(struct irq_data *data) +/** + * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU + * device serving request @info + * @info: interrupt allocation information, used to identify the IOMMU device + * + * It's used to get parent irqdomain for HPET and IOAPIC irqdomains. + * Returns pointer to IRQ domain, or NULL on failure. 
+ */ +struct irq_domain * +irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info) { - ack_APIC_irq(); - eoi_ioapic_irq(data->irq, irqd_cfg(data)); -} + if (!remap_ops || !remap_ops->get_ir_irq_domain) + return NULL; -static void ir_print_prefix(struct irq_data *data, struct seq_file *p) -{ - seq_printf(p, " IR-%s", data->chip->name); + return remap_ops->get_ir_irq_domain(info); } -void irq_remap_modify_chip_defaults(struct irq_chip *chip) +/** + * irq_remapping_get_irq_domain - Get the irqdomain serving the request @info + * @info: interrupt allocation information, used to identify the IOMMU device + * + * There will be one PCI MSI/MSIX irqdomain associated with each interrupt + * remapping device, so this interface is used to retrieve the PCI MSI/MSIX + * irqdomain serving request @info. + * Returns pointer to IRQ domain, or NULL on failure. + */ +struct irq_domain * +irq_remapping_get_irq_domain(struct irq_alloc_info *info) { - chip->irq_print_chip = ir_print_prefix; - chip->irq_ack = ir_ack_apic_edge; - chip->irq_eoi = ir_ack_apic_level; - chip->irq_set_affinity = x86_io_apic_ops.set_affinity; -} + if (!remap_ops || !remap_ops->get_irq_domain) + return NULL; -bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip) -{ - if (!irq_remapped(cfg)) - return false; - irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - irq_remap_modify_chip_defaults(chip); - return true; + return remap_ops->get_irq_domain(info); } diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index 7c70cc29ffe6..039c7af7b190 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h @@ -24,19 +24,22 @@ #ifdef CONFIG_IRQ_REMAP -struct IO_APIC_route_entry; -struct io_apic_irq_attr; struct irq_data; -struct cpumask; -struct pci_dev; struct msi_msg; +struct irq_domain; +struct irq_alloc_info; extern int irq_remap_broken; extern int disable_sourceid_checking; extern int no_x2apic_optout; extern int irq_remapping_enabled; +extern int disable_irq_post; + struct irq_remap_ops { + /* The supported capabilities */ + int capability; + /* Initializes hardware and makes it ready for remapping interrupts */ int (*prepare)(void); @@ -52,40 +55,23 @@ struct irq_remap_ops { /* Enable fault handling */ int (*enable_faulting)(void); - /* IO-APIC setup routine */ - int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *, - unsigned int, int, - struct io_apic_irq_attr *); - - /* Set the CPU affinity of a remapped interrupt */ - int (*set_affinity)(struct irq_data *data, const struct cpumask *mask, - bool force); - - /* Free an IRQ */ - int (*free_irq)(int); + /* Get the irqdomain associated the IOMMU device */ + struct irq_domain *(*get_ir_irq_domain)(struct irq_alloc_info *); - /* Create MSI msg to use for interrupt remapping */ - void (*compose_msi_msg)(struct pci_dev *, - unsigned int, unsigned int, - struct msi_msg *, u8); - - /* Allocate remapping resources for MSI */ - int (*msi_alloc_irq)(struct pci_dev *, int, int); - - /* Setup the remapped MSI irq */ - int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int); - - /* Setup interrupt remapping for an HPET MSI */ - int (*alloc_hpet_msi)(unsigned int, unsigned int); + /* Get the MSI irqdomain associated with the IOMMU device */ + struct irq_domain *(*get_irq_domain)(struct irq_alloc_info *); }; extern struct irq_remap_ops intel_irq_remap_ops; extern struct irq_remap_ops amd_iommu_irq_ops; +extern void ir_ack_apic_edge(struct irq_data *data); + #else /* CONFIG_IRQ_REMAP */ #define irq_remapping_enabled 0 
#define irq_remap_broken 0 +#define disable_irq_post 1 #endif /* CONFIG_IRQ_REMAP */ diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 6de62a96e79c..99b9a9792975 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -30,6 +30,7 @@ config ARM_GIC_V3_ITS config ARM_NVIC bool select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_CHIP config ARM_VIC diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index 5945223b73fa..5c82e3bdafdf 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/syscore_ops.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/interrupt.h> @@ -34,9 +35,14 @@ struct combiner_chip_data { unsigned int irq_mask; void __iomem *base; unsigned int parent_irq; +#ifdef CONFIG_PM + u32 pm_save; +#endif }; +static struct combiner_chip_data *combiner_data; static struct irq_domain *combiner_irq_domain; +static unsigned int max_nr = 20; static inline void __iomem *combiner_base(struct irq_data *data) { @@ -164,18 +170,16 @@ static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq, return 0; } -static struct irq_domain_ops combiner_irq_domain_ops = { +static const struct irq_domain_ops combiner_irq_domain_ops = { .xlate = combiner_irq_domain_xlate, .map = combiner_irq_domain_map, }; static void __init combiner_init(void __iomem *combiner_base, - struct device_node *np, - unsigned int max_nr) + struct device_node *np) { int i, irq; unsigned int nr_irq; - struct combiner_chip_data *combiner_data; nr_irq = max_nr * IRQ_IN_COMBINER; @@ -201,11 +205,59 @@ static void __init combiner_init(void __iomem *combiner_base, } } +#ifdef CONFIG_PM + +/** + * combiner_suspend - save interrupt combiner state before suspend + * + * Save the interrupt enable set register for all combiner groups since + * the state is lost when the system enters into a sleep state. + * + */ +static int combiner_suspend(void) +{ + int i; + + for (i = 0; i < max_nr; i++) + combiner_data[i].pm_save = + __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET); + + return 0; +} + +/** + * combiner_resume - restore interrupt combiner state after resume + * + * Restore the interrupt enable set register for all combiner groups since + * the state is lost when the system enters into a sleep state on suspend. 
+ * + */ +static void combiner_resume(void) +{ + int i; + + for (i = 0; i < max_nr; i++) { + __raw_writel(combiner_data[i].irq_mask, + combiner_data[i].base + COMBINER_ENABLE_CLEAR); + __raw_writel(combiner_data[i].pm_save, + combiner_data[i].base + COMBINER_ENABLE_SET); + } +} + +#else +#define combiner_suspend NULL +#define combiner_resume NULL +#endif + +static struct syscore_ops combiner_syscore_ops = { + .suspend = combiner_suspend, + .resume = combiner_resume, +}; + static int __init combiner_of_init(struct device_node *np, struct device_node *parent) { void __iomem *combiner_base; - unsigned int max_nr = 20; combiner_base = of_iomap(np, 0); if (!combiner_base) { @@ -219,7 +271,9 @@ static int __init combiner_of_init(struct device_node *np, __func__, max_nr); } - combiner_init(combiner_base, np, max_nr); + combiner_init(combiner_base, np); + + register_syscore_ops(&combiner_syscore_ops); return 0; } diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index daccc8bdbb42..0d3b0fe2f175 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -409,7 +409,7 @@ static struct notifier_block mpic_cascaded_cpu_notifier = { }; #endif /* CONFIG_SMP */ -static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { +static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { .map = armada_370_xp_mpic_irq_map, .xlate = irq_domain_xlate_onecell, }; diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index a2e8c3f876cb..459bf4429d36 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -339,6 +339,15 @@ static int __init aic5_of_init(struct device_node *node, return 0; } +#define NR_SAMA5D2_IRQS 77 + +static int __init sama5d2_aic5_of_init(struct device_node *node, + struct device_node *parent) +{ + return aic5_of_init(node, parent, NR_SAMA5D2_IRQS); +} +IRQCHIP_DECLARE(sama5d2_aic5, "atmel,sama5d2-aic", sama5d2_aic5_of_init); + #define NR_SAMA5D3_IRQS 48 static int __init sama5d3_aic5_of_init(struct device_node *node, diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c index 5916d6cdafa1..e68c3b60a681 100644 --- a/drivers/irqchip/irq-bcm2835.c +++ b/drivers/irqchip/irq-bcm2835.c @@ -135,7 +135,7 @@ static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr, return 0; } -static struct irq_domain_ops armctrl_ops = { +static const struct irq_domain_ops armctrl_ops = { .xlate = armctrl_xlate }; diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index ad96ebb0c7ab..9448e391cb71 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -24,11 +24,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type, void __iomem *base, void (*sync_access)(void)) { - u32 enablemask = 1 << (irq % 32); - u32 enableoff = (irq / 32) * 4; u32 confmask = 0x2 << ((irq % 16) * 2); u32 confoff = (irq / 16) * 4; - bool enabled = false; u32 val, oldval; int ret = 0; @@ -43,17 +40,6 @@ int gic_configure_irq(unsigned int irq, unsigned int type, val |= confmask; /* - * As recommended by the spec, disable the interrupt before changing - * the configuration - */ - if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) { - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff); - if (sync_access) - sync_access(); - enabled = true; - } - - /* * Write back the new configuration, and possibly re-enable * the interrupt. 
If we tried to write a new configuration and failed, * return an error. @@ -62,9 +48,6 @@ int gic_configure_irq(unsigned int irq, unsigned int type, if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval) ret = -EINVAL; - if (enabled) - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); - if (sync_access) sync_access(); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 49875adb6b44..c52f7ba205b4 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -658,6 +658,7 @@ static struct irq_chip gic_chip = { .irq_set_affinity = gic_set_affinity, .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED, }; #define GIC_ID_NR (1U << gic_data.rdists.id_bits) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 01999d74bd3a..8d7e1c8b6d56 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -324,6 +324,7 @@ static struct irq_chip gic_chip = { #endif .irq_get_irqchip_state = gic_irq_get_irqchip_state, .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .flags = IRQCHIP_SET_TYPE_MASKED, }; void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 7d6ffb5de84f..0cae45d10695 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -202,6 +202,7 @@ static struct irq_chip hip04_irq_chip = { #ifdef CONFIG_SMP .irq_set_affinity = hip04_irq_set_affinity, #endif + .flags = IRQCHIP_SET_TYPE_MASKED, }; static u16 hip04_get_cpumask(struct hip04_irq_data *intc) diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c index 78e8b3ce5252..81e3cf5b9a1f 100644 --- a/drivers/irqchip/irq-keystone.c +++ b/drivers/irqchip/irq-keystone.c @@ -131,7 +131,7 @@ static int keystone_irq_map(struct irq_domain *h, unsigned int virq, return 0; } -static struct irq_domain_ops keystone_irq_ops = { +static const struct irq_domain_ops keystone_irq_ops = { .map = keystone_irq_map, .xlate = irq_domain_xlate_onecell, }; @@ -184,8 +184,7 @@ static int keystone_irq_probe(struct platform_device *pdev) platform_set_drvdata(pdev, kirq); - irq_set_chained_handler(kirq->irq, keystone_irq_handler); - irq_set_handler_data(kirq->irq, kirq); + irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq); /* clear all source bits */ keystone_irq_writel(kirq, ~0x0); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 269c2354c431..4400edd1a6c7 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -746,7 +746,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, return 0; } -static struct irq_domain_ops gic_irq_domain_ops = { +static const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, .xlate = gic_irq_domain_xlate, }; diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index eaf0a710e98a..15c13039bba2 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -111,7 +111,7 @@ static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); } -static struct irq_domain_ops sysirq_domain_ops = { +static const struct irq_domain_ops sysirq_domain_ops = { .xlate = mtk_sysirq_domain_xlate, .alloc = mtk_sysirq_domain_alloc, .free = 
irq_domain_free_irqs_common, @@ -144,7 +144,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node, chip_data->intpol_base = ioremap(res.start, size); if (!chip_data->intpol_base) { pr_err("mtk_sysirq: unable to map sysirq register\n"); - ret = PTR_ERR(chip_data->intpol_base); + ret = -ENXIO; goto out_free; } diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c index e4acf1e3f8e3..04bf97b289cf 100644 --- a/drivers/irqchip/irq-mxs.c +++ b/drivers/irqchip/irq-mxs.c @@ -90,7 +90,7 @@ static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq, return 0; } -static struct irq_domain_ops icoll_irq_domain_ops = { +static const struct irq_domain_ops icoll_irq_domain_ops = { .map = icoll_irq_domain_map, .xlate = irq_domain_xlate_onecell, }; diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 4ff0805fca01..5fac9100f6cb 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -49,6 +49,31 @@ nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs) handle_IRQ(irq, regs); } +static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct of_phandle_args *irq_data = arg; + + ret = irq_domain_xlate_onecell(domain, irq_data->np, irq_data->args, + irq_data->args_count, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + irq_map_generic_chip(domain, virq + i, hwirq + i); + + return 0; +} + +static const struct irq_domain_ops nvic_irq_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .alloc = nvic_irq_domain_alloc, + .free = irq_domain_free_irqs_top, +}; + static int __init nvic_of_init(struct device_node *node, struct device_node *parent) { @@ -70,7 +95,8 @@ static int __init nvic_of_init(struct device_node *node, irqs = NVIC_MAX_IRQ; nvic_irq_domain = - irq_domain_add_linear(node, irqs, &irq_generic_chip_ops, NULL); + irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL); + if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); return -ENOMEM; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 9a0767b9c89d..0670ab4e3897 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -347,7 +347,7 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, return 0; } -static struct irq_domain_ops intc_irqpin_irq_domain_ops = { +static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { .map = intc_irqpin_irq_domain_map, .xlate = irq_domain_xlate_twocell, }; diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index cdf80b7794cd..778bd076aeea 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -29,7 +29,6 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/module.h> -#include <linux/platform_data/irq-renesas-irqc.h> #include <linux/pm_runtime.h> #define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */ @@ -62,7 +61,6 @@ struct irqc_priv { void __iomem *iomem; void __iomem *cpu_int_base; struct irqc_irq irq[IRQC_IRQ_MAX]; - struct renesas_irqc_config config; unsigned int number_of_irqs; struct platform_device *pdev; struct irq_chip irq_chip; @@ -168,14 +166,13 @@ static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq, return 0; } -static struct irq_domain_ops irqc_irq_domain_ops = { +static const struct 
irq_domain_ops irqc_irq_domain_ops = { .map = irqc_irq_domain_map, .xlate = irq_domain_xlate_twocell, }; static int irqc_probe(struct platform_device *pdev) { - struct renesas_irqc_config *pdata = pdev->dev.platform_data; struct irqc_priv *p; struct resource *io; struct resource *irq; @@ -191,10 +188,6 @@ static int irqc_probe(struct platform_device *pdev) goto err0; } - /* deal with driver instance configuration */ - if (pdata) - memcpy(&p->config, pdata, sizeof(*pdata)); - p->pdev = pdev; platform_set_drvdata(pdev, p); @@ -251,8 +244,7 @@ static int irqc_probe(struct platform_device *pdev) irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, - p->number_of_irqs, - p->config.irq_base, + p->number_of_irqs, 0, &irqc_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; @@ -272,13 +264,6 @@ static int irqc_probe(struct platform_device *pdev) dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); - /* warn in case of mismatch if irq base is specified */ - if (p->config.irq_base) { - if (p->config.irq_base != p->irq[0].domain_irq) - dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n", - p->config.irq_base, p->irq[0].domain_irq); - } - return 0; err3: while (--k >= 0) diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c index c8d373fcd823..e96717f45ea1 100644 --- a/drivers/irqchip/irq-s3c24xx.c +++ b/drivers/irqchip/irq-s3c24xx.c @@ -502,7 +502,7 @@ err: return -EINVAL; } -static struct irq_domain_ops s3c24xx_irq_ops = { +static const struct irq_domain_ops s3c24xx_irq_ops = { .map = s3c24xx_irq_map, .xlate = irq_domain_xlate_twocell, }; @@ -1228,7 +1228,7 @@ static int s3c24xx_irq_xlate_of(struct irq_domain *d, struct device_node *n, return 0; } -static struct irq_domain_ops s3c24xx_irq_ops_of = { +static const struct irq_domain_ops s3c24xx_irq_ops_of = { .map = s3c24xx_irq_map_of, .xlate = s3c24xx_irq_xlate_of, }; diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index 64155b686081..83d6aa6464ee 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c @@ -89,7 +89,7 @@ static int sun4i_irq_map(struct irq_domain *d, unsigned int virq, return 0; } -static struct irq_domain_ops sun4i_irq_ops = { +static const struct irq_domain_ops sun4i_irq_ops = { .map = sun4i_irq_map, .xlate = irq_domain_xlate_onecell, }; diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c index 1ab451729a5c..888111b76ea0 100644 --- a/drivers/irqchip/irq-versatile-fpga.c +++ b/drivers/irqchip/irq-versatile-fpga.c @@ -132,7 +132,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq, return 0; } -static struct irq_domain_ops fpga_irqdomain_ops = { +static const struct irq_domain_ops fpga_irqdomain_ops = { .map = fpga_irqdomain_map, .xlate = irq_domain_xlate_onetwocell, }; diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c index 9521057d4744..f5c01cbcc73a 100644 --- a/drivers/irqchip/irq-vf610-mscm-ir.c +++ b/drivers/irqchip/irq-vf610-mscm-ir.c @@ -47,6 +47,7 @@ struct vf610_mscm_ir_chip_data { void __iomem *mscm_ir_base; u16 cpu_mask; u16 saved_irsprc[MSCM_IRSPRC_NUM]; + bool is_nvic; }; static struct vf610_mscm_ir_chip_data *mscm_ir_data; @@ -101,7 +102,7 @@ static void vf610_mscm_ir_enable(struct irq_data *data) writew_relaxed(chip_data->cpu_mask, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq)); - irq_chip_unmask_parent(data); + irq_chip_enable_parent(data); } static void vf610_mscm_ir_disable(struct irq_data *data) @@ 
-111,7 +112,7 @@ static void vf610_mscm_ir_disable(struct irq_data *data) writew_relaxed(0x0, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq)); - irq_chip_mask_parent(data); + irq_chip_disable_parent(data); } static struct irq_chip vf610_mscm_ir_irq_chip = { @@ -143,10 +144,17 @@ static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int vi domain->host_data); gic_data.np = domain->parent->of_node; - gic_data.args_count = 3; - gic_data.args[0] = GIC_SPI; - gic_data.args[1] = irq_data->args[0]; - gic_data.args[2] = irq_data->args[1]; + + if (mscm_ir_data->is_nvic) { + gic_data.args_count = 1; + gic_data.args[0] = irq_data->args[0]; + } else { + gic_data.args_count = 3; + gic_data.args[0] = GIC_SPI; + gic_data.args[1] = irq_data->args[0]; + gic_data.args[2] = irq_data->args[1]; + } + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); } @@ -174,10 +182,9 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node, return -ENOMEM; mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir"); - - if (!mscm_ir_data->mscm_ir_base) { + if (IS_ERR(mscm_ir_data->mscm_ir_base)) { pr_err("vf610_mscm_ir: unable to map mscm register\n"); - ret = -ENOMEM; + ret = PTR_ERR(mscm_ir_data->mscm_ir_base); goto out_free; } @@ -199,6 +206,9 @@ static int __init vf610_mscm_ir_of_init(struct device_node *node, goto out_unmap; } + if (of_device_is_compatible(domain->parent->of_node, "arm,armv7m-nvic")) + mscm_ir_data->is_nvic = true; + cpu_pm_register_notifier(&mscm_ir_notifier_block); return 0; diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 54089debf2dc..d4ce331ea4a0 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -256,7 +256,7 @@ static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) } while (handled); } -static struct irq_domain_ops vic_irqdomain_ops = { +static const struct irq_domain_ops vic_irqdomain_ops = { .map = vic_irqdomain_map, .xlate = irq_domain_xlate_onetwocell, }; diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c index b7af816f2769..0b297009b856 100644 --- a/drivers/irqchip/irq-vt8500.c +++ b/drivers/irqchip/irq-vt8500.c @@ -173,7 +173,7 @@ static int vt8500_irq_map(struct irq_domain *h, unsigned int virq, return 0; } -static struct irq_domain_ops vt8500_irq_domain_ops = { +static const struct irq_domain_ops vt8500_irq_domain_ops = { .map = vt8500_irq_map, .xlate = irq_domain_xlate_onecell, }; diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 9c145a7cb056..a45121546caf 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -207,8 +207,7 @@ static void __init spear_shirq_register(struct spear_shirq *shirq, if (!shirq->irq_chip) return; - irq_set_chained_handler(parent_irq, shirq_handler); - irq_set_handler_data(parent_irq, shirq); + irq_set_chained_handler_and_data(parent_irq, shirq_handler, shirq); for (i = 0; i < shirq->nr_irqs; i++) { irq_set_chip_and_handler(shirq->virq_base + i, diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 546b7e81161d..aa5dd5668528 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -58,7 +58,7 @@ * About SOFTNET: * Most of the changes were pretty obvious and basically done by HE already. * - * One problem of the isdn net device code is that is uses struct net_device + * One problem of the isdn net device code is that it uses struct net_device * for masters and slaves. 
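
A pattern worth calling out across the keystone, spear-shirq and ucb1x00 hunks in this series: the old two-step irq_set_chained_handler() plus irq_set_handler_data() sequence is collapsed into irq_set_chained_handler_and_data(), which installs the handler and its data together and so closes the window in which the chained handler could run with stale or NULL handler data. Before and after, as a minimal sketch:

	/* Before: two calls; the parent IRQ could fire in between. */
	irq_set_chained_handler(parent_irq, shirq_handler);
	irq_set_handler_data(parent_irq, shirq);

	/* After: handler and data are installed in one step. */
	irq_set_chained_handler_and_data(parent_irq, shirq_handler, shirq);
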
However, only master interface are registered to * the network layer, and therefore, it only makes sense to call netif_* * functions on them. diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 728681debdbe..7fb2a19ac649 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -187,6 +187,7 @@ void led_classdev_resume(struct led_classdev *led_cdev) } EXPORT_SYMBOL_GPL(led_classdev_resume); +#ifdef CONFIG_PM_SLEEP static int led_suspend(struct device *dev) { struct led_classdev *led_cdev = dev_get_drvdata(dev); @@ -206,11 +207,9 @@ static int led_resume(struct device *dev) return 0; } +#endif -static const struct dev_pm_ops leds_class_dev_pm_ops = { - .suspend = led_suspend, - .resume = led_resume, -}; +static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume); static int match_name(struct device *dev, const void *data) { diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index 5e7559be222a..eb934b0242e0 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c @@ -20,7 +20,7 @@ #include "lg.h" /* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */ -static unsigned int syscall_vector = SYSCALL_VECTOR; +static unsigned int syscall_vector = IA32_SYSCALL_VECTOR; module_param(syscall_vector, uint, 0444); /* The address of the interrupt handler is split into two bits: */ @@ -333,8 +333,8 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq) */ static bool could_be_syscall(unsigned int num) { - /* Normal Linux SYSCALL_VECTOR or reserved vector? */ - return num == SYSCALL_VECTOR || num == syscall_vector; + /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */ + return num == IA32_SYSCALL_VECTOR || num == syscall_vector; } /* The syscall vector it wants must be unused by Host. */ @@ -351,7 +351,7 @@ bool check_syscall_vector(struct lguest *lg) int init_interrupts(void) { /* If they want some strange system call vector, reserve it now */ - if (syscall_vector != SYSCALL_VECTOR) { + if (syscall_vector != IA32_SYSCALL_VECTOR) { if (test_bit(syscall_vector, used_vectors) || vector_used_by_percpu_irq(syscall_vector)) { printk(KERN_ERR "lg: couldn't reserve syscall %u\n", @@ -366,7 +366,7 @@ int init_interrupts(void) void free_interrupts(void) { - if (syscall_vector != SYSCALL_VECTOR) + if (syscall_vector != IA32_SYSCALL_VECTOR) clear_bit(syscall_vector, used_vectors); } diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 30f2aef69d78..6a4cd771a2be 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -46,7 +46,7 @@ #include <asm/setup.h> #include <asm/lguest.h> #include <asm/uaccess.h> -#include <asm/i387.h> +#include <asm/fpu/internal.h> #include <asm/tlbflush.h> #include "../lg.h" @@ -251,7 +251,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * we set it now, so we can trap and pass that trap to the Guest if it * uses the FPU. */ - if (cpu->ts && user_has_fpu()) + if (cpu->ts && fpregs_active()) stts(); /* @@ -283,7 +283,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); /* Clear the host TS bit if it was set above. */ - if (cpu->ts && user_has_fpu()) + if (cpu->ts && fpregs_active()) clts(); /* @@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) /* * Similarly, if we took a trap because the Guest used the FPU, * we have to restore the FPU it expects to see. 
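
The led-class hunk a little earlier pairs the new CONFIG_PM_SLEEP guard with SIMPLE_DEV_PM_OPS(), which only wires the callbacks in when system sleep is configured, avoiding the unused-function warnings the open-coded dev_pm_ops would otherwise produce. Roughly what the macro expands to; a sketch of the pattern, not the exact kernel expansion:

	static const struct dev_pm_ops leds_class_dev_pm_ops = {
	#ifdef CONFIG_PM_SLEEP
		/* one suspend/resume pair covers suspend, freeze,
		 * poweroff and their resume counterparts */
		.suspend  = led_suspend,
		.resume   = led_resume,
		.freeze   = led_suspend,
		.thaw     = led_resume,
		.poweroff = led_suspend,
		.restore  = led_resume,
	#endif
	};
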
- * math_state_restore() may sleep and we may even move off to + * fpu__restore() may sleep and we may even move off to * a different CPU. So all the critical stuff should be done * before this. */ - else if (cpu->regs->trapnum == 7 && !user_has_fpu()) - math_state_restore(); + else if (cpu->regs->trapnum == 7 && !fpregs_active()) + fpu__restore(¤t->thread.fpu); } /*H:130 diff --git a/drivers/media/pci/cx25821/cx25821-medusa-reg.h b/drivers/media/pci/cx25821/cx25821-medusa-reg.h index c98ac946b277..2e10643a86b7 100644 --- a/drivers/media/pci/cx25821/cx25821-medusa-reg.h +++ b/drivers/media/pci/cx25821/cx25821-medusa-reg.h @@ -84,9 +84,9 @@ #define ABIST_BIN4_VGA3 0x01D4 #define ABIST_BIN5_VGA4 0x01D8 #define ABIST_BIN6_VGA5 0x01DC -#define ABIST_BIN7_VGA6 0x0x1E0 -#define ABIST_CLAMP_A 0x0x1E4 -#define ABIST_CLAMP_B 0x0x1E8 +#define ABIST_BIN7_VGA6 0x01E0 +#define ABIST_CLAMP_A 0x01E4 +#define ABIST_CLAMP_B 0x01E8 #define ABIST_CLAMP_C 0x01EC #define ABIST_CLAMP_D 0x01F0 #define ABIST_CLAMP_E 0x01F4 diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig index dd6ee57e3a4c..6e5867c57305 100644 --- a/drivers/media/pci/ivtv/Kconfig +++ b/drivers/media/pci/ivtv/Kconfig @@ -57,5 +57,8 @@ config VIDEO_FB_IVTV This is used in the Hauppauge PVR-350 card. There is a driver homepage at <http://www.ivtvdriver.org>. + In order to use this module, you will need to boot with PAT disabled + on x86 systems, using the nopat kernel parameter. + To compile this driver as a module, choose M here: the module will be called ivtvfb. diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 9ff1230192e8..4cb365d4ffdc 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -44,8 +44,8 @@ #include <linux/ivtvfb.h> #include <linux/slab.h> -#ifdef CONFIG_MTRR -#include <asm/mtrr.h> +#ifdef CONFIG_X86_64 +#include <asm/pat.h> #endif #include "ivtv-driver.h" @@ -155,12 +155,11 @@ struct osd_info { /* Buffer size */ u32 video_buffer_size; -#ifdef CONFIG_MTRR /* video_base rounded down as required by hardware MTRRs */ unsigned long fb_start_aligned_physaddr; /* video_base rounded up as required by hardware MTRRs */ unsigned long fb_end_aligned_physaddr; -#endif + int wc_cookie; /* Store the buffer offset */ int set_osd_coords_x; @@ -1099,6 +1098,8 @@ static int ivtvfb_init_vidmode(struct ivtv *itv) static int ivtvfb_init_io(struct ivtv *itv) { struct osd_info *oi = itv->osd_info; + /* Find the largest power of two that maps the whole buffer */ + int size_shift = 31; mutex_lock(&itv->serialize_lock); if (ivtv_init_on_first_open(itv)) { @@ -1132,29 +1133,16 @@ static int ivtvfb_init_io(struct ivtv *itv) oi->video_pbase, oi->video_vbase, oi->video_buffer_size / 1024); -#ifdef CONFIG_MTRR - { - /* Find the largest power of two that maps the whole buffer */ - int size_shift = 31; - - while (!(oi->video_buffer_size & (1 << size_shift))) { - size_shift--; - } - size_shift++; - oi->fb_start_aligned_physaddr = oi->video_pbase & ~((1 << size_shift) - 1); - oi->fb_end_aligned_physaddr = oi->video_pbase + oi->video_buffer_size; - oi->fb_end_aligned_physaddr += (1 << size_shift) - 1; - oi->fb_end_aligned_physaddr &= ~((1 << size_shift) - 1); - if (mtrr_add(oi->fb_start_aligned_physaddr, - oi->fb_end_aligned_physaddr - oi->fb_start_aligned_physaddr, - MTRR_TYPE_WRCOMB, 1) < 0) { - IVTVFB_INFO("disabled mttr\n"); - oi->fb_start_aligned_physaddr = 0; - oi->fb_end_aligned_physaddr = 0; - } - } -#endif - + while (!(oi->video_buffer_size & (1 << size_shift))) + 
size_shift--; + size_shift++; + oi->fb_start_aligned_physaddr = oi->video_pbase & ~((1 << size_shift) - 1); + oi->fb_end_aligned_physaddr = oi->video_pbase + oi->video_buffer_size; + oi->fb_end_aligned_physaddr += (1 << size_shift) - 1; + oi->fb_end_aligned_physaddr &= ~((1 << size_shift) - 1); + oi->wc_cookie = arch_phys_wc_add(oi->fb_start_aligned_physaddr, + oi->fb_end_aligned_physaddr - + oi->fb_start_aligned_physaddr); /* Blank the entire osd. */ memset_io(oi->video_vbase, 0, oi->video_buffer_size); @@ -1172,14 +1160,7 @@ static void ivtvfb_release_buffers (struct ivtv *itv) /* Release pseudo palette */ kfree(oi->ivtvfb_info.pseudo_palette); - -#ifdef CONFIG_MTRR - if (oi->fb_end_aligned_physaddr) { - mtrr_del(-1, oi->fb_start_aligned_physaddr, - oi->fb_end_aligned_physaddr - oi->fb_start_aligned_physaddr); - } -#endif - + arch_phys_wc_del(oi->wc_cookie); kfree(oi); itv->osd_info = NULL; } @@ -1284,6 +1265,13 @@ static int __init ivtvfb_init(void) int registered = 0; int err; +#ifdef CONFIG_X86_64 + if (WARN(pat_enabled(), + "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { + return -ENODEV; + } +#endif + if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", IVTV_MAX_CARDS - 1); diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 187f83629f7e..5dcc0313c38a 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -59,10 +59,6 @@ #include <linux/delay.h> #include <linux/interrupt.h> /* needed for in_interrupt() proto */ #include <linux/dma-mapping.h> -#include <asm/io.h> -#ifdef CONFIG_MTRR -#include <asm/mtrr.h> -#endif #include <linux/kthread.h> #include <scsi/scsi_host.h> @@ -2820,13 +2816,6 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc) pci_disable_device(ioc->pcidev); pci_release_selected_regions(ioc->pcidev, ioc->bars); -#if defined(CONFIG_MTRR) && 0 - if (ioc->mtrr_reg > 0) { - mtrr_del(ioc->mtrr_reg, 0, 0); - dprintk(ioc, printk(MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name)); - } -#endif - /* Zap the adapter lookup ptr! */ list_del(&ioc->list); @@ -4512,19 +4501,6 @@ PrimeIocFifos(MPT_ADAPTER *ioc) ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); -#if defined(CONFIG_MTRR) && 0 - /* - * Enable Write Combining MTRR for IOC's memory region. 
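
Both the ivtvfb conversion above and this mptbase deletion retire hand-rolled MTRR management. On the ivtvfb side, mtrr_add()/mtrr_del() under #ifdef CONFIG_MTRR become arch_phys_wc_add()/arch_phys_wc_del(), which prefer PAT write-combining where available, fall back to an MTRR otherwise, and compile to no-ops on architectures with neither, so the #ifdefs and the failure bookkeeping disappear. The resulting pattern, condensed from the hunk above:

	/* Request write-combining for the MTRR-aligned framebuffer window.
	 * The cookie encodes failure, so no special-cased cleanup is kept. */
	oi->wc_cookie = arch_phys_wc_add(oi->fb_start_aligned_physaddr,
					 oi->fb_end_aligned_physaddr -
					 oi->fb_start_aligned_physaddr);

	/* Teardown: safe to call regardless of whether the add succeeded. */
	arch_phys_wc_del(oi->wc_cookie);
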
- * (at least as much as we can; "size and base must be - * multiples of 4 kiB" - */ - ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma, - sz, - MTRR_TYPE_WRCOMB, 1); - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MTRR region registered (base:size=%08x:%x)\n", - ioc->name, ioc->req_frames_dma, sz)); -#endif - for (i = 0; i < ioc->req_depth; i++) { alloc_dma += ioc->req_sz; mem += ioc->req_sz; diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index 8f14090b8b71..813d46311f6a 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h @@ -671,7 +671,6 @@ typedef struct _MPT_ADAPTER u8 *HostPageBuffer; /* SAS - host page buffer support */ u32 HostPageBuffer_sz; dma_addr_t HostPageBuffer_dma; - int mtrr_reg; struct pci_dev *pcidev; /* struct pci_dev pointer */ int bars; /* bitmask of BAR's that must be configured */ int msi_enable; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 5bdaae15a742..005a88b9f440 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4090,7 +4090,7 @@ mptsas_handle_queue_full_event(struct fw_event_work *fw_event) continue; } depth = scsi_track_queue_full(sdev, - current_depth - 1); + sdev->queue_depth - 1); if (depth > 0) sdev_printk(KERN_INFO, sdev, "Queue depth reduced to (%d)\n", @@ -4100,7 +4100,7 @@ mptsas_handle_queue_full_event(struct fw_event_work *fw_event) "Tagged Command Queueing is being " "disabled\n"); else if (depth == 0) - sdev_printk(KERN_INFO, sdev, + sdev_printk(KERN_DEBUG, sdev, "Queue depth not changed yet\n"); } } diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 58ea9fdd3a15..3591550598ad 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c @@ -566,8 +566,7 @@ static int ucb1x00_probe(struct mcp *mcp) } irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING); - irq_set_handler_data(ucb->irq, ucb); - irq_set_chained_handler(ucb->irq, ucb1x00_irq); + irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb); if (pdata && pdata->gpio_base) { ucb->gpio.label = dev_name(&ucb->dev); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 60f7141a6b02..c9c3d20b784b 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -913,6 +913,9 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, if (!err) break; + /* Re-tune if needed */ + mmc_retune_recheck(card->host); + prev_cmd_status_valid = false; pr_err("%s: error %d sending status command, %sing\n", req->rq_disk->disk_name, err, retry ? 
"retry" : "abort"); @@ -1204,6 +1207,7 @@ static int mmc_blk_err_check(struct mmc_card *card, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; + int need_retune = card->host->need_retune; int ecc_err = 0, gen_err = 0; /* @@ -1271,6 +1275,12 @@ static int mmc_blk_err_check(struct mmc_card *card, } if (brq->data.error) { + if (need_retune && !brq->retune_retry_done) { + pr_info("%s: retrying because a re-tune was needed\n", + req->rq_disk->disk_name); + brq->retune_retry_done = 1; + return MMC_BLK_RETRY; + } pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->data.error, (unsigned)blk_rq_pos(req), @@ -1830,7 +1840,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mq->mqrq_cur->brq; - int ret = 1, disable_multi = 0, retry = 0, type; + int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; struct request *req = rqc; @@ -1910,10 +1920,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) break; case MMC_BLK_CMD_ERR: ret = mmc_blk_cmd_err(md, card, brq, req, ret); - if (!mmc_blk_reset(md, card->host, type)) - break; - goto cmd_abort; + if (mmc_blk_reset(md, card->host, type)) + goto cmd_abort; + if (!ret) + goto start_new_req; + break; case MMC_BLK_RETRY: + retune_retry_done = brq->retune_retry_done; if (retry++ < 5) break; /* Fall through */ @@ -1976,6 +1989,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } + mq_rq->brq.retune_retry_done = retune_retry_done; } } while (ret); @@ -2217,7 +2231,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. 
*/ - size = card->csd.capacity << (card->csd.read_blkbits - 9); + size = (typeof(sector_t))card->csd.capacity + << (card->csd.read_blkbits - 9); } return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 53b741398b93..b78cf5d403a3 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -268,8 +268,6 @@ static int mmc_test_wait_busy(struct mmc_test_card *test) static int mmc_test_buffer_transfer(struct mmc_test_card *test, u8 *buffer, unsigned addr, unsigned blksz, int write) { - int ret; - struct mmc_request mrq = {0}; struct mmc_command cmd = {0}; struct mmc_command stop = {0}; @@ -292,11 +290,7 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test, if (data.error) return data.error; - ret = mmc_test_wait_busy(test); - if (ret) - return ret; - - return 0; + return mmc_test_wait_busy(test); } static void mmc_test_free_mem(struct mmc_test_mem *mem) @@ -826,9 +820,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); } - done_areq = cur_areq; - cur_areq = other_areq; - other_areq = done_areq; + swap(cur_areq, other_areq); dev_addr += blocks; } @@ -994,11 +986,7 @@ static int mmc_test_basic_write(struct mmc_test_card *test) sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); } static int mmc_test_basic_read(struct mmc_test_card *test) @@ -1012,44 +1000,29 @@ static int mmc_test_basic_read(struct mmc_test_card *test) sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); } static int mmc_test_verify_write(struct mmc_test_card *test) { - int ret; struct scatterlist sg; sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); } static int mmc_test_verify_read(struct mmc_test_card *test) { - int ret; struct scatterlist sg; sg_init_one(&sg, test->buffer, 512); - ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); } static int mmc_test_multi_write(struct mmc_test_card *test) { - int ret; unsigned int size; struct scatterlist sg; @@ -1066,16 +1039,11 @@ static int mmc_test_multi_write(struct mmc_test_card *test) sg_init_one(&sg, test->buffer, size); - ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); } static int mmc_test_multi_read(struct mmc_test_card *test) { - int ret; unsigned int size; struct scatterlist sg; @@ -1092,11 +1060,7 @@ static int mmc_test_multi_read(struct mmc_test_card *test) sg_init_one(&sg, test->buffer, size); - ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); } static int mmc_test_pow2_write(struct mmc_test_card *test) @@ -1263,11 +1227,7 @@ static int mmc_test_xfersize_write(struct mmc_test_card *test) if (ret) return ret; - ret = mmc_test_broken_transfer(test, 1, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_broken_transfer(test, 1, 512, 
1); } static int mmc_test_xfersize_read(struct mmc_test_card *test) @@ -1278,11 +1238,7 @@ static int mmc_test_xfersize_read(struct mmc_test_card *test) if (ret) return ret; - ret = mmc_test_broken_transfer(test, 1, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_broken_transfer(test, 1, 512, 0); } static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) @@ -1296,11 +1252,7 @@ static int mmc_test_multi_xfersize_write(struct mmc_test_card *test) if (ret) return ret; - ret = mmc_test_broken_transfer(test, 2, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_broken_transfer(test, 2, 512, 1); } static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) @@ -1314,48 +1266,33 @@ static int mmc_test_multi_xfersize_read(struct mmc_test_card *test) if (ret) return ret; - ret = mmc_test_broken_transfer(test, 2, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_broken_transfer(test, 2, 512, 0); } #ifdef CONFIG_HIGHMEM static int mmc_test_write_high(struct mmc_test_card *test) { - int ret; struct scatterlist sg; sg_init_table(&sg, 1); sg_set_page(&sg, test->highmem, 512, 0); - ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); } static int mmc_test_read_high(struct mmc_test_card *test) { - int ret; struct scatterlist sg; sg_init_table(&sg, 1); sg_set_page(&sg, test->highmem, 512, 0); - ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); } static int mmc_test_multi_write_high(struct mmc_test_card *test) { - int ret; unsigned int size; struct scatterlist sg; @@ -1373,16 +1310,11 @@ static int mmc_test_multi_write_high(struct mmc_test_card *test) sg_init_table(&sg, 1); sg_set_page(&sg, test->highmem, size, 0); - ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); } static int mmc_test_multi_read_high(struct mmc_test_card *test) { - int ret; unsigned int size; struct scatterlist sg; @@ -1400,11 +1332,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test) sg_init_table(&sg, 1); sg_set_page(&sg, test->highmem, size, 0); - ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); - if (ret) - return ret; - - return 0; + return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); } #else diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 8efa3684aef8..b5a2b145d89f 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -56,7 +56,6 @@ static int mmc_queue_thread(void *d) down(&mq->thread_sem); do { struct request *req = NULL; - struct mmc_queue_req *tmp; unsigned int cmd_flags = 0; spin_lock_irq(q->queue_lock); @@ -69,6 +68,7 @@ static int mmc_queue_thread(void *d) set_current_state(TASK_RUNNING); cmd_flags = req ? 
req->cmd_flags : 0; mq->issue_fn(mq, req); + cond_resched(); if (mq->flags & MMC_QUEUE_NEW_REQUEST) { mq->flags &= ~MMC_QUEUE_NEW_REQUEST; continue; /* fetch again */ @@ -86,9 +86,7 @@ static int mmc_queue_thread(void *d) mq->mqrq_prev->brq.mrq.data = NULL; mq->mqrq_prev->req = NULL; - tmp = mq->mqrq_prev; - mq->mqrq_prev = mq->mqrq_cur; - mq->mqrq_cur = tmp; + swap(mq->mqrq_prev, mq->mqrq_cur); } else { if (kthread_should_stop()) { set_current_state(TASK_RUNNING); diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 99e6521e6169..36cddab57d77 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -12,6 +12,7 @@ struct mmc_blk_request { struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; + int retune_retry_done; }; enum mmc_packed_type { diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 92e7671426eb..9ad73f30f744 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -133,6 +133,12 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) struct mmc_command *cmd = mrq->cmd; int err = cmd->error; + /* Flag re-tuning needed on CRC errors */ + if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || + (mrq->data && mrq->data->error == -EILSEQ) || + (mrq->stop && mrq->stop->error == -EILSEQ)) + mmc_retune_needed(host); + if (err && cmd->retries && mmc_host_is_spi(host)) { if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND) cmd->retries = 0; @@ -186,12 +192,29 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) EXPORT_SYMBOL(mmc_request_done); +static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) +{ + int err; + + /* Assumes host controller has been runtime resumed by mmc_claim_host */ + err = mmc_retune(host); + if (err) { + mrq->cmd->error = err; + mmc_request_done(host, mrq); + return; + } + + host->ops->request(host, mrq); +} + static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { #ifdef CONFIG_MMC_DEBUG unsigned int i, sz; struct scatterlist *sg; #endif + mmc_retune_hold(host); + if (mmc_card_removed(host->card)) return -ENOMEDIUM; @@ -252,7 +275,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) } mmc_host_clk_hold(host); led_trigger_event(host->led, LED_FULL); - host->ops->request(host, mrq); + __mmc_start_request(host, mrq); return 0; } @@ -301,12 +324,15 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) use_busy_signal = false; } + mmc_retune_hold(card->host); + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal, true, false); if (err) { pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); + mmc_retune_release(card->host); goto out; } @@ -317,6 +343,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) */ if (!use_busy_signal) mmc_card_set_doing_bkops(card); + else + mmc_retune_release(card->host); out: mmc_release_host(card->host); } @@ -417,22 +445,22 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host, host->areq); break; /* return err */ } else { + mmc_retune_recheck(host); pr_info("%s: req failed (CMD%u): %d, retrying...\n", mmc_hostname(host), cmd->opcode, cmd->error); cmd->retries--; cmd->error = 0; - host->ops->request(host, mrq); + __mmc_start_request(host, mrq); continue; /* wait for done/new event again */ } } else if (context_info->is_new_req) { context_info->is_new_req = false; - if (!next_req) { - err = MMC_BLK_NEW_REQUEST; - break; /* return err */ - } + if 
(!next_req) + return MMC_BLK_NEW_REQUEST; } } + mmc_retune_release(host); return err; } @@ -467,12 +495,16 @@ static void mmc_wait_for_req_done(struct mmc_host *host, mmc_card_removed(host->card)) break; + mmc_retune_recheck(host); + pr_debug("%s: req failed (CMD%u): %d, retrying...\n", mmc_hostname(host), cmd->opcode, cmd->error); cmd->retries--; cmd->error = 0; - host->ops->request(host, mrq); + __mmc_start_request(host, mrq); } + + mmc_retune_release(host); } /** @@ -728,6 +760,7 @@ int mmc_stop_bkops(struct mmc_card *card) */ if (!err || (err == -EINVAL)) { mmc_card_clr_doing_bkops(card); + mmc_retune_release(card->host); err = 0; } @@ -1109,6 +1142,8 @@ int mmc_execute_tuning(struct mmc_card *card) if (err) pr_err("%s: tuning execution failed\n", mmc_hostname(host)); + else + mmc_retune_enable(host); return err; } @@ -1140,6 +1175,8 @@ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) */ void mmc_set_initial_state(struct mmc_host *host) { + mmc_retune_disable(host); + if (mmc_host_is_spi(host)) host->ios.chip_select = MMC_CS_HIGH; else @@ -1147,6 +1184,7 @@ void mmc_set_initial_state(struct mmc_host *host) host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; + host->ios.drv_type = 0; mmc_set_ios(host); } @@ -1551,8 +1589,8 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) goto power_cycle; } - /* Keep clock gated for at least 5 ms */ - mmc_delay(5); + /* Keep clock gated for at least 10 ms, though spec only says 5 ms */ + mmc_delay(10); host->ios.clock = clock; mmc_set_ios(host); @@ -1601,6 +1639,44 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) mmc_host_clk_release(host); } +int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, + int card_drv_type, int *drv_type) +{ + struct mmc_host *host = card->host; + int host_drv_type = SD_DRIVER_TYPE_B; + int drive_strength; + + *drv_type = 0; + + if (!host->ops->select_drive_strength) + return 0; + + /* Use SD definition of driver strength for hosts */ + if (host->caps & MMC_CAP_DRIVER_TYPE_A) + host_drv_type |= SD_DRIVER_TYPE_A; + + if (host->caps & MMC_CAP_DRIVER_TYPE_C) + host_drv_type |= SD_DRIVER_TYPE_C; + + if (host->caps & MMC_CAP_DRIVER_TYPE_D) + host_drv_type |= SD_DRIVER_TYPE_D; + + /* + * The drive strength that the hardware can support + * depends on the board design. Pass the appropriate + * information and let the hardware specific code + * return what is possible given the options + */ + mmc_host_clk_hold(host); + drive_strength = host->ops->select_drive_strength(card, max_dtr, + host_drv_type, + card_drv_type, + drv_type); + mmc_host_clk_release(host); + + return drive_strength; +} + /* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. @@ -1970,6 +2046,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, unsigned long timeout; int err; + mmc_retune_hold(card->host); + /* * qty is used to calculate the erase timeout which depends on how many * erase groups (or allocation units in SD terminology) are affected. 
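
The core.c changes above establish the central re-tuning discipline of this series: any operation during which the card must not be re-tuned (BKOPS, sleep, erase, CMD6 switches) brackets itself with mmc_retune_hold()/mmc_retune_release(), and every early-exit path has to drop the hold. A sketch of the idiom, with mmc_do_something() as a hypothetical stand-in:

	static int mmc_do_something(struct mmc_card *card)
	{
		struct mmc_command cmd = {0};
		int err;

		mmc_retune_hold(card->host);	/* re-tuning blocked from here */

		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err)
			goto out;	/* never return with the hold taken */

		/* ... work that a re-tune must not interrupt ... */
	out:
		mmc_retune_release(card->host);
		return err;
	}
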
@@ -2073,6 +2151,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)); out: + mmc_retune_release(card->host); return err; } @@ -2331,7 +2410,8 @@ int mmc_hw_reset(struct mmc_host *host) ret = host->bus_ops->reset(host); mmc_bus_put(host); - pr_warn("%s: tried to reset card\n", mmc_hostname(host)); + if (ret != -EOPNOTSUPP) + pr_warn("%s: tried to reset card\n", mmc_hostname(host)); return ret; } diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index cfba3c05aab1..1a22a82209b2 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -50,6 +50,8 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr); int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); void mmc_set_timing(struct mmc_host *host, unsigned int timing); void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); +int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, + int card_drv_type, int *drv_type); void mmc_power_up(struct mmc_host *host, u32 ocr); void mmc_power_off(struct mmc_host *host); void mmc_power_cycle(struct mmc_host *host, u32 ocr); @@ -88,6 +90,8 @@ void mmc_remove_card_debugfs(struct mmc_card *card); void mmc_init_context_info(struct mmc_host *host); int mmc_execute_tuning(struct mmc_card *card); +int mmc_hs200_to_hs400(struct mmc_card *card); +int mmc_hs400_to_hs200(struct mmc_card *card); #endif diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 8be0df758e68..99a9c9011c50 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -301,6 +301,90 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) #endif +void mmc_retune_enable(struct mmc_host *host) +{ + host->can_retune = 1; + if (host->retune_period) + mod_timer(&host->retune_timer, + jiffies + host->retune_period * HZ); +} + +void mmc_retune_disable(struct mmc_host *host) +{ + host->can_retune = 0; + del_timer_sync(&host->retune_timer); + host->retune_now = 0; + host->need_retune = 0; +} + +void mmc_retune_timer_stop(struct mmc_host *host) +{ + del_timer_sync(&host->retune_timer); +} +EXPORT_SYMBOL(mmc_retune_timer_stop); + +void mmc_retune_hold(struct mmc_host *host) +{ + if (!host->hold_retune) + host->retune_now = 1; + host->hold_retune += 1; +} + +void mmc_retune_release(struct mmc_host *host) +{ + if (host->hold_retune) + host->hold_retune -= 1; + else + WARN_ON(1); +} + +int mmc_retune(struct mmc_host *host) +{ + bool return_to_hs400 = false; + int err; + + if (host->retune_now) + host->retune_now = 0; + else + return 0; + + if (!host->need_retune || host->doing_retune || !host->card) + return 0; + + host->need_retune = 0; + + host->doing_retune = 1; + + if (host->ios.timing == MMC_TIMING_MMC_HS400) { + err = mmc_hs400_to_hs200(host->card); + if (err) + goto out; + + return_to_hs400 = true; + + if (host->ops->prepare_hs400_tuning) + host->ops->prepare_hs400_tuning(host, &host->ios); + } + + err = mmc_execute_tuning(host->card); + if (err) + goto out; + + if (return_to_hs400) + err = mmc_hs200_to_hs400(host->card); +out: + host->doing_retune = 0; + + return err; +} + +static void mmc_retune_timer(unsigned long data) +{ + struct mmc_host *host = (struct mmc_host *)data; + + mmc_retune_needed(host); +} + /** * mmc_of_parse() - parse host's device-tree node * @host: host whose node should be parsed. 
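
host.c above also arms a periodic fallback: when a host driver sets host->retune_period (in seconds), mmc_retune_enable() starts the timer, and the timer callback merely raises need_retune, deferring the actual tuning to the next request issued through __mmc_start_request(). How a host driver is expected to participate, sketched (the ten-second period is an illustrative value, not taken from this diff):

	/* After a successful initial tuning in the host driver: */
	host->mmc->retune_period = 10;	/* re-tune at least every 10 s */

	/* On suspend, stop the timer but mark tuning as stale, as the
	 * mmc_sdio_suspend() hunk below does: */
	mmc_retune_timer_stop(host->mmc);
	mmc_retune_needed(host->mmc);
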
@@ -400,6 +484,9 @@ int mmc_of_parse(struct mmc_host *host) else if (ret != -ENOENT) return ret; + if (of_property_read_bool(np, "disable-wp")) + host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + /* See the comment on CD inversion above */ if (ro_cap_invert ^ ro_gpio_invert) host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; @@ -504,6 +591,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) #ifdef CONFIG_PM host->pm_notify.notifier_call = mmc_pm_notify; #endif + setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host); /* * By default, hosts do not support SGIO or large requests. diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index f2ab9e578126..992bf5397633 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -15,5 +15,11 @@ int mmc_register_host_class(void); void mmc_unregister_host_class(void); +void mmc_retune_enable(struct mmc_host *host); +void mmc_retune_disable(struct mmc_host *host); +void mmc_retune_hold(struct mmc_host *host); +void mmc_retune_release(struct mmc_host *host); +int mmc_retune(struct mmc_host *host); + #endif diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index f36c76f8b232..e726903170a8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -21,6 +21,7 @@ #include <linux/mmc/mmc.h> #include "core.h" +#include "host.h" #include "bus.h" #include "mmc_ops.h" #include "sd_ops.h" @@ -266,8 +267,10 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd) * calculate the enhanced data area offset, in bytes */ card->ext_csd.enhanced_area_offset = - (ext_csd[139] << 24) + (ext_csd[138] << 16) + - (ext_csd[137] << 8) + ext_csd[136]; + (((unsigned long long)ext_csd[139]) << 24) + + (((unsigned long long)ext_csd[138]) << 16) + + (((unsigned long long)ext_csd[137]) << 8) + + (((unsigned long long)ext_csd[136])); if (mmc_card_blockaddr(card)) card->ext_csd.enhanced_area_offset <<= 9; /* @@ -434,6 +437,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.raw_trim_mult = ext_csd[EXT_CSD_TRIM_MULT]; card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; + card->ext_csd.raw_driver_strength = ext_csd[EXT_CSD_DRIVER_STRENGTH]; if (card->ext_csd.rev >= 4) { if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] & EXT_CSD_PART_SETTING_COMPLETED) @@ -1040,6 +1044,7 @@ static int mmc_select_hs400(struct mmc_card *card) { struct mmc_host *host = card->host; int err = 0; + u8 val; /* * HS400 mode requires 8-bit bus width @@ -1055,8 +1060,10 @@ static int mmc_select_hs400(struct mmc_card *card) mmc_set_timing(card->host, MMC_TIMING_MMC_HS); mmc_set_bus_speed(card); + val = EXT_CSD_TIMING_HS | + card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, + EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, true, true, true); if (err) { @@ -1075,8 +1082,10 @@ static int mmc_select_hs400(struct mmc_card *card) return err; } + val = EXT_CSD_TIMING_HS400 | + card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, + EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, true, true, true); if (err) { @@ -1091,6 +1100,115 @@ static int mmc_select_hs400(struct mmc_card *card) return 0; } +int mmc_hs200_to_hs400(struct mmc_card *card) +{ + return mmc_select_hs400(card); +} + +/* Caller must hold re-tuning */ +static int mmc_switch_status(struct mmc_card *card) +{ + u32 status; + int err; + + err = 
mmc_send_status(card, &status); + if (err) + return err; + + return mmc_switch_status_error(card->host, status); +} + +int mmc_hs400_to_hs200(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + bool send_status = true; + unsigned int max_dtr; + int err; + u8 val; + + if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) + send_status = false; + + /* Reduce frequency to HS */ + max_dtr = card->ext_csd.hs_max_dtr; + mmc_set_clock(host, max_dtr); + + /* Switch HS400 to HS DDR */ + val = EXT_CSD_TIMING_HS | + card->drive_strength << EXT_CSD_DRV_STR_SHIFT; + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, + val, card->ext_csd.generic_cmd6_time, + true, send_status, true); + if (err) + goto out_err; + + mmc_set_timing(host, MMC_TIMING_MMC_DDR52); + + if (!send_status) { + err = mmc_switch_status(card); + if (err) + goto out_err; + } + + /* Switch HS DDR to HS */ + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, + EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time, + true, send_status, true); + if (err) + goto out_err; + + mmc_set_timing(host, MMC_TIMING_MMC_HS); + + if (!send_status) { + err = mmc_switch_status(card); + if (err) + goto out_err; + } + + /* Switch HS to HS200 */ + val = EXT_CSD_TIMING_HS200 | + card->drive_strength << EXT_CSD_DRV_STR_SHIFT; + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, + val, card->ext_csd.generic_cmd6_time, true, + send_status, true); + if (err) + goto out_err; + + mmc_set_timing(host, MMC_TIMING_MMC_HS200); + + if (!send_status) { + err = mmc_switch_status(card); + if (err) + goto out_err; + } + + mmc_set_bus_speed(card); + + return 0; + +out_err: + pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host), + __func__, err); + return err; +} + +static void mmc_select_driver_type(struct mmc_card *card) +{ + int card_drv_type, drive_strength, drv_type; + + card_drv_type = card->ext_csd.raw_driver_strength | + mmc_driver_type_mask(0); + + drive_strength = mmc_select_drive_strength(card, + card->ext_csd.hs200_max_dtr, + card_drv_type, &drv_type); + + card->drive_strength = drive_strength; + + if (drv_type) + mmc_set_driver_type(card->host, drv_type); +} + /* * For device supporting HS200 mode, the following sequence * should be done before executing the tuning process. @@ -1102,6 +1220,7 @@ static int mmc_select_hs200(struct mmc_card *card) { struct mmc_host *host = card->host; int err = -EINVAL; + u8 val; if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V) err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); @@ -1113,14 +1232,18 @@ static int mmc_select_hs200(struct mmc_card *card) if (err) goto err; + mmc_select_driver_type(card); + /* * Set the bus width(4 or 8) with host's support and * switch to HS200 mode if bus width is set successfully. 
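
A detail shared by all of the timing switches above (HS, HS200, HS400 and the HS400-to-HS200 downgrade): the chosen drive strength is no longer programmed separately but folded into the EXT_CSD HS_TIMING byte, whose low nibble selects the timing and whose high nibble carries the driver strength (EXT_CSD_DRV_STR_SHIFT being 4, as the bit layout here implies). The encoding in isolation:

	/* HS_TIMING: bits 3:0 = timing mode, bits 7:4 = driver strength */
	u8 val = EXT_CSD_TIMING_HS200 |
		 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   val, card->ext_csd.generic_cmd6_time,
			   true, true, true);
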
*/ err = mmc_select_bus_width(card); if (!IS_ERR_VALUE(err)) { + val = EXT_CSD_TIMING_HS200 | + card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200, + EXT_CSD_HS_TIMING, val, card->ext_csd.generic_cmd6_time, true, true, true); if (!err) @@ -1511,9 +1634,12 @@ static int mmc_sleep(struct mmc_host *host) unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000); int err; + /* Re-tuning can't be done once the card is deselected */ + mmc_retune_hold(host); + err = mmc_deselect_cards(host); if (err) - return err; + goto out_release; cmd.opcode = MMC_SLEEP_AWAKE; cmd.arg = card->rca << 16; @@ -1534,7 +1660,7 @@ static int mmc_sleep(struct mmc_host *host) err = mmc_wait_for_cmd(host, &cmd, 0); if (err) - return err; + goto out_release; /* * If the host does not wait while the card signals busy, then we will @@ -1545,6 +1671,8 @@ static int mmc_sleep(struct mmc_host *host) if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) mmc_delay(timeout_ms); +out_release: + mmc_retune_release(host); return err; } @@ -1782,17 +1910,6 @@ static int mmc_runtime_resume(struct mmc_host *host) return 0; } -static int mmc_power_restore(struct mmc_host *host) -{ - int ret; - - mmc_claim_host(host); - ret = mmc_init_card(host, host->card->ocr, host->card); - mmc_release_host(host); - - return ret; -} - int mmc_can_reset(struct mmc_card *card) { u8 rst_n_function; @@ -1830,7 +1947,7 @@ static int mmc_reset(struct mmc_host *host) mmc_set_initial_state(host); mmc_host_clk_release(host); - return mmc_power_restore(host); + return mmc_init_card(host, card->ocr, card); } static const struct mmc_bus_ops mmc_ops = { @@ -1840,7 +1957,6 @@ static const struct mmc_bus_ops mmc_ops = { .resume = mmc_resume, .runtime_suspend = mmc_runtime_suspend, .runtime_resume = mmc_runtime_resume, - .power_restore = mmc_power_restore, .alive = mmc_alive, .shutdown = mmc_shutdown, .reset = mmc_reset, diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 0ea042dc7443..0e9ae1c276c8 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -19,6 +19,7 @@ #include <linux/mmc/mmc.h> #include "core.h" +#include "host.h" #include "mmc_ops.h" #define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ @@ -449,6 +450,21 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) return err; } +int mmc_switch_status_error(struct mmc_host *host, u32 status) +{ + if (mmc_host_is_spi(host)) { + if (status & R1_SPI_ILLEGAL_COMMAND) + return -EBADMSG; + } else { + if (status & 0xFDFFA000) + pr_warn("%s: unexpected status %#x after switch\n", + mmc_hostname(host), status); + if (status & R1_SWITCH_ERROR) + return -EBADMSG; + } + return 0; +} + /** * __mmc_switch - modify EXT_CSD register * @card: the MMC card associated with the data transfer @@ -474,6 +490,8 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, u32 status = 0; bool use_r1b_resp = use_busy_signal; + mmc_retune_hold(host); + /* * If the cmd timeout and the max_busy_timeout of the host are both * specified, let's validate them. 
A failure means we need to prevent @@ -506,11 +524,11 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) - return err; + goto out; /* No need to check card status in case of unblocking command */ if (!use_busy_signal) - return 0; + goto out; /* * CRC errors shall only be ignored in cases were CMD13 is used to poll @@ -529,7 +547,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (send_status) { err = __mmc_send_status(card, &status, ignore_crc); if (err) - return err; + goto out; } if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) break; @@ -543,29 +561,23 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, */ if (!send_status) { mmc_delay(timeout_ms); - return 0; + goto out; } /* Timeout if the device never leaves the program state. */ if (time_after(jiffies, timeout)) { pr_err("%s: Card stuck in programming state! %s\n", mmc_hostname(host), __func__); - return -ETIMEDOUT; + err = -ETIMEDOUT; + goto out; } } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); - if (mmc_host_is_spi(host)) { - if (status & R1_SPI_ILLEGAL_COMMAND) - return -EBADMSG; - } else { - if (status & 0xFDFFA000) - pr_warn("%s: unexpected status %#x after switch\n", - mmc_hostname(host), status); - if (status & R1_SWITCH_ERROR) - return -EBADMSG; - } + err = mmc_switch_status_error(host, status); +out: + mmc_retune_release(host); - return 0; + return err; } EXPORT_SYMBOL_GPL(__mmc_switch); diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 6f4b00ed93de..f498f9ae21f0 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -27,6 +27,7 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc); int mmc_bus_test(struct mmc_card *card, u8 bus_width); int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); int mmc_can_ext_csd(struct mmc_card *card); +int mmc_switch_status_error(struct mmc_host *host, u32 status); #endif diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 31a9ef256d06..4e7366ab187f 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -386,64 +386,31 @@ out: static int sd_select_driver_type(struct mmc_card *card, u8 *status) { - int host_drv_type = SD_DRIVER_TYPE_B; - int card_drv_type = SD_DRIVER_TYPE_B; - int drive_strength; + int card_drv_type, drive_strength, drv_type; int err; - /* - * If the host doesn't support any of the Driver Types A,C or D, - * or there is no board specific handler then default Driver - * Type B is used. - */ - if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C - | MMC_CAP_DRIVER_TYPE_D))) - return 0; - - if (!card->host->ops->select_drive_strength) - return 0; - - if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) - host_drv_type |= SD_DRIVER_TYPE_A; - - if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) - host_drv_type |= SD_DRIVER_TYPE_C; - - if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) - host_drv_type |= SD_DRIVER_TYPE_D; - - if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) - card_drv_type |= SD_DRIVER_TYPE_A; - - if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) - card_drv_type |= SD_DRIVER_TYPE_C; + card->drive_strength = 0; - if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D) - card_drv_type |= SD_DRIVER_TYPE_D; + card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B; - /* - * The drive strength that the hardware can support - * depends on the board design. 
Pass the appropriate - * information and let the hardware specific code - * return what is possible given the options - */ - mmc_host_clk_hold(card->host); - drive_strength = card->host->ops->select_drive_strength( - card->sw_caps.uhs_max_dtr, - host_drv_type, card_drv_type); - mmc_host_clk_release(card->host); - - err = mmc_sd_switch(card, 1, 2, drive_strength, status); - if (err) - return err; + drive_strength = mmc_select_drive_strength(card, + card->sw_caps.uhs_max_dtr, + card_drv_type, &drv_type); - if ((status[15] & 0xF) != drive_strength) { - pr_warn("%s: Problem setting drive strength!\n", - mmc_hostname(card->host)); - return 0; + if (drive_strength) { + err = mmc_sd_switch(card, 1, 2, drive_strength, status); + if (err) + return err; + if ((status[15] & 0xF) != drive_strength) { + pr_warn("%s: Problem setting drive strength!\n", + mmc_hostname(card->host)); + return 0; + } + card->drive_strength = drive_strength; } - mmc_set_driver_type(card->host, drive_strength); + if (drv_type) + mmc_set_driver_type(card->host, drv_type); return 0; } @@ -804,6 +771,28 @@ int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card) return 0; } +static int mmc_sd_get_ro(struct mmc_host *host) +{ + int ro; + + /* + * Some systems don't feature a write-protect pin and don't need one. + * E.g. because they only have micro-SD card slot. For those systems + * assume that the SD card is always read-write. + */ + if (host->caps2 & MMC_CAP2_NO_WRITE_PROTECT) + return 0; + + if (!host->ops->get_ro) + return -1; + + mmc_host_clk_hold(host); + ro = host->ops->get_ro(host); + mmc_host_clk_release(host); + + return ro; +} + int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, bool reinit) { @@ -855,13 +844,7 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, * Check if read-only switch is active. 
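
The new mmc_sd_get_ro() above centralizes the read-only probe and honours MMC_CAP2_NO_WRITE_PROTECT, which the mmc_of_parse() hunk earlier sets from the disable-wp device-tree property, so slots that physically lack a write-protect switch (micro-SD, typically) always report read-write. Its decision tree, condensed and with the clock-gating bracketing omitted:

	static int mmc_sd_get_ro(struct mmc_host *host)
	{
		if (host->caps2 & MMC_CAP2_NO_WRITE_PROTECT)
			return 0;	/* no WP pin by design: read-write */
		if (!host->ops->get_ro)
			return -1;	/* unknown; caller assumes write-enable */
		return host->ops->get_ro(host);	/* ask the slot */
	}
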
*/ if (!reinit) { - int ro = -1; - - if (host->ops->get_ro) { - mmc_host_clk_hold(card->host); - ro = host->ops->get_ro(host); - mmc_host_clk_release(card->host); - } + int ro = mmc_sd_get_ro(host); if (ro < 0) { pr_warn("%s: host does not support reading read-only switch, assuming write-enable\n", @@ -1181,21 +1164,10 @@ static int mmc_sd_runtime_resume(struct mmc_host *host) return 0; } -static int mmc_sd_power_restore(struct mmc_host *host) -{ - int ret; - - mmc_claim_host(host); - ret = mmc_sd_init_card(host, host->card->ocr, host->card); - mmc_release_host(host); - - return ret; -} - static int mmc_sd_reset(struct mmc_host *host) { mmc_power_cycle(host, host->card->ocr); - return mmc_sd_power_restore(host); + return mmc_sd_init_card(host, host->card->ocr, host->card); } static const struct mmc_bus_ops mmc_sd_ops = { @@ -1205,7 +1177,6 @@ static const struct mmc_bus_ops mmc_sd_ops = { .runtime_resume = mmc_sd_runtime_resume, .suspend = mmc_sd_suspend, .resume = mmc_sd_resume, - .power_restore = mmc_sd_power_restore, .alive = mmc_sd_alive, .shutdown = mmc_sd_suspend, .reset = mmc_sd_reset, diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 5bc6c7dbbd60..b91abedcfdca 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -402,69 +402,38 @@ static unsigned char host_drive_to_sdio_drive(int host_strength) static void sdio_select_driver_type(struct mmc_card *card) { - int host_drv_type = SD_DRIVER_TYPE_B; - int card_drv_type = SD_DRIVER_TYPE_B; - int drive_strength; + int card_drv_type, drive_strength, drv_type; unsigned char card_strength; int err; - /* - * If the host doesn't support any of the Driver Types A,C or D, - * or there is no board specific handler then default Driver - * Type B is used. - */ - if (!(card->host->caps & - (MMC_CAP_DRIVER_TYPE_A | - MMC_CAP_DRIVER_TYPE_C | - MMC_CAP_DRIVER_TYPE_D))) - return; - - if (!card->host->ops->select_drive_strength) - return; - - if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) - host_drv_type |= SD_DRIVER_TYPE_A; - - if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) - host_drv_type |= SD_DRIVER_TYPE_C; + card->drive_strength = 0; - if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) - host_drv_type |= SD_DRIVER_TYPE_D; + card_drv_type = card->sw_caps.sd3_drv_type | SD_DRIVER_TYPE_B; - if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) - card_drv_type |= SD_DRIVER_TYPE_A; - - if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) - card_drv_type |= SD_DRIVER_TYPE_C; - - if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D) - card_drv_type |= SD_DRIVER_TYPE_D; - - /* - * The drive strength that the hardware can support - * depends on the board design. 
Pass the appropriate - * information and let the hardware specific code - * return what is possible given the options - */ - drive_strength = card->host->ops->select_drive_strength( - card->sw_caps.uhs_max_dtr, - host_drv_type, card_drv_type); + drive_strength = mmc_select_drive_strength(card, + card->sw_caps.uhs_max_dtr, + card_drv_type, &drv_type); - /* if error just use default for drive strength B */ - err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0, - &card_strength); - if (err) - return; + if (drive_strength) { + /* if error just use default for drive strength B */ + err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0, + &card_strength); + if (err) + return; - card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT); - card_strength |= host_drive_to_sdio_drive(drive_strength); + card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT); + card_strength |= host_drive_to_sdio_drive(drive_strength); - err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH, - card_strength, NULL); + /* if error default to drive strength B */ + err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH, + card_strength, NULL); + if (err) + return; + card->drive_strength = drive_strength; + } - /* if error default to drive strength B */ - if (!err) - mmc_set_driver_type(card->host, drive_strength); + if (drv_type) + mmc_set_driver_type(card->host, drv_type); } @@ -934,8 +903,12 @@ static int mmc_sdio_suspend(struct mmc_host *host) mmc_release_host(host); } - if (!mmc_card_keep_power(host)) + if (!mmc_card_keep_power(host)) { mmc_power_off(host); + } else if (host->retune_period) { + mmc_retune_timer_stop(host); + mmc_retune_needed(host); + } return 0; } @@ -1056,6 +1029,12 @@ static int mmc_sdio_runtime_resume(struct mmc_host *host) return mmc_sdio_power_restore(host); } +static int mmc_sdio_reset(struct mmc_host *host) +{ + mmc_power_cycle(host, host->card->ocr); + return mmc_sdio_power_restore(host); +} + static const struct mmc_bus_ops mmc_sdio_ops = { .remove = mmc_sdio_remove, .detect = mmc_sdio_detect, @@ -1066,6 +1045,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = { .runtime_resume = mmc_sdio_runtime_resume, .power_restore = mmc_sdio_power_restore, .alive = mmc_sdio_alive, + .reset = mmc_sdio_reset, }; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index bee02e644d62..7e327a6dd53d 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -137,6 +137,10 @@ static int sdio_bus_probe(struct device *dev) if (!id) return -ENODEV; + ret = dev_pm_domain_attach(dev, false); + if (ret == -EPROBE_DEFER) + return ret; + /* Unbound SDIO functions are always suspended. * During probe, the function is set active and the usage count * is incremented. 
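Moving dev_pm_domain_attach() from sdio_add_func() into sdio_bus_probe() (with the matching detach in the probe error path and in sdio_bus_remove()) lets -EPROBE_DEFER from the PM domain code reach the driver core, so the probe is retried once the domain is available. Stripped of the SDIO specifics, the pattern looks like this (example_setup() is a stand-in for the driver-specific work):

#include <linux/device.h>
#include <linux/pm_domain.h>

static int example_setup(struct device *dev)
{
        return 0;       /* driver-specific initialization would go here */
}

static int example_bus_probe(struct device *dev)
{
        int ret;

        ret = dev_pm_domain_attach(dev, false);
        if (ret == -EPROBE_DEFER)
                return ret;     /* retried once the PM domain appears */

        ret = example_setup(dev);
        if (ret)
                dev_pm_domain_detach(dev, false);       /* stay balanced */
        return ret;
}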
If the driver supports runtime PM, @@ -166,6 +170,7 @@ static int sdio_bus_probe(struct device *dev) disable_runtimepm: if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) pm_runtime_put_noidle(dev); + dev_pm_domain_detach(dev, false); return ret; } @@ -197,6 +202,8 @@ static int sdio_bus_remove(struct device *dev) if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) pm_runtime_put_sync(dev); + dev_pm_domain_detach(dev, false); + return ret; } @@ -316,10 +323,8 @@ int sdio_add_func(struct sdio_func *func) sdio_set_of_node(func); sdio_acpi_set_handle(func); ret = device_add(&func->dev); - if (ret == 0) { + if (ret == 0) sdio_func_set_present(func); - dev_pm_domain_attach(&func->dev, false); - } return ret; } @@ -335,7 +340,6 @@ void sdio_remove_func(struct sdio_func *func) if (!sdio_func_present(func)) return; - dev_pm_domain_detach(&func->dev, false); device_del(&func->dev); of_node_put(func->dev.of_node); put_device(&func->dev); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index b1f837e749fe..fd9a58e216a5 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -219,6 +219,7 @@ config MMC_SDHCI_SIRF tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs" depends on ARCH_SIRF depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS help This selects the SDHCI support for SiRF System-on-Chip devices. @@ -775,3 +776,11 @@ config MMC_TOSHIBA_PCI tristate "Toshiba Type A SD/MMC Card Interface Driver" depends on PCI help + +config MMC_MTK + tristate "MediaTek SD/MMC Card Interface support" + help + This selects the MediaTek(R) Secure Digital and MultiMedia Card interface. + If you have a machine with an integrated SD/MMC card reader, say Y or M here. + This is needed if support for any SD/SDIO/MMC devices is required. + If unsure, say N. diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index e3ab5b968651..e928d61c5f4b 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o +obj-$(CONFIG_MMC_MTK) += mtk-sd.o obj-$(CONFIG_MMC_OMAP) += omap.o obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 1625f908dc70..ea2a2ebc6b91 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1161,7 +1161,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host) mmc_davinci_reset_ctrl(host, 0); } -static struct platform_device_id davinci_mmc_devtype[] = { +static const struct platform_device_id davinci_mmc_devtype[] = { { .name = "dm6441-mmc", .driver_data = MMC_CTLR_VERSION_1, diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index e761eb1b1441..1e75309898b7 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -556,4 +556,4 @@ module_platform_driver(dw_mci_exynos_pltfm_driver); MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:dwmmc-exynos"); +MODULE_ALIAS("platform:dwmmc_exynos"); diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 650f9cc3f7a6..63c2e2ed1288 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -8,16 +8,30 @@ * (at your option) any later version.
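The hi6220 support added to dw_mmc-k3.c below hinges on a syscon regmap, looked up by phandle, through which the AO_SCTRL_SEL18 pad-voltage bit is flipped. The lookup idiom in isolation (property name as used by the code below; lookup failure is treated as "no pad switch available" rather than fatal):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *example_get_ao_syscon(struct device *dev)
{
        struct regmap *reg;

        reg = syscon_regmap_lookup_by_phandle(dev->of_node,
                        "hisilicon,peripheral-syscon");
        /* optional: without it the driver simply skips the pad switch */
        return IS_ERR(reg) ? NULL : reg;
}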
*/ -#include <linux/module.h> -#include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/mfd/syscon.h> #include <linux/mmc/host.h> #include <linux/mmc/dw_mmc.h> +#include <linux/module.h> #include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include "dw_mmc.h" #include "dw_mmc-pltfm.h" +/* + * hi6220 sd only support io voltage 1.8v and 3v + * Also need config AO_SCTRL_SEL18 accordingly + */ +#define AO_SCTRL_SEL18 BIT(10) +#define AO_SCTRL_CTRL3 0x40C + +struct k3_priv { + struct regmap *reg; +}; + static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) { int ret; @@ -33,8 +47,93 @@ static const struct dw_mci_drv_data k3_drv_data = { .set_ios = dw_mci_k3_set_ios, }; +static int dw_mci_hi6220_parse_dt(struct dw_mci *host) +{ + struct k3_priv *priv; + + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->reg = syscon_regmap_lookup_by_phandle(host->dev->of_node, + "hisilicon,peripheral-syscon"); + if (IS_ERR(priv->reg)) + priv->reg = NULL; + + host->priv = priv; + return 0; +} + +static int dw_mci_hi6220_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct k3_priv *priv; + struct dw_mci *host; + int min_uv, max_uv; + int ret; + + host = slot->host; + priv = host->priv; + + if (!priv || !priv->reg) + return 0; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + ret = regmap_update_bits(priv->reg, AO_SCTRL_CTRL3, + AO_SCTRL_SEL18, 0); + min_uv = 3000000; + max_uv = 3000000; + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = regmap_update_bits(priv->reg, AO_SCTRL_CTRL3, + AO_SCTRL_SEL18, AO_SCTRL_SEL18); + min_uv = 1800000; + max_uv = 1800000; + } else { + dev_dbg(host->dev, "voltage not supported\n"); + return -EINVAL; + } + + if (ret) { + dev_dbg(host->dev, "switch voltage failed\n"); + return ret; + } + + if (IS_ERR_OR_NULL(mmc->supply.vqmmc)) + return 0; + + ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv); + if (ret) { + dev_dbg(host->dev, "Regulator set error %d: %d - %d\n", + ret, min_uv, max_uv); + return ret; + } + + return 0; +} + +static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + unsigned int clock; + + clock = (ios->clock <= 25000000) ? 
25000000 : ios->clock; + + ret = clk_set_rate(host->biu_clk, clock); + if (ret) + dev_warn(host->dev, "failed to set rate %uHz\n", clock); + + host->bus_hz = clk_get_rate(host->biu_clk); +} + +static const struct dw_mci_drv_data hi6220_data = { + .switch_voltage = dw_mci_hi6220_switch_voltage, + .set_ios = dw_mci_hi6220_set_ios, + .parse_dt = dw_mci_hi6220_parse_dt, +}; + static const struct of_device_id dw_mci_k3_match[] = { { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, }, + { .compatible = "hisilicon,hi6220-dw-mshc", .data = &hi6220_data, }, {}, }; MODULE_DEVICE_TABLE(of, dw_mci_k3_match); @@ -94,4 +193,4 @@ module_platform_driver(dw_mci_k3_pltfm_driver); MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:dwmmc-k3"); +MODULE_ALIAS("platform:dwmmc_k3"); diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index dbf166f94f1b..de15121bba7d 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c @@ -153,5 +153,5 @@ module_platform_driver(dw_mci_rockchip_pltfm_driver); MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>"); MODULE_DESCRIPTION("Rockchip Specific DW-MSHC Driver Extension"); -MODULE_ALIAS("platform:dwmmc-rockchip"); +MODULE_ALIAS("platform:dwmmc_rockchip"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 5f5adafb253a..40e9d8e45f25 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1236,11 +1236,15 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) { struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; u32 uhs; u32 v18 = SDMMC_UHS_18V << slot->id; int min_uv, max_uv; int ret; + if (drv_data && drv_data->switch_voltage) + return drv_data->switch_voltage(mmc, ios); + /* * Program the voltage. Note that some instances of dw_mmc may use * the UHS_REG for this. 
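With the new hook, dw_mci_switch_voltage() gives the platform glue the first and only say whenever the hook is present, bypassing the common UHS_REG and vqmmc handling shown below. A minimal sketch of how a variant opts in (names illustrative; the hook itself is added to struct dw_mci_drv_data in the dw_mmc.h hunk further down):

#include "dw_mmc.h"

static int dw_mci_example_switch_voltage(struct mmc_host *mmc,
                                         struct mmc_ios *ios)
{
        /* variant-specific syscon / pad / regulator switching */
        return 0;
}

static const struct dw_mci_drv_data example_drv_data = {
        .switch_voltage = dw_mci_example_switch_voltage,
        /* other hooks (.set_ios, .parse_dt, ...) as required */
};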
For other instances (like exynos) the UHS_REG @@ -1278,10 +1282,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc) int gpio_ro = mmc_gpio_get_ro(mmc); /* Use platform get_ro function, else try on board write protect */ - if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) || - (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT)) - read_only = 0; - else if (!IS_ERR_VALUE(gpio_ro)) + if (!IS_ERR_VALUE(gpio_ro)) read_only = gpio_ro; else read_only = @@ -2280,9 +2281,10 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } #ifdef CONFIG_OF -/* given a slot id, find out the device node representing that slot */ -static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) +/* given a slot, find out the device node representing that slot */ +static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot) { + struct device *dev = slot->mmc->parent; struct device_node *np; const __be32 *addr; int len; @@ -2294,42 +2296,28 @@ static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) addr = of_get_property(np, "reg", &len); if (!addr || (len < sizeof(int))) continue; - if (be32_to_cpup(addr) == slot) + if (be32_to_cpup(addr) == slot->id) return np; } return NULL; } -static struct dw_mci_of_slot_quirks { - char *quirk; - int id; -} of_slot_quirks[] = { - { - .quirk = "disable-wp", - .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT, - }, -}; - -static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) +static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) { - struct device_node *np = dw_mci_of_find_slot_node(dev, slot); - int quirks = 0; - int idx; + struct device_node *np = dw_mci_of_find_slot_node(slot); - /* get quirks */ - for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++) - if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) { - dev_warn(dev, "Slot quirk %s is deprecated\n", - of_slot_quirks[idx].quirk); - quirks |= of_slot_quirks[idx].id; - } + if (!np) + return; - return quirks; + if (of_property_read_bool(np, "disable-wp")) { + slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; + dev_warn(slot->mmc->parent, + "Slot quirk 'disable-wp' is deprecated\n"); + } } #else /* CONFIG_OF */ -static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) +static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) { - return 0; } #endif /* CONFIG_OF */ @@ -2352,8 +2340,6 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) slot->host = host; host->slot[id] = slot; - slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); - mmc->ops = &dw_mci_ops; if (of_property_read_u32_array(host->dev->of_node, "clock-freq-min-max", freq, 2)) { @@ -2391,6 +2377,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) if (host->pdata->caps2) mmc->caps2 = host->pdata->caps2; + dw_mci_slot_of_parse(slot); + ret = mmc_of_parse(mmc); if (ret) goto err_host_allocated; @@ -2618,9 +2606,6 @@ static struct dw_mci_of_quirks { { .quirk = "broken-cd", .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, - }, { - .quirk = "disable-wp", - .id = DW_MCI_QUIRK_NO_WRITE_PROTECT, }, }; @@ -2941,15 +2926,15 @@ void dw_mci_remove(struct dw_mci *host) { int i; - mci_writel(host, RINTSTS, 0xFFFFFFFF); - mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ - for (i = 0; i < host->num_slots; i++) { dev_dbg(host->dev, "remove slot %d\n", i); if (host->slot[i]) dw_mci_cleanup_slot(host->slot[i], i); } + mci_writel(host, RINTSTS, 0xFFFFFFFF); + mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ + /* 
disable clock to CIU */ mci_writel(host, CLKENA, 0); mci_writel(host, CLKSRC, 0); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index f45ab91de339..8ce4674730a6 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -227,7 +227,6 @@ extern int dw_mci_resume(struct dw_mci *host); * struct dw_mci_slot - MMC slot state * @mmc: The mmc_host representing this slot. * @host: The MMC controller this slot is using. - * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) * @ctype: Card type for this slot. * @mrq: mmc_request currently being processed or waiting to be * processed, or NULL when the slot is idle. @@ -245,8 +244,6 @@ struct dw_mci_slot { struct mmc_host *mmc; struct dw_mci *host; - int quirks; - u32 ctype; struct mmc_request *mrq; @@ -287,5 +284,7 @@ struct dw_mci_drv_data { int (*execute_tuning)(struct dw_mci_slot *slot); int (*prepare_hs400_tuning)(struct dw_mci *host, struct mmc_ios *ios); + int (*switch_voltage)(struct mmc_host *mmc, + struct mmc_ios *ios); }; #endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c new file mode 100644 index 000000000000..7153500dd007 --- /dev/null +++ b/drivers/mmc/host/mtk-sd.c @@ -0,0 +1,1462 @@ +/* + * Copyright (c) 2014-2015 MediaTek Inc. + * Author: Chaotian.Jing <chaotian.jing@mediatek.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
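One idiom worth flagging before the long register map that follows: the driver accesses sub-fields of these registers through sdr_set_field()/sdr_get_field() helpers (defined after the map), which locate a field by the lowest set bit of its mask, so no separate *_SHIFT constants are needed. The core of that trick, with a worked example:

#include <linux/bitops.h>       /* ffs() */

/* Place val into the sub-field described by mask: ffs(mask) - 1 is the
 * field's bit offset within the register. */
static u32 example_field_prep(u32 old, u32 mask, u32 val)
{
        return (old & ~mask) | (val << (ffs(mask) - 1));
}

/* example_field_prep(0, 0xff << 8, 3) == 0x300: ffs(0xff00) is 9, so 3 is
 * shifted left by 8 into bits 15:8 (cf. MSDC_CFG_CKDIV below). */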
+ */ + +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_gpio.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/spinlock.h> + +#include <linux/mmc/card.h> +#include <linux/mmc/core.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> + +#define MAX_BD_NUM 1024 + +/*--------------------------------------------------------------------------*/ +/* Common Definition */ +/*--------------------------------------------------------------------------*/ +#define MSDC_BUS_1BITS 0x0 +#define MSDC_BUS_4BITS 0x1 +#define MSDC_BUS_8BITS 0x2 + +#define MSDC_BURST_64B 0x6 + +/*--------------------------------------------------------------------------*/ +/* Register Offset */ +/*--------------------------------------------------------------------------*/ +#define MSDC_CFG 0x0 +#define MSDC_IOCON 0x04 +#define MSDC_PS 0x08 +#define MSDC_INT 0x0c +#define MSDC_INTEN 0x10 +#define MSDC_FIFOCS 0x14 +#define SDC_CFG 0x30 +#define SDC_CMD 0x34 +#define SDC_ARG 0x38 +#define SDC_STS 0x3c +#define SDC_RESP0 0x40 +#define SDC_RESP1 0x44 +#define SDC_RESP2 0x48 +#define SDC_RESP3 0x4c +#define SDC_BLK_NUM 0x50 +#define SDC_ACMD_RESP 0x80 +#define MSDC_DMA_SA 0x90 +#define MSDC_DMA_CTRL 0x98 +#define MSDC_DMA_CFG 0x9c +#define MSDC_PATCH_BIT 0xb0 +#define MSDC_PATCH_BIT1 0xb4 +#define MSDC_PAD_TUNE 0xec + +/*--------------------------------------------------------------------------*/ +/* Register Mask */ +/*--------------------------------------------------------------------------*/ + +/* MSDC_CFG mask */ +#define MSDC_CFG_MODE (0x1 << 0) /* RW */ +#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */ +#define MSDC_CFG_RST (0x1 << 2) /* RW */ +#define MSDC_CFG_PIO (0x1 << 3) /* RW */ +#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */ +#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */ +#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */ +#define MSDC_CFG_CKSTB (0x1 << 7) /* R */ +#define MSDC_CFG_CKDIV (0xff << 8) /* RW */ +#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ + +/* MSDC_IOCON mask */ +#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ +#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */ +#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */ +#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */ +#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */ +#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */ +#define MSDC_IOCON_W_DSPL (0x1 << 8) /* RW */ +#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */ +#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */ +#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */ +#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */ +#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */ +#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */ +#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */ +#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */ +#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */ + +/* MSDC_PS mask */ +#define MSDC_PS_CDEN (0x1 << 0) /* RW */ +#define MSDC_PS_CDSTS (0x1 << 1) /* R */ +#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ +#define MSDC_PS_DAT (0xff << 16) /* R */ +#define MSDC_PS_CMD (0x1 << 24) /* R */ +#define MSDC_PS_WP (0x1 << 31) /* R */ + +/* MSDC_INT mask */ +#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */ +#define MSDC_INT_CDSC (0x1 << 1) /* W1C */ +#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */ 
+#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */ +#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */ +#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */ +#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */ +#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */ +#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */ +#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */ +#define MSDC_INT_CSTA (0x1 << 11) /* R */ +#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */ +#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */ +#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */ +#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */ +#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */ +#define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */ +#define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */ +#define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */ + +/* MSDC_INTEN mask */ +#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */ +#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */ +#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */ +#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */ +#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */ +#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */ +#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */ +#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */ +#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */ +#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */ +#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */ +#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */ +#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */ +#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */ +#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */ +#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */ +#define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */ +#define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */ +#define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */ + +/* MSDC_FIFOCS mask */ +#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */ +#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */ +#define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */ + +/* SDC_CFG mask */ +#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */ +#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */ +#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */ +#define SDC_CFG_SDIO (0x1 << 19) /* RW */ +#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */ +#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */ +#define SDC_CFG_DTOC (0xff << 24) /* RW */ + +/* SDC_STS mask */ +#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */ +#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ +#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ + +/* MSDC_DMA_CTRL mask */ +#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ +#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ +#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */ +#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */ +#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */ +#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */ + +/* MSDC_DMA_CFG mask */ +#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */ +#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */ +#define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */ +#define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */ +#define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */ + +/* MSDC_PATCH_BIT mask */ +#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */ +#define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7) +#define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10) +#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */ +#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */ +#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */ +#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */ +#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */ +#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */ +#define MSDC_PATCH_BIT_INTDLSEL (0x1 
<< 28) /* RW */ +#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ +#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ + +#define REQ_CMD_EIO (0x1 << 0) +#define REQ_CMD_TMO (0x1 << 1) +#define REQ_DAT_ERR (0x1 << 2) +#define REQ_STOP_EIO (0x1 << 3) +#define REQ_STOP_TMO (0x1 << 4) +#define REQ_CMD_BUSY (0x1 << 5) + +#define MSDC_PREPARE_FLAG (0x1 << 0) +#define MSDC_ASYNC_FLAG (0x1 << 1) +#define MSDC_MMAP_FLAG (0x1 << 2) + +#define MTK_MMC_AUTOSUSPEND_DELAY 50 +#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */ +#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */ + +/*--------------------------------------------------------------------------*/ +/* Descriptor Structure */ +/*--------------------------------------------------------------------------*/ +struct mt_gpdma_desc { + u32 gpd_info; +#define GPDMA_DESC_HWO (0x1 << 0) +#define GPDMA_DESC_BDP (0x1 << 1) +#define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */ +#define GPDMA_DESC_INT (0x1 << 16) + u32 next; + u32 ptr; + u32 gpd_data_len; +#define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */ +#define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */ + u32 arg; + u32 blknum; + u32 cmd; +}; + +struct mt_bdma_desc { + u32 bd_info; +#define BDMA_DESC_EOL (0x1 << 0) +#define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */ +#define BDMA_DESC_BLKPAD (0x1 << 17) +#define BDMA_DESC_DWPAD (0x1 << 18) + u32 next; + u32 ptr; + u32 bd_data_len; +#define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */ +}; + +struct msdc_dma { + struct scatterlist *sg; /* I/O scatter list */ + struct mt_gpdma_desc *gpd; /* pointer to gpd array */ + struct mt_bdma_desc *bd; /* pointer to bd array */ + dma_addr_t gpd_addr; /* the physical address of gpd array */ + dma_addr_t bd_addr; /* the physical address of bd array */ +}; + +struct msdc_save_para { + u32 msdc_cfg; + u32 iocon; + u32 sdc_cfg; + u32 pad_tune; + u32 patch_bit0; + u32 patch_bit1; +}; + +struct msdc_host { + struct device *dev; + struct mmc_host *mmc; /* mmc structure */ + int cmd_rsp; + + spinlock_t lock; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + int error; + + void __iomem *base; /* host base address */ + + struct msdc_dma dma; /* dma channel */ + u64 dma_mask; + + u32 timeout_ns; /* data timeout ns */ + u32 timeout_clks; /* data timeout clks */ + + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_uhs; + struct delayed_work req_timeout; + int irq; /* host interrupt */ + + struct clk *src_clk; /* msdc source clock */ + struct clk *h_clk; /* msdc h_clk */ + u32 mclk; /* mmc subsystem clock frequency */ + u32 src_clk_freq; /* source clock frequency */ + u32 sclk; /* SD/MS bus clock frequency */ + bool ddr; + bool vqmmc_enabled; + struct msdc_save_para save_para; /* used when gate HCLK */ +}; + +static void sdr_set_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val |= bs; + writel(val, reg); +} + +static void sdr_clr_bits(void __iomem *reg, u32 bs) +{ + u32 val = readl(reg); + + val &= ~bs; + writel(val, reg); +} + +static void sdr_set_field(void __iomem *reg, u32 field, u32 val) +{ + unsigned int tv = readl(reg); + + tv &= ~field; + tv |= ((val) << (ffs((unsigned int)field) - 1)); + writel(tv, reg); +} + +static void sdr_get_field(void __iomem *reg, u32 field, u32 *val) +{ + unsigned int tv = readl(reg); + + *val = ((tv & field) >> (ffs((unsigned int)field) - 1)); +} + +static void msdc_reset_hw(struct msdc_host *host) +{ + u32 val; + + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST); + while 
(readl(host->base + MSDC_CFG) & MSDC_CFG_RST) + cpu_relax(); + + sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR); + while (readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_CLR) + cpu_relax(); + + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); +} + +static void msdc_cmd_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd); + +static u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | + MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR | + MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT; + +static u8 msdc_dma_calcs(u8 *buf, u32 len) +{ + u32 i, sum = 0; + + for (i = 0; i < len; i++) + sum += buf[i]; + return 0xff - (u8) sum; +} + +static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, + struct mmc_data *data) +{ + unsigned int j, dma_len; + dma_addr_t dma_address; + u32 dma_ctrl; + struct scatterlist *sg; + struct mt_gpdma_desc *gpd; + struct mt_bdma_desc *bd; + + sg = data->sg; + + gpd = dma->gpd; + bd = dma->bd; + + /* modify gpd */ + gpd->gpd_info |= GPDMA_DESC_HWO; + gpd->gpd_info |= GPDMA_DESC_BDP; + /* the checksum field must be cleared first; it is included in the checksum calculation */ + gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM; + gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8; + + /* modify bd */ + for_each_sg(data->sg, sg, data->sg_count, j) { + dma_address = sg_dma_address(sg); + dma_len = sg_dma_len(sg); + + /* init bd */ + bd[j].bd_info &= ~BDMA_DESC_BLKPAD; + bd[j].bd_info &= ~BDMA_DESC_DWPAD; + bd[j].ptr = (u32)dma_address; + bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN; + bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN); + + if (j == data->sg_count - 1) /* the last bd */ + bd[j].bd_info |= BDMA_DESC_EOL; + else + bd[j].bd_info &= ~BDMA_DESC_EOL; + + /* the checksum field needs to be cleared first */ + bd[j].bd_info &= ~BDMA_DESC_CHECKSUM; + bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8; + } + + sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1); + dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL); + dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE); + dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8); + writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL); + writel((u32)dma->gpd_addr, host->base + MSDC_DMA_SA); +} + +static void msdc_prepare_data(struct msdc_host *host, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (!(data->host_cookie & MSDC_PREPARE_FLAG)) { + bool read = (data->flags & MMC_DATA_READ) != 0; + + data->host_cookie |= MSDC_PREPARE_FLAG; + data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, + read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); + } +} + +static void msdc_unprepare_data(struct msdc_host *host, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + + if (data->host_cookie & MSDC_ASYNC_FLAG) + return; + + if (data->host_cookie & MSDC_PREPARE_FLAG) { + bool read = (data->flags & MMC_DATA_READ) != 0; + + dma_unmap_sg(host->dev, data->sg, data->sg_len, + read ?
DMA_FROM_DEVICE : DMA_TO_DEVICE); + data->host_cookie &= ~MSDC_PREPARE_FLAG; + } +} + +/* clock control primitives */ +static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) +{ + u32 timeout, clk_ns; + u32 mode = 0; + + host->timeout_ns = ns; + host->timeout_clks = clks; + if (host->sclk == 0) { + timeout = 0; + } else { + clk_ns = 1000000000UL / host->sclk; + timeout = (ns + clk_ns - 1) / clk_ns + clks; + /* in 1048576 sclk cycle unit */ + timeout = (timeout + (0x1 << 20) - 1) >> 20; + sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode); + /*DDR mode will double the clk cycles for data timeout */ + timeout = mode >= 2 ? timeout * 2 : timeout; + timeout = timeout > 1 ? timeout - 1 : 0; + timeout = timeout > 255 ? 255 : timeout; + } + sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, timeout); +} + +static void msdc_gate_clock(struct msdc_host *host) +{ + clk_disable_unprepare(host->src_clk); + clk_disable_unprepare(host->h_clk); +} + +static void msdc_ungate_clock(struct msdc_host *host) +{ + clk_prepare_enable(host->h_clk); + clk_prepare_enable(host->src_clk); + while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) + cpu_relax(); +} + +static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz) +{ + u32 mode; + u32 flags; + u32 div; + u32 sclk; + + if (!hz) { + dev_dbg(host->dev, "set mclk to 0\n"); + host->mclk = 0; + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); + return; + } + + flags = readl(host->base + MSDC_INTEN); + sdr_clr_bits(host->base + MSDC_INTEN, flags); + if (ddr) { /* may need to modify later */ + mode = 0x2; /* ddr mode and use divisor */ + if (hz >= (host->src_clk_freq >> 2)) { + div = 0; /* mean div = 1/4 */ + sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */ + } else { + div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2); + sclk = (host->src_clk_freq >> 2) / div; + div = (div >> 1); + } + } else if (hz >= host->src_clk_freq) { + mode = 0x1; /* no divisor */ + div = 0; + sclk = host->src_clk_freq; + } else { + mode = 0x0; /* use divisor */ + if (hz >= (host->src_clk_freq >> 1)) { + div = 0; /* mean div = 1/2 */ + sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */ + } else { + div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2); + sclk = (host->src_clk_freq >> 2) / div; + } + } + sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, + (mode << 8) | (div % 0xff)); + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); + while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) + cpu_relax(); + host->sclk = sclk; + host->mclk = hz; + host->ddr = ddr; + /* need because clk changed. 
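The divisor encoding a few lines above is easiest to check with numbers: in plain divided mode (mode 0) the bus clock is src_clk_freq / (4 * div), and the round-up guarantees the result never exceeds the requested rate. A worked example, assuming a 200 MHz source clock and a 400 kHz target:

/* Mirrors the "use divisor" branch of msdc_set_mclk() above (mode = 0). */
static u32 example_msdc_div(u32 src_clk_freq, u32 hz)
{
        /* src = 200000000, hz = 400000:
         * div  = ceil(200000000 / (4 * 400000)) = 125
         * sclk = (200000000 / 4) / 125 = 400000, exactly the request */
        return (src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
}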
*/ + msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); + sdr_set_bits(host->base + MSDC_INTEN, flags); + + dev_dbg(host->dev, "sclk: %d, ddr: %d\n", host->sclk, ddr); +} + +static inline u32 msdc_cmd_find_resp(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + u32 resp; + + switch (mmc_resp_type(cmd)) { + /* Actually, R1, R5, R6, R7 are the same */ + case MMC_RSP_R1: + resp = 0x1; + break; + case MMC_RSP_R1B: + resp = 0x7; + break; + case MMC_RSP_R2: + resp = 0x2; + break; + case MMC_RSP_R3: + resp = 0x3; + break; + case MMC_RSP_NONE: + default: + resp = 0x0; + break; + } + + return resp; +} + +static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + /* rawcmd : + * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 | + * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode + */ + u32 opcode = cmd->opcode; + u32 resp = msdc_cmd_find_resp(host, mrq, cmd); + u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7); + + host->cmd_rsp = resp; + + if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) || + opcode == MMC_STOP_TRANSMISSION) + rawcmd |= (0x1 << 14); + else if (opcode == SD_SWITCH_VOLTAGE) + rawcmd |= (0x1 << 30); + else if (opcode == SD_APP_SEND_SCR || + opcode == SD_APP_SEND_NUM_WR_BLKS || + (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) || + (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) || + (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC)) + rawcmd |= (0x1 << 11); + + if (cmd->data) { + struct mmc_data *data = cmd->data; + + if (mmc_op_multi(opcode)) { + if (mmc_card_mmc(host->mmc->card) && mrq->sbc && + !(mrq->sbc->arg & 0xFFFF0000)) + rawcmd |= 0x2 << 28; /* AutoCMD23 */ + } + + rawcmd |= ((data->blksz & 0xFFF) << 16); + if (data->flags & MMC_DATA_WRITE) + rawcmd |= (0x1 << 13); + if (data->blocks > 1) + rawcmd |= (0x2 << 11); + else + rawcmd |= (0x1 << 11); + /* Always use dma mode */ + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO); + + if (host->timeout_ns != data->timeout_ns || + host->timeout_clks != data->timeout_clks) + msdc_set_timeout(host, data->timeout_ns, + data->timeout_clks); + + writel(data->blocks, host->base + SDC_BLK_NUM); + } + return rawcmd; +} + +static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq, + struct mmc_command *cmd, struct mmc_data *data) +{ + bool read; + + WARN_ON(host->data); + host->data = data; + read = data->flags & MMC_DATA_READ; + + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); + msdc_dma_setup(host, &host->dma, data); + sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask); + sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1); + dev_dbg(host->dev, "DMA start\n"); + dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n", + __func__, cmd->opcode, data->blocks, read); +} + +static int msdc_auto_cmd_done(struct msdc_host *host, int events, + struct mmc_command *cmd) +{ + u32 *rsp = cmd->resp; + + rsp[0] = readl(host->base + SDC_ACMD_RESP); + + if (events & MSDC_INT_ACMDRDY) { + cmd->error = 0; + } else { + msdc_reset_hw(host); + if (events & MSDC_INT_ACMDCRCERR) { + cmd->error = -EILSEQ; + host->error |= REQ_STOP_EIO; + } else if (events & MSDC_INT_ACMDTMO) { + cmd->error = -ETIMEDOUT; + host->error |= REQ_STOP_TMO; + } + dev_err(host->dev, + "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); + } + return cmd->error; +} + +static 
void msdc_track_cmd_data(struct msdc_host *host, + struct mmc_command *cmd, struct mmc_data *data) +{ + if (host->error) + dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", + __func__, cmd->opcode, cmd->arg, host->error); +} + +static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) +{ + unsigned long flags; + bool ret; + + ret = cancel_delayed_work(&host->req_timeout); + if (!ret) { + /* delayed work is already running */ + return; + } + spin_lock_irqsave(&host->lock, flags); + host->mrq = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + msdc_track_cmd_data(host, mrq->cmd, mrq->data); + if (mrq->data) + msdc_unprepare_data(host, mrq); + mmc_request_done(host->mmc, mrq); + + pm_runtime_mark_last_busy(host->dev); + pm_runtime_put_autosuspend(host->dev); +} + +/* returns true if command is fully handled; returns false otherwise */ +static bool msdc_cmd_done(struct msdc_host *host, int events, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + bool done = false; + bool sbc_error; + unsigned long flags; + u32 *rsp = cmd->resp; + + if (mrq->sbc && cmd == mrq->cmd && + (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR + | MSDC_INT_ACMDTMO))) + msdc_auto_cmd_done(host, events, mrq->sbc); + + sbc_error = mrq->sbc && mrq->sbc->error; + + if (!sbc_error && !(events & (MSDC_INT_CMDRDY + | MSDC_INT_RSPCRCERR + | MSDC_INT_CMDTMO))) + return done; + + spin_lock_irqsave(&host->lock, flags); + done = !host->cmd; + host->cmd = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY | + MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO | + MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR | + MSDC_INTEN_ACMDTMO); + writel(cmd->arg, host->base + SDC_ARG); + + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + rsp[0] = readl(host->base + SDC_RESP3); + rsp[1] = readl(host->base + SDC_RESP2); + rsp[2] = readl(host->base + SDC_RESP1); + rsp[3] = readl(host->base + SDC_RESP0); + } else { + rsp[0] = readl(host->base + SDC_RESP0); + } + } + + if (!sbc_error && !(events & MSDC_INT_CMDRDY)) { + msdc_reset_hw(host); + if (events & MSDC_INT_RSPCRCERR) { + cmd->error = -EILSEQ; + host->error |= REQ_CMD_EIO; + } else if (events & MSDC_INT_CMDTMO) { + cmd->error = -ETIMEDOUT; + host->error |= REQ_CMD_TMO; + } + } + if (cmd->error) + dev_dbg(host->dev, + "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", + __func__, cmd->opcode, cmd->arg, rsp[0], + cmd->error); + + msdc_cmd_next(host, mrq, cmd); + return true; +} + +/* It is the core layer's responsibility to ensure card status + * is correct before issuing a request, but the host performs the + * recommended checks below.
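The checks referred to here are bounded polls rather than indefinite waits: the CMD busy bit (and, for R1B or data commands, the controller busy bit) gets a 20 ms budget before the command is failed with a simulated timeout. The core of the pattern, as a sketch against the SDC_STS definitions above:

#include <linux/io.h>
#include <linux/jiffies.h>

static bool example_wait_cmd_idle(void __iomem *base)
{
        unsigned long tmo = jiffies + msecs_to_jiffies(20);

        while ((readl(base + SDC_STS) & SDC_STS_CMDBUSY) &&
               time_before(jiffies, tmo))
                cpu_relax();

        /* true only if the CMD path went idle within the budget */
        return !(readl(base + SDC_STS) & SDC_STS_CMDBUSY);
}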
+ */ +static inline bool msdc_cmd_is_ready(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + /* The max busy time we can endure is 20ms */ + unsigned long tmo = jiffies + msecs_to_jiffies(20); + + while ((readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) && + time_before(jiffies, tmo)) + cpu_relax(); + if (readl(host->base + SDC_STS) & SDC_STS_CMDBUSY) { + dev_err(host->dev, "CMD bus busy detected\n"); + host->error |= REQ_CMD_BUSY; + msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); + return false; + } + + if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) { + tmo = jiffies + msecs_to_jiffies(20); + /* R1B or with data, should check SDCBUSY */ + while ((readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) && + time_before(jiffies, tmo)) + cpu_relax(); + if (readl(host->base + SDC_STS) & SDC_STS_SDCBUSY) { + dev_err(host->dev, "Controller busy detected\n"); + host->error |= REQ_CMD_BUSY; + msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); + return false; + } + } + return true; +} + +static void msdc_start_command(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + u32 rawcmd; + + WARN_ON(host->cmd); + host->cmd = cmd; + + if (!msdc_cmd_is_ready(host, mrq, cmd)) + return; + + if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 || + readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) { + dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n"); + msdc_reset_hw(host); + } + + cmd->error = 0; + rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); + + sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY | + MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO | + MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR | + MSDC_INTEN_ACMDTMO); + writel(cmd->arg, host->base + SDC_ARG); + writel(rawcmd, host->base + SDC_CMD); +} + +static void msdc_cmd_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + if (cmd->error || (mrq->sbc && mrq->sbc->error)) + msdc_request_done(host, mrq); + else if (cmd == mrq->sbc) + msdc_start_command(host, mrq, mrq->cmd); + else if (!cmd->data) + msdc_request_done(host, mrq); + else + msdc_start_data(host, mrq, cmd, cmd->data); +} + +static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct msdc_host *host = mmc_priv(mmc); + + host->error = 0; + WARN_ON(host->mrq); + host->mrq = mrq; + + pm_runtime_get_sync(host->dev); + + if (mrq->data) + msdc_prepare_data(host, mrq); + + /* if SBC is required, we have HW option and SW option. 
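The "special" flags mentioned in the rest of this comment are the upper bits of the CMD23 argument (for example the reliable-write request in bit 31), which the controller's AutoCMD23 engine cannot generate on its own, so such requests must fall back to a software-issued CMD23. The test in isolation (helper name made up for illustration):

#include <linux/mmc/core.h>

/* Upper-half CMD23 argument bits ask for features (e.g. reliable write)
 * that AutoCMD23 hardware cannot encode by itself. */
static bool example_needs_sw_cmd23(struct mmc_request *mrq)
{
        return mrq->sbc && (mrq->sbc->arg & 0xFFFF0000);
}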
+ * if HW option is enabled, and SBC does not have "special" flags, + * use HW option, otherwise use SW option + */ + if (mrq->sbc && (!mmc_card_mmc(mmc->card) || + (mrq->sbc->arg & 0xFFFF0000))) + msdc_start_command(host, mrq, mrq->sbc); + else + msdc_start_command(host, mrq, mrq->cmd); +} + +static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, + bool is_first_req) +{ + struct msdc_host *host = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!data) + return; + + msdc_prepare_data(host, mrq); + data->host_cookie |= MSDC_ASYNC_FLAG; +} + +static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, + int err) +{ + struct msdc_host *host = mmc_priv(mmc); + struct mmc_data *data; + + data = mrq->data; + if (!data) + return; + if (data->host_cookie) { + data->host_cookie &= ~MSDC_ASYNC_FLAG; + msdc_unprepare_data(host, mrq); + } +} + +static void msdc_data_xfer_next(struct msdc_host *host, + struct mmc_request *mrq, struct mmc_data *data) +{ + if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error && + (!data->bytes_xfered || !mrq->sbc)) + msdc_start_command(host, mrq, mrq->stop); + else + msdc_request_done(host, mrq); +} + +static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, + struct mmc_request *mrq, struct mmc_data *data) +{ + struct mmc_command *stop = data->stop; + unsigned long flags; + bool done; + unsigned int check_data = events & + (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO + | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR + | MSDC_INT_DMA_PROTECT); + + spin_lock_irqsave(&host->lock, flags); + done = !host->data; + if (check_data) + host->data = NULL; + spin_unlock_irqrestore(&host->lock, flags); + + if (done) + return true; + + if (check_data || (stop && stop->error)) { + dev_dbg(host->dev, "DMA status: 0x%8X\n", + readl(host->base + MSDC_DMA_CFG)); + sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, + 1); + while (readl(host->base + MSDC_DMA_CFG) & MSDC_DMA_CFG_STS) + cpu_relax(); + sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask); + dev_dbg(host->dev, "DMA stop\n"); + + if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) { + data->bytes_xfered = data->blocks * data->blksz; + } else { + dev_err(host->dev, "interrupt events: %x\n", events); + msdc_reset_hw(host); + host->error |= REQ_DAT_ERR; + data->bytes_xfered = 0; + + if (events & MSDC_INT_DATTMO) + data->error = -ETIMEDOUT; + + dev_err(host->dev, "%s: cmd=%d; blocks=%d", + __func__, mrq->cmd->opcode, data->blocks); + dev_err(host->dev, "data_error=%d xfer_size=%d\n", + (int)data->error, data->bytes_xfered); + } + + msdc_data_xfer_next(host, mrq, data); + done = true; + } + return done; +} + +static void msdc_set_buswidth(struct msdc_host *host, u32 width) +{ + u32 val = readl(host->base + SDC_CFG); + + val &= ~SDC_CFG_BUSWIDTH; + + switch (width) { + default: + case MMC_BUS_WIDTH_1: + val |= (MSDC_BUS_1BITS << 16); + break; + case MMC_BUS_WIDTH_4: + val |= (MSDC_BUS_4BITS << 16); + break; + case MMC_BUS_WIDTH_8: + val |= (MSDC_BUS_8BITS << 16); + break; + } + + writel(val, host->base + SDC_CFG); + dev_dbg(host->dev, "Bus Width = %d", width); +} + +static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct msdc_host *host = mmc_priv(mmc); + int min_uv, max_uv; + int ret = 0; + + if (!IS_ERR(mmc->supply.vqmmc)) { + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + min_uv = 3300000; + max_uv = 3300000; + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + min_uv = 1800000; 
+ max_uv = 1800000; + } else { + dev_err(host->dev, "Unsupported signal voltage!\n"); + return -EINVAL; + } + + ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv); + if (ret) { + dev_err(host->dev, + "Regulator set error %d: %d - %d\n", + ret, min_uv, max_uv); + } else { + /* Apply different pinctrl settings for different signal voltage */ + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) + pinctrl_select_state(host->pinctrl, host->pins_uhs); + else + pinctrl_select_state(host->pinctrl, host->pins_default); + } + } + return ret; +} + +static int msdc_card_busy(struct mmc_host *mmc) +{ + struct msdc_host *host = mmc_priv(mmc); + u32 status = readl(host->base + MSDC_PS); + + /* check if any pin between dat[0:3] is low */ + if (((status >> 16) & 0xf) != 0xf) + return 1; + + return 0; +} + +static void msdc_request_timeout(struct work_struct *work) +{ + struct msdc_host *host = container_of(work, struct msdc_host, + req_timeout.work); + + /* simulate HW timeout status */ + dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); + if (host->mrq) { + dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, + host->mrq, host->mrq->cmd->opcode); + if (host->cmd) { + dev_err(host->dev, "%s: aborting cmd=%d\n", + __func__, host->cmd->opcode); + msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq, + host->cmd); + } else if (host->data) { + dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", + __func__, host->mrq->cmd->opcode, + host->data->blocks); + msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq, + host->data); + } + } +} + +static irqreturn_t msdc_irq(int irq, void *dev_id) +{ + struct msdc_host *host = (struct msdc_host *) dev_id; + + while (true) { + unsigned long flags; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + u32 events, event_mask; + + spin_lock_irqsave(&host->lock, flags); + events = readl(host->base + MSDC_INT); + event_mask = readl(host->base + MSDC_INTEN); + /* clear interrupts */ + writel(events & event_mask, host->base + MSDC_INT); + + mrq = host->mrq; + cmd = host->cmd; + data = host->data; + spin_unlock_irqrestore(&host->lock, flags); + + if (!(events & event_mask)) + break; + + if (!mrq) { + dev_err(host->dev, + "%s: MRQ=NULL; events=%08X; event_mask=%08X\n", + __func__, events, event_mask); + WARN_ON(1); + break; + } + + dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); + + if (cmd) + msdc_cmd_done(host, events, mrq, cmd); + else if (data) + msdc_data_xfer_done(host, events, mrq, data); + } + + return IRQ_HANDLED; +} + +static void msdc_init_hw(struct msdc_host *host) +{ + u32 val; + + /* Configure to MMC/SD mode, clock free running */ + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); + + /* Reset */ + msdc_reset_hw(host); + + /* Disable card detection */ + sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN); + + /* Disable and clear all interrupts */ + writel(0, host->base + MSDC_INTEN); + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); + + writel(0, host->base + MSDC_PAD_TUNE); + writel(0, host->base + MSDC_IOCON); + sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); + writel(0x403c004f, host->base + MSDC_PATCH_BIT); + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1); + writel(0xffff0089, host->base + MSDC_PATCH_BIT1); + /* Configure to enable SDIO mode. 
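Worth pausing on msdc_card_busy() above: it is the handshake used during a signal-voltage switch, where the card holds DAT[3:0] low until it is running on the new rail, so "busy" reduces to "not all four data lines high" in the MSDC_PS pin-status register. In isolation:

#include <linux/io.h>

static bool example_dat_lines_busy(void __iomem *base)
{
        u32 status = readl(base + MSDC_PS);

        /* DAT[3:0] are sampled in bits 19:16 of MSDC_PS */
        return ((status >> 16) & 0xf) != 0xf;
}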
+ * it's must otherwise sdio cmd5 failed + */ + sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO); + + /* disable detect SDIO device interrupt function */ + sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); + + /* Configure to default data timeout */ + sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); + + dev_dbg(host->dev, "init hardware done!"); +} + +static void msdc_deinit_hw(struct msdc_host *host) +{ + u32 val; + /* Disable and clear all interrupts */ + writel(0, host->base + MSDC_INTEN); + + val = readl(host->base + MSDC_INT); + writel(val, host->base + MSDC_INT); +} + +/* init gpd and bd list in msdc_drv_probe */ +static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma) +{ + struct mt_gpdma_desc *gpd = dma->gpd; + struct mt_bdma_desc *bd = dma->bd; + int i; + + memset(gpd, 0, sizeof(struct mt_gpdma_desc)); + + gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */ + gpd->ptr = (u32)dma->bd_addr; /* physical address */ + + memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM); + for (i = 0; i < (MAX_BD_NUM - 1); i++) + bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1); +} + +static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct msdc_host *host = mmc_priv(mmc); + int ret; + u32 ddr = 0; + + pm_runtime_get_sync(host->dev); + + if (ios->timing == MMC_TIMING_UHS_DDR50 || + ios->timing == MMC_TIMING_MMC_DDR52) + ddr = 1; + + msdc_set_buswidth(host, ios->bus_width); + + /* Suspend/Resume will do power off/on */ + switch (ios->power_mode) { + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, + ios->vdd); + if (ret) { + dev_err(host->dev, "Failed to set vmmc power!\n"); + goto end; + } + } + break; + case MMC_POWER_ON: + if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret) + dev_err(host->dev, "Failed to set vqmmc power!\n"); + else + host->vqmmc_enabled = true; + } + break; + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { + regulator_disable(mmc->supply.vqmmc); + host->vqmmc_enabled = false; + } + break; + default: + break; + } + + if (host->mclk != ios->clock || host->ddr != ddr) + msdc_set_mclk(host, ddr, ios->clock); + +end: + pm_runtime_mark_last_busy(host->dev); + pm_runtime_put_autosuspend(host->dev); +} + +static struct mmc_host_ops mt_msdc_ops = { + .post_req = msdc_post_req, + .pre_req = msdc_pre_req, + .request = msdc_ops_request, + .set_ios = msdc_ops_set_ios, + .start_signal_voltage_switch = msdc_ops_switch_volt, + .card_busy = msdc_card_busy, +}; + +static int msdc_drv_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct msdc_host *host; + struct resource *res; + int ret; + + if (!pdev->dev.of_node) { + dev_err(&pdev->dev, "No DT found\n"); + return -EINVAL; + } + /* Allocate MMC host for this device */ + mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + ret = mmc_of_parse(mmc); + if (ret) + goto host_free; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto host_free; + } + + ret = mmc_regulator_get_supply(mmc); + if (ret == -EPROBE_DEFER) + goto host_free; + + host->src_clk = devm_clk_get(&pdev->dev, "source"); + if (IS_ERR(host->src_clk)) { + ret = PTR_ERR(host->src_clk); 
+ goto host_free; + } + + host->h_clk = devm_clk_get(&pdev->dev, "hclk"); + if (IS_ERR(host->h_clk)) { + ret = PTR_ERR(host->h_clk); + goto host_free; + } + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + ret = -EINVAL; + goto host_free; + } + + host->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(host->pinctrl)) { + ret = PTR_ERR(host->pinctrl); + dev_err(&pdev->dev, "Cannot find pinctrl!\n"); + goto host_free; + } + + host->pins_default = pinctrl_lookup_state(host->pinctrl, "default"); + if (IS_ERR(host->pins_default)) { + ret = PTR_ERR(host->pins_default); + dev_err(&pdev->dev, "Cannot find pinctrl default!\n"); + goto host_free; + } + + host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); + if (IS_ERR(host->pins_uhs)) { + ret = PTR_ERR(host->pins_uhs); + dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n"); + goto host_free; + } + + host->dev = &pdev->dev; + host->mmc = mmc; + host->src_clk_freq = clk_get_rate(host->src_clk); + /* Set host parameters to mmc */ + mmc->ops = &mt_msdc_ops; + mmc->f_min = host->src_clk_freq / (4 * 255); + + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; + /* MMC core transfer sizes tunable parameters */ + mmc->max_segs = MAX_BD_NUM; + mmc->max_seg_size = BDMA_DESC_BUFLEN; + mmc->max_blk_size = 2048; + mmc->max_req_size = 512 * 1024; + mmc->max_blk_count = mmc->max_req_size / 512; + host->dma_mask = DMA_BIT_MASK(32); + mmc_dev(mmc)->dma_mask = &host->dma_mask; + + host->timeout_clks = 3 * 1048576; + host->dma.gpd = dma_alloc_coherent(&pdev->dev, + sizeof(struct mt_gpdma_desc), + &host->dma.gpd_addr, GFP_KERNEL); + host->dma.bd = dma_alloc_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct mt_bdma_desc), + &host->dma.bd_addr, GFP_KERNEL); + if (!host->dma.gpd || !host->dma.bd) { + ret = -ENOMEM; + goto release_mem; + } + msdc_init_gpd_bd(host, &host->dma); + INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout); + spin_lock_init(&host->lock); + + platform_set_drvdata(pdev, mmc); + msdc_ungate_clock(host); + msdc_init_hw(host); + + ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, pdev->name, host); + if (ret) + goto release; + + pm_runtime_set_active(host->dev); + pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(host->dev); + pm_runtime_enable(host->dev); + ret = mmc_add_host(mmc); + + if (ret) + goto end; + + return 0; +end: + pm_runtime_disable(host->dev); +release: + platform_set_drvdata(pdev, NULL); + msdc_deinit_hw(host); + msdc_gate_clock(host); +release_mem: + if (host->dma.gpd) + dma_free_coherent(&pdev->dev, + sizeof(struct mt_gpdma_desc), + host->dma.gpd, host->dma.gpd_addr); + if (host->dma.bd) + dma_free_coherent(&pdev->dev, + MAX_BD_NUM * sizeof(struct mt_bdma_desc), + host->dma.bd, host->dma.bd_addr); +host_free: + mmc_free_host(mmc); + + return ret; +} + +static int msdc_drv_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct msdc_host *host; + + mmc = platform_get_drvdata(pdev); + host = mmc_priv(mmc); + + pm_runtime_get_sync(host->dev); + + platform_set_drvdata(pdev, NULL); + mmc_remove_host(host->mmc); + msdc_deinit_hw(host); + msdc_gate_clock(host); + + pm_runtime_disable(host->dev); + pm_runtime_put_noidle(host->dev); + dma_free_coherent(&pdev->dev, + sizeof(struct mt_gpdma_desc), + host->dma.gpd, host->dma.gpd_addr); + dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc), + host->dma.bd, host->dma.bd_addr); + + mmc_free_host(host->mmc); + + return 0; +} + +#ifdef 
CONFIG_PM +static void msdc_save_reg(struct msdc_host *host) +{ + host->save_para.msdc_cfg = readl(host->base + MSDC_CFG); + host->save_para.iocon = readl(host->base + MSDC_IOCON); + host->save_para.sdc_cfg = readl(host->base + SDC_CFG); + host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); + host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); +} + +static void msdc_restore_reg(struct msdc_host *host) +{ + writel(host->save_para.msdc_cfg, host->base + MSDC_CFG); + writel(host->save_para.iocon, host->base + MSDC_IOCON); + writel(host->save_para.sdc_cfg, host->base + SDC_CFG); + writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); + writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); +} + +static int msdc_runtime_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct msdc_host *host = mmc_priv(mmc); + + msdc_save_reg(host); + msdc_gate_clock(host); + return 0; +} + +static int msdc_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct msdc_host *host = mmc_priv(mmc); + + msdc_ungate_clock(host); + msdc_restore_reg(host); + return 0; +} +#endif + +static const struct dev_pm_ops msdc_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) +}; + +static const struct of_device_id msdc_of_ids[] = { + { .compatible = "mediatek,mt8135-mmc", }, + {} +}; + +static struct platform_driver mt_msdc_driver = { + .probe = msdc_drv_probe, + .remove = msdc_drv_remove, + .driver = { + .name = "mtk-msdc", + .of_match_table = msdc_of_ids, + .pm = &msdc_dev_pm_ops, + }, +}; + +module_platform_driver(mt_msdc_driver); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver"); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 317d709f7550..d110f9e98c4b 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -605,11 +605,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes) mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS); } - stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); - if (stat) - return stat; - - return 0; + return mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); } static int mxcmci_transfer_data(struct mxcmci_host *host) diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index a82411a2c024..d839147e591d 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -549,7 +549,7 @@ static const struct mmc_host_ops mxs_mmc_ops = { .enable_sdio_irq = mxs_mmc_enable_sdio_irq, }; -static struct platform_device_id mxs_ssp_ids[] = { +static const struct platform_device_id mxs_ssp_ids[] = { { .name = "imx23-mmc", .driver_data = IMX23_SSP, diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 1d3d6c4bfdc6..93137483ecde 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -1474,7 +1474,7 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id rtsx_pci_sdmmc_ids[] = { +static const struct platform_device_id rtsx_pci_sdmmc_ids[] = { { .name = DRV_NAME_RTSX_PCI_SDMMC, }, { diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c index 88af827e086b..6c71fc9f76c7 100644 --- 
a/drivers/mmc/host/rtsx_usb_sdmmc.c +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -1439,7 +1439,7 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id rtsx_usb_sdmmc_ids[] = { +static const struct platform_device_id rtsx_usb_sdmmc_ids[] = { { .name = "rtsx_usb_sdmmc", }, { diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 94cddf381ba3..6291d5042ef2 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1856,7 +1856,7 @@ static int s3cmci_remove(struct platform_device *pdev) return 0; } -static struct platform_device_id s3cmci_driver_ids[] = { +static const struct platform_device_id s3cmci_driver_ids[] = { { .name = "s3c2410-sdi", .driver_data = 0, diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c index 0ef0343c603a..1c65d4690e70 100644 --- a/drivers/mmc/host/sdhci-bcm2835.c +++ b/drivers/mmc/host/sdhci-bcm2835.c @@ -172,9 +172,19 @@ static int bcm2835_sdhci_probe(struct platform_device *pdev) ret = PTR_ERR(pltfm_host->clk); goto err; } + ret = clk_prepare_enable(pltfm_host->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable host clk\n"); + goto err; + } - return sdhci_add_host(host); + ret = sdhci_add_host(host); + if (ret) + goto err_clk; + return 0; +err_clk: + clk_disable_unprepare(pltfm_host->clk); err: sdhci_pltfm_free(pdev); return ret; diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 82f512d87cb8..faf0cb910c96 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -4,7 +4,7 @@ * derived from the OF-version. * * Copyright (c) 2010 Pengutronix e.K. - * Author: Wolfram Sang <w.sang@pengutronix.de> + * Author: Wolfram Sang <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -112,6 +112,14 @@ #define ESDHC_FLAG_STD_TUNING BIT(5) /* The IP has SDHCI_CAPABILITIES_1 register */ #define ESDHC_FLAG_HAVE_CAP1 BIT(6) +/* + * The IP has errata ERR004536 + * uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow, + * when reading data from the card + */ +#define ESDHC_FLAG_ERR004536 BIT(7) +/* The IP supports HS200 mode */ +#define ESDHC_FLAG_HS200 BIT(8) struct esdhc_soc_data { u32 flags; @@ -139,7 +147,13 @@ static struct esdhc_soc_data usdhc_imx6q_data = { static struct esdhc_soc_data usdhc_imx6sl_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING - | ESDHC_FLAG_HAVE_CAP1, + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536 + | ESDHC_FLAG_HS200, +}; + +static struct esdhc_soc_data usdhc_imx6sx_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200, }; struct pltfm_imx_data { @@ -161,7 +175,7 @@ struct pltfm_imx_data { u32 is_ddr; }; -static struct platform_device_id imx_esdhc_devtype[] = { +static const struct platform_device_id imx_esdhc_devtype[] = { { .name = "sdhci-esdhc-imx25", .driver_data = (kernel_ulong_t) &esdhc_imx25_data, @@ -182,6 +196,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = { { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, }, { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, + { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, }, { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, { .compatible = "fsl,imx6q-usdhc", .data = 
&usdhc_imx6q_data, }, { /* sentinel */ } @@ -298,7 +313,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) u32 data; if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { - if (val & SDHCI_INT_CARD_INT) { + if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) { /* * Clear and then set D3CD bit to avoid missing the * card interrupt. This is a eSDHC controller problem @@ -313,6 +328,11 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) data |= ESDHC_CTRL_D3CD; writel(data, host->ioaddr + SDHCI_HOST_CONTROL); } + + if (val & SDHCI_INT_ADMA_ERROR) { + val &= ~SDHCI_INT_ADMA_ERROR; + val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR; + } } if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) @@ -333,13 +353,6 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) } } - if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { - if (val & SDHCI_INT_ADMA_ERROR) { - val &= ~SDHCI_INT_ADMA_ERROR; - val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR; - } - } - writel(val, host->ioaddr + reg); } @@ -903,7 +916,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); - return 0; + /* call to generic mmc_of_parse to support additional capabilities */ + return mmc_of_parse(host->mmc); } #else static inline int @@ -924,6 +938,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) struct esdhc_platform_data *boarddata; int err; struct pltfm_imx_data *imx_data; + bool dt = true; host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); if (IS_ERR(host)) @@ -991,6 +1006,16 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc->caps |= MMC_CAP_1_8V_DDR; + + if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200)) + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; + + /* + * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) | BIT(7), + host->ioaddr + 0x6c); } if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) @@ -1002,6 +1027,9 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP, host->ioaddr + ESDHC_TUNING_CTRL); + if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; + boarddata = &imx_data->boarddata; if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { if (!host->mmc->parent->platform_data) { @@ -1011,11 +1039,44 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) } imx_data->boarddata = *((struct esdhc_platform_data *) host->mmc->parent->platform_data); + dt = false; + } + /* write_protect */ + if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) { + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request write-protect gpio!\n"); + goto disable_clk; + } + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ - if (boarddata->cd_type == ESDHC_CD_CONTROLLER) + switch (boarddata->cd_type) { + case ESDHC_CD_GPIO: + if (dt) + break; + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request card-detect gpio!\n"); + goto disable_clk; + } + /* fall through */ + + case ESDHC_CD_CONTROLLER: + /* we have a working card_detect back */ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + break; + + case 
ESDHC_CD_PERMANENT: + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + break; + + case ESDHC_CD_NONE: + break; + } switch (boarddata->max_bus_width) { case 8: @@ -1048,11 +1109,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; } - /* call to generic mmc_of_parse to support additional capabilities */ - err = mmc_of_parse(host->mmc); - if (err) - goto disable_clk; - err = sdhci_add_host(host); if (err) goto disable_clk; @@ -1151,5 +1207,5 @@ static struct platform_driver sdhci_esdhc_imx_driver = { module_platform_driver(sdhci_esdhc_imx_driver); MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); -MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); +MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 6287d426c96b..21c0c08dfe54 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -20,6 +20,7 @@ */ #include <linux/module.h> +#include <linux/of_device.h> #include "sdhci-pltfm.h" #define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c @@ -168,6 +169,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev) goto clk_disable_all; } + if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-4.9a")) { + host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; + host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; + } + sdhci_get_of_property(pdev); pltfm_host = sdhci_priv(host); pltfm_host->priv = sdhci_arasan; @@ -208,6 +214,7 @@ static int sdhci_arasan_remove(struct platform_device *pdev) static const struct of_device_id sdhci_arasan_of_match[] = { { .compatible = "arasan,sdhci-8.9a" }, + { .compatible = "arasan,sdhci-4.9a" }, { } }; MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 22e9111b11ff..797be7549a15 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -199,7 +199,7 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { - int pre_div = 2; + int pre_div = 1; int div = 1; u32 temp; @@ -229,7 +229,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", clock, host->max_clk / pre_div / div); - + host->mmc->actual_clock = host->max_clk / pre_div / div; pre_div >>= 1; div--; @@ -361,6 +361,13 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) sdhci_get_of_property(pdev); np = pdev->dev.of_node; + if (of_device_is_compatible(np, "fsl,p5040-esdhc") || + of_device_is_compatible(np, "fsl,p5020-esdhc") || + of_device_is_compatible(np, "fsl,p4080-esdhc") || + of_device_is_compatible(np, "fsl,p1020-esdhc") || + of_device_is_compatible(np, "fsl,t1040-esdhc")) + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { /* * Freescale messed up with P2020 as it has a non-standard diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c index a611217769f5..56fddc622a54 100644 --- a/drivers/mmc/host/sdhci-pci-data.c +++ b/drivers/mmc/host/sdhci-pci-data.c @@ -3,3 +3,6 @@ struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); EXPORT_SYMBOL_GPL(sdhci_pci_get_data); + +int sdhci_pci_spt_drive_strength; +EXPORT_SYMBOL_GPL(sdhci_pci_spt_drive_strength); diff --git a/drivers/mmc/host/sdhci-pci.c 
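The card-detect rework in the esdhc probe above boils down to four board arrangements: a dedicated CD GPIO (request it, then detection is trusted), the controller's native CD pin, a permanently present card, and no detection at all. A hedged sketch of the same decision structure using the slot-gpio helpers (the enum and foo_* names are illustrative, not from the driver):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

enum foo_cd_type { FOO_CD_GPIO, FOO_CD_CONTROLLER, FOO_CD_PERMANENT, FOO_CD_NONE };

static int foo_setup_card_detect(struct mmc_host *mmc,
                                 enum foo_cd_type type, unsigned int gpio)
{
        int err;

        switch (type) {
        case FOO_CD_GPIO:
                err = mmc_gpio_request_cd(mmc, gpio, 0);
                if (err)
                        return err;
                /* fall through - the GPIO gives us working card detect */
        case FOO_CD_CONTROLLER:
                /* detection works: broken-CD quirks can be dropped here */
                break;
        case FOO_CD_PERMANENT:
                mmc->caps |= MMC_CAP_NONREMOVABLE;
                break;
        case FOO_CD_NONE:
                break;
        }
        return 0;
}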
b/drivers/mmc/host/sdhci-pci.c index 7a3fc16d0a6c..94f54d2772e8 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -20,6 +20,7 @@ #include <linux/slab.h> #include <linux/device.h> #include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> #include <linux/scatterlist.h> #include <linux/io.h> #include <linux/gpio.h> @@ -266,6 +267,69 @@ static void sdhci_pci_int_hw_reset(struct sdhci_host *host) usleep_range(300, 1000); } +static int spt_select_drive_strength(struct sdhci_host *host, + struct mmc_card *card, + unsigned int max_dtr, + int host_drv, int card_drv, int *drv_type) +{ + int drive_strength; + + if (sdhci_pci_spt_drive_strength > 0) + drive_strength = sdhci_pci_spt_drive_strength & 0xf; + else + drive_strength = 1; /* 33-ohm */ + + if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0) + drive_strength = 0; /* Default 50-ohm */ + + return drive_strength; +} + +/* Try to read the drive strength from the card */ +static void spt_read_drive_strength(struct sdhci_host *host) +{ + u32 val, i, t; + u16 m; + + if (sdhci_pci_spt_drive_strength) + return; + + sdhci_pci_spt_drive_strength = -1; + + m = sdhci_readw(host, SDHCI_HOST_CONTROL2) & 0x7; + if (m != 3 && m != 5) + return; + val = sdhci_readl(host, SDHCI_PRESENT_STATE); + if (val & 0x3) + return; + sdhci_writel(host, 0x007f0023, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); + sdhci_writew(host, 0x10, SDHCI_TRANSFER_MODE); + sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL); + sdhci_writew(host, 512, SDHCI_BLOCK_SIZE); + sdhci_writew(host, 1, SDHCI_BLOCK_COUNT); + sdhci_writel(host, 0, SDHCI_ARGUMENT); + sdhci_writew(host, 0x83b, SDHCI_COMMAND); + for (i = 0; i < 1000; i++) { + val = sdhci_readl(host, SDHCI_INT_STATUS); + if (val & 0xffff8000) + return; + if (val & 0x20) + break; + udelay(1); + } + val = sdhci_readl(host, SDHCI_PRESENT_STATE); + if (!(val & 0x800)) + return; + for (i = 0; i < 47; i++) + val = sdhci_readl(host, SDHCI_BUFFER); + t = val & 0xf00; + if (t != 0x200 && t != 0x300) + return; + + sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); +} + static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | @@ -276,6 +340,10 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->hw_reset = sdhci_pci_int_hw_reset; if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC) slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_SPT_EMMC) { + spt_read_drive_strength(slot->host); + slot->select_drive_strength = spt_select_drive_strength; + } return 0; } @@ -302,6 +370,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { .probe_slot = byt_emmc_probe_slot, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 | SDHCI_QUIRK2_STOP_WITH_TC, }; @@ -655,14 +724,37 @@ static const struct sdhci_pci_fixes sdhci_rtsx = { .probe_slot = rtsx_probe_slot, }; +/* AMD chipset generation */ +enum amd_chipset_gen { + AMD_CHIPSET_BEFORE_ML, + AMD_CHIPSET_CZ, + AMD_CHIPSET_NL, + AMD_CHIPSET_UNKNOWN, +}; + static int amd_probe(struct sdhci_pci_chip *chip) { struct pci_dev *smbus_dev; + enum amd_chipset_gen gen; smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); + if (smbus_dev) { + gen = AMD_CHIPSET_BEFORE_ML; + } else { + smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); + if (smbus_dev) { + if (smbus_dev->revision < 0x51) + gen = AMD_CHIPSET_CZ; + else + gen = AMD_CHIPSET_NL; + } else { + gen = AMD_CHIPSET_UNKNOWN; + } + } - if (smbus_dev && (smbus_dev->revision < 0x51)) { + if ((gen == AMD_CHIPSET_BEFORE_ML) || (gen == AMD_CHIPSET_CZ)) { chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; chip->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200; } @@ -1203,6 +1295,20 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host) slot->hw_reset(host); } +static int sdhci_pci_select_drive_strength(struct sdhci_host *host, + struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + + if (!slot->select_drive_strength) + return 0; + + return slot->select_drive_strength(host, card, max_dtr, host_drv, + card_drv, drv_type); +} + static const struct sdhci_ops sdhci_pci_ops = { .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, @@ -1210,6 +1316,7 @@ static const struct sdhci_ops sdhci_pci_ops = { .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, + .select_drive_strength = sdhci_pci_select_drive_strength, }; /*****************************************************************************\ diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 1ec684d06d54..541f1cad5247 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -72,6 +72,10 @@ struct sdhci_pci_slot { bool cd_override_level; void (*hw_reset)(struct sdhci_host *host); + int (*select_drive_strength)(struct sdhci_host *host, + struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type); }; struct sdhci_pci_chip { diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index f98008b5ea77..beffd8615489 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -252,9 +252,7 @@ static int sdhci_pxav2_remove(struct platform_device *pdev) static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", -#ifdef CONFIG_OF - .of_match_table = sdhci_pxav2_of_match, -#endif + .of_match_table = of_match_ptr(sdhci_pxav2_of_match), .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_pxav2_probe, diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index b5103a247bc1..9cd5fc62f130 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -457,12 +453,8 @@
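Back on the sdhci-pci drive-strength plumbing above: the core calls the host's ->select_drive_strength(), sdhci-pci fans that out per slot, and spt_select_drive_strength() returns type 1 (33 ohm) unless the card's mask rules it out, falling back to type 0 (the 50-ohm default). A callback with the same contract might look like this sketch, assuming card_drv is a bitmask with bit n set when the card supports driver type n:

/*
 * Illustrative callback only, matching the hook declared in
 * struct sdhci_pci_slot above: prefer 33 ohm (driver type 1)
 * when the card advertises it, otherwise the default type 0.
 */
static int foo_select_drive_strength(struct sdhci_host *host,
                                     struct mmc_card *card,
                                     unsigned int max_dtr, int host_drv,
                                     int card_drv, int *drv_type)
{
        int strength = 1;       /* 33 ohm */

        if (!(card_drv & (1 << strength)))
                strength = 0;   /* default 50 ohm */

        return strength;
}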
static int sdhci_pxav3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) { + if (host->mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) device_init_wakeup(&pdev->dev, 1); - host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ; - } else { - device_init_wakeup(&pdev->dev, 0); - } pm_runtime_put_autosuspend(&pdev->dev); @@ -578,9 +574,7 @@ static const struct dev_pm_ops sdhci_pxav3_pmops = { static struct platform_driver sdhci_pxav3_driver = { .driver = { .name = "sdhci-pxav3", -#ifdef CONFIG_OF - .of_match_table = sdhci_pxav3_of_match, -#endif + .of_match_table = of_match_ptr(sdhci_pxav3_of_match), .pm = SDHCI_PXAV3_PMOPS, }, .probe = sdhci_pxav3_probe, diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index c6d2dd7317c1..70c724bc6fc7 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -736,7 +736,7 @@ static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL) #endif -static struct platform_device_id sdhci_s3c_driver_ids[] = { +static const struct platform_device_id sdhci_s3c_driver_ids[] = { { .name = "s3c-sdhci", .driver_data = (kernel_ulong_t)NULL, diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index 32848eb7ad80..0110bae25b7e 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -17,7 +17,7 @@ #define SDHCI_CLK_DELAY_SETTING 0x4C #define SDHCI_SIRF_8BITBUS BIT(3) -#define SIRF_TUNING_COUNT 128 +#define SIRF_TUNING_COUNT 16384 struct sdhci_sirf_priv { int gpio_cd; @@ -43,10 +43,43 @@ static void sdhci_sirf_set_bus_width(struct sdhci_host *host, int width) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); } +static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg) +{ + u32 val = readl(host->ioaddr + reg); + + if (unlikely((reg == SDHCI_CAPABILITIES_1) && + (host->mmc->caps & MMC_CAP_UHS_SDR50))) { + /* fake CAP_1 register */ + val = SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING; + } + + if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) { + u32 prss = val; + /* fake chips as V3.0 host controller */ + prss &= ~(0xFF << 16); + val = prss | (SDHCI_SPEC_300 << 16); + } + return val; +} + +static u16 sdhci_sirf_readw_le(struct sdhci_host *host, int reg) +{ + u16 ret = 0; + + ret = readw(host->ioaddr + reg); + + if (unlikely(reg == SDHCI_HOST_VERSION)) { + ret = readw(host->ioaddr + SDHCI_HOST_VERSION); + ret |= SDHCI_SPEC_300; + } + + return ret; +} + static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode) { int tuning_seq_cnt = 3; - u8 phase, tuned_phases[SIRF_TUNING_COUNT]; + int phase; u8 tuned_phase_cnt = 0; int rc = 0, longest_range = 0; int start = -1, end = 0, tuning_value = -1, range = 0; @@ -58,6 +91,7 @@ static int sdhci_sirf_execute_tuning(struct sdhci_host *host, u32 opcode) retry: phase = 0; + tuned_phase_cnt = 0; do { sdhci_writel(host, clock_setting | phase, @@ -65,7 +99,7 @@ retry: if (!mmc_send_tuning(mmc)) { /* Tuning is successful at this tuning point */ - tuned_phases[tuned_phase_cnt++] = phase; + tuned_phase_cnt++; dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", mmc_hostname(mmc), phase); if (start == -1) @@ -85,7 +119,7 @@ retry: start = -1; end = range = 0; } - } while (++phase < ARRAY_SIZE(tuned_phases)); + } while (++phase < SIRF_TUNING_COUNT); if (tuned_phase_cnt && tuning_value > 0) { /* @@ -112,6 +146,8 @@ retry: } static struct sdhci_ops sdhci_sirf_ops = { + .read_l = sdhci_sirf_readl_le, + .read_w = sdhci_sirf_readw_le,
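These SiRF accessors rely on the optional sdhci read/write hooks (compiled in via CONFIG_MMC_SDHCI_IO_ACCESSORS) that sit between the core and the MMIO, letting a driver correct what the hardware reports: here a hand-built SDHCI_CAPABILITIES_1 and a spec version promoted to v3.0 so the core enables the SDR50 tuning path. Reduced to the version fix-up alone, such a hook could read as follows (a sketch; foo_* is illustrative, and unlike the driver above it masks the version field before setting it):

static u16 foo_readw_fixup(struct sdhci_host *host, int reg)
{
        u16 val = readw(host->ioaddr + reg);

        /* claim a v3.0 controller so v3-only features are offered */
        if (unlikely(reg == SDHCI_HOST_VERSION))
                val = (val & ~SDHCI_SPEC_VER_MASK) | SDHCI_SPEC_300;

        return val;
}

static const struct sdhci_ops foo_ops = {
        .read_w = foo_readw_fixup,
};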
.platform_execute_tuning = sdhci_sirf_execute_tuning, .set_clock = sdhci_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c index 682f2bb0f4bf..969c2b0d57fd 100644 --- a/drivers/mmc/host/sdhci-st.c +++ b/drivers/mmc/host/sdhci-st.c @@ -509,4 +509,4 @@ module_platform_driver(sdhci_st_driver); MODULE_DESCRIPTION("SDHCI driver for STMicroelectronics SoCs"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:st-sdhci"); +MODULE_ALIAS("platform:sdhci-st"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c80287a02735..bc1445238fb3 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -52,7 +52,6 @@ static void sdhci_finish_data(struct sdhci_host *); static void sdhci_finish_command(struct sdhci_host *); static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); -static void sdhci_tuning_timer(unsigned long data); static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); static int sdhci_pre_dma_transfer(struct sdhci_host *host, struct mmc_data *data, @@ -254,17 +253,6 @@ static void sdhci_init(struct sdhci_host *host, int soft) static void sdhci_reinit(struct sdhci_host *host) { sdhci_init(host, 0); - /* - * Retuning stuffs are affected by different cards inserted and only - * applicable to UHS-I cards. So reset these fields to their initial - * value when card is removed. - */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags &= ~SDHCI_USING_RETUNING_TIMER; - - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - } sdhci_enable_card_detection(host); } @@ -328,8 +316,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host) local_irq_save(flags); while (blksize) { - if (!sg_miter_next(&host->sg_miter)) - BUG(); + BUG_ON(!sg_miter_next(&host->sg_miter)); len = min(host->sg_miter.length, blksize); @@ -374,8 +361,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host) local_irq_save(flags); while (blksize) { - if (!sg_miter_next(&host->sg_miter)) - BUG(); + BUG_ON(!sg_miter_next(&host->sg_miter)); len = min(host->sg_miter.length, blksize); @@ -848,7 +834,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) int sg_cnt; sg_cnt = sdhci_pre_dma_transfer(host, data, NULL); - if (sg_cnt == 0) { + if (sg_cnt <= 0) { /* * This only happens when someone fed * us an invalid request. @@ -1353,7 +1339,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) struct sdhci_host *host; int present; unsigned long flags; - u32 tuning_opcode; host = mmc_priv(mmc); @@ -1387,39 +1372,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } else { - u32 present_state; - - present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); - /* - * Check if the re-tuning timer has already expired and there - * is no on-going data transfer and DAT0 is not busy. If so, - * we need to execute tuning procedure before sending command. - */ - if ((host->flags & SDHCI_NEEDS_RETUNING) && - !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ)) && - (present_state & SDHCI_DATA_0_LVL_MASK)) { - if (mmc->card) { - /* eMMC uses cmd21 but sd and sdio use cmd19 */ - tuning_opcode = - mmc->card->type == MMC_TYPE_MMC ? 
- MMC_SEND_TUNING_BLOCK_HS200 : - MMC_SEND_TUNING_BLOCK; - - /* Here we need to set the host->mrq to NULL, - * in case the pending finish_tasklet - * finishes it incorrectly. - */ - host->mrq = NULL; - - spin_unlock_irqrestore(&host->lock, flags); - sdhci_execute_tuning(mmc, tuning_opcode); - spin_lock_irqsave(&host->lock, flags); - - /* Restore original mmc_request structure */ - host->mrq = mrq; - } - } - if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) sdhci_send_command(host, mrq->sbc); else @@ -1562,8 +1514,17 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; + else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; + else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; + else { + pr_warn("%s: invalid driver type, default to " + "driver type B\n", mmc_hostname(mmc)); + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; + } sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); } else { @@ -2065,23 +2026,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) } out: - host->flags &= ~SDHCI_NEEDS_RETUNING; - if (tuning_count) { - host->flags |= SDHCI_USING_RETUNING_TIMER; - mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); + /* + * In case tuning fails, host controllers which support + * re-tuning can try tuning again at a later time, when the + * re-tuning timer expires. So for these controllers, we + * return 0. Since there might be other controllers who do not + * have this capability, we return error for them. + */ + err = 0; } - /* - * In case tuning fails, host controllers which support re-tuning can - * try tuning again at a later time, when the re-tuning timer expires. - * So for these controllers, we return 0. Since there might be other - * controllers who do not have this capability, we return error for - * them. SDHCI_USING_RETUNING_TIMER means the host is currently using - * a retuning timer to do the retuning for the card. - */ - if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) - err = 0; + host->mmc->retune_period = err ? 
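The ios->drv_type handling added in the sdhci_do_set_ios() hunk above now covers driver types A through D and warns instead of silently ignoring an unknown value. The same mapping, pulled out into a helper for clarity (equivalent logic, not the patch's code):

static u16 foo_drv_type_to_ctrl2(struct mmc_host *mmc, unsigned char drv_type)
{
        switch (drv_type) {
        case MMC_SET_DRIVER_TYPE_A: return SDHCI_CTRL_DRV_TYPE_A;
        case MMC_SET_DRIVER_TYPE_B: return SDHCI_CTRL_DRV_TYPE_B;
        case MMC_SET_DRIVER_TYPE_C: return SDHCI_CTRL_DRV_TYPE_C;
        case MMC_SET_DRIVER_TYPE_D: return SDHCI_CTRL_DRV_TYPE_D;
        default:
                pr_warn("%s: invalid driver type %d, using type B\n",
                        mmc_hostname(mmc), drv_type);
                return SDHCI_CTRL_DRV_TYPE_B;
        }
}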
0 : tuning_count; sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); @@ -2092,6 +2048,18 @@ out_unlock: return err; } +static int sdhci_select_drive_strength(struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type) +{ + struct sdhci_host *host = mmc_priv(card->host); + + if (!host->ops->select_drive_strength) + return 0; + + return host->ops->select_drive_strength(host, card, max_dtr, host_drv, + card_drv, drv_type); +} static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) { @@ -2236,6 +2204,7 @@ static const struct mmc_host_ops sdhci_ops = { .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, .execute_tuning = sdhci_execute_tuning, + .select_drive_strength = sdhci_select_drive_strength, .card_event = sdhci_card_event, .card_busy = sdhci_card_busy, }; @@ -2337,20 +2306,6 @@ static void sdhci_timeout_timer(unsigned long data) spin_unlock_irqrestore(&host->lock, flags); } -static void sdhci_tuning_timer(unsigned long data) -{ - struct sdhci_host *host; - unsigned long flags; - - host = (struct sdhci_host *)data; - - spin_lock_irqsave(&host->lock, flags); - - host->flags |= SDHCI_NEEDS_RETUNING; - - spin_unlock_irqrestore(&host->lock, flags); -} - /*****************************************************************************\ * * * Interrupt handling * @@ -2728,11 +2683,8 @@ int sdhci_suspend_host(struct sdhci_host *host) { sdhci_disable_card_detection(host); - /* Disable tuning since we are suspending */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - } + mmc_retune_timer_stop(host->mmc); + mmc_retune_needed(host->mmc); if (!device_may_wakeup(mmc_dev(host->mmc))) { host->ier = 0; @@ -2782,10 +2734,6 @@ int sdhci_resume_host(struct sdhci_host *host) sdhci_enable_card_detection(host); - /* Set the re-tuning expiration flag */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) - host->flags |= SDHCI_NEEDS_RETUNING; - return ret; } @@ -2822,11 +2770,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host) { unsigned long flags; - /* Disable tuning since we are suspending */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) { - del_timer_sync(&host->tuning_timer); - host->flags &= ~SDHCI_NEEDS_RETUNING; - } + mmc_retune_timer_stop(host->mmc); + mmc_retune_needed(host->mmc); spin_lock_irqsave(&host->lock, flags); host->ier &= SDHCI_INT_CARD_INT; @@ -2869,10 +2814,6 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) spin_unlock_irqrestore(&host->lock, flags); } - /* Set the re-tuning expiration flag */ - if (host->flags & SDHCI_USING_RETUNING_TIMER) - host->flags |= SDHCI_NEEDS_RETUNING; - spin_lock_irqsave(&host->lock, flags); host->runtime_suspended = false; @@ -3315,13 +3256,14 @@ int sdhci_add_host(struct sdhci_host *host) SDHCI_MAX_CURRENT_MULTIPLIER; } - /* If OCR set by external regulators, use it instead */ + /* If OCR set by host, use it instead. */ + if (host->ocr_mask) + ocr_avail = host->ocr_mask; + + /* If OCR set by external regulators, give it highest prio. 
*/ if (mmc->ocr_avail) ocr_avail = mmc->ocr_avail; - if (host->ocr_mask) - ocr_avail &= host->ocr_mask; - mmc->ocr_avail = ocr_avail; mmc->ocr_avail_sdio = ocr_avail; if (host->ocr_avail_sdio) @@ -3408,13 +3350,6 @@ int sdhci_add_host(struct sdhci_host *host) init_waitqueue_head(&host->buf_ready_int); - if (host->version >= SDHCI_SPEC_300) { - /* Initialize re-tuning timer */ - init_timer(&host->tuning_timer); - host->tuning_timer.data = (unsigned long)host; - host->tuning_timer.function = sdhci_tuning_timer; - } - sdhci_init(host, 0); ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index e639b7f435e5..5521d29368e4 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -432,13 +432,11 @@ struct sdhci_host { #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */ #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ #define SDHCI_SDR50_NEEDS_TUNING (1<<4) /* SDR50 needs tuning */ -#define SDHCI_NEEDS_RETUNING (1<<5) /* Host needs retuning */ #define SDHCI_AUTO_CMD12 (1<<6) /* Auto CMD12 support */ #define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ -#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ #define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ @@ -504,7 +502,6 @@ struct sdhci_host { unsigned int tuning_count; /* Timer count for re-tuning */ unsigned int tuning_mode; /* Re-tuning mode supported by host */ #define SDHCI_TUNING_MODE_1 0 - struct timer_list tuning_timer; /* Timer for tuning */ struct sdhci_host_next next_data; unsigned long private[0] ____cacheline_aligned; @@ -541,6 +538,10 @@ struct sdhci_ops { void (*platform_init)(struct sdhci_host *host); void (*card_event)(struct sdhci_host *host); void (*voltage_switch)(struct sdhci_host *host); + int (*select_drive_strength)(struct sdhci_host *host, + struct mmc_card *card, + unsigned int max_dtr, int host_drv, + int card_drv, int *drv_type); }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 2fe8b91481b3..983b8b32ef96 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -49,7 +49,7 @@ struct f_sdhost_priv { struct device *dev; }; -void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) +static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) { struct f_sdhost_priv *priv = sdhci_priv(host); u32 ctrl = 0; @@ -77,12 +77,12 @@ void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING); } -unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) +static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) { return F_SDH30_MIN_CLOCK; } -void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) +static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL); @@ -114,8 +114,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) return irq; } - host = sdhci_alloc_host(dev, sizeof(struct sdhci_host) + - sizeof(struct f_sdhost_priv)); + host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) 
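The OCR hunk just above inverts the old precedence: host->ocr_mask used to be AND-ed into the regulator-derived mask, whereas now it is only a fallback and an external regulator's mask wins outright. The resulting priority as a self-contained helper (illustrative names, not the patch's code):

#include <linux/types.h>

/* priority: regulator-provided mask > host-provided mask > capabilities */
static u32 foo_pick_ocr(u32 caps_ocr, u32 host_ocr_mask, u32 regulator_ocr)
{
        u32 ocr = caps_ocr;     /* computed from the capability registers */

        if (host_ocr_mask)
                ocr = host_ocr_mask;
        if (regulator_ocr)
                ocr = regulator_ocr;

        return ocr;
}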
return PTR_ERR(host); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 7eff087cf515..5a1fdd405b1a 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -57,6 +57,7 @@ #include <linux/mmc/slot-gpio.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> +#include <linux/of_device.h> #include <linux/pagemap.h> #include <linux/platform_device.h> #include <linux/pm_qos.h> @@ -205,14 +206,14 @@ #define CLKDEV_MMC_DATA 20000000 /* 20MHz */ #define CLKDEV_INIT 400000 /* 400 KHz */ -enum mmcif_state { +enum sh_mmcif_state { STATE_IDLE, STATE_REQUEST, STATE_IOS, STATE_TIMEOUT, }; -enum mmcif_wait_for { +enum sh_mmcif_wait_for { MMCIF_WAIT_FOR_REQUEST, MMCIF_WAIT_FOR_CMD, MMCIF_WAIT_FOR_MREAD, @@ -224,12 +225,14 @@ enum mmcif_wait_for { MMCIF_WAIT_FOR_STOP, }; +/* + * difference for each SoC + */ struct sh_mmcif_host { struct mmc_host *mmc; struct mmc_request *mrq; struct platform_device *pd; - struct clk *hclk; - unsigned int clk; + struct clk *clk; int bus_width; unsigned char timing; bool sd_error; @@ -238,8 +241,8 @@ struct sh_mmcif_host { void __iomem *addr; u32 *pio_ptr; spinlock_t lock; /* protect sh_mmcif_host::state */ - enum mmcif_state state; - enum mmcif_wait_for wait_for; + enum sh_mmcif_state state; + enum sh_mmcif_wait_for wait_for; struct delayed_work timeout_work; size_t blocksize; int sg_idx; @@ -249,6 +252,7 @@ struct sh_mmcif_host { bool ccs_enable; /* Command Completion Signal support */ bool clk_ctrl2_enable; struct mutex thread_lock; + u32 clkdiv_map; /* see CE_CLK_CTRL::CLKDIV */ /* DMA support */ struct dma_chan *chan_rx; @@ -257,6 +261,14 @@ struct sh_mmcif_host { bool dma_active; }; +static const struct of_device_id sh_mmcif_of_match[] = { + { .compatible = "renesas,sh-mmcif" }, + { } +}; +MODULE_DEVICE_TABLE(of, sh_mmcif_of_match); + +#define sh_mmcif_host_to_dev(host) (&host->pd->dev) + static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, unsigned int reg, u32 val) { @@ -269,15 +281,16 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, writel(~val & readl(host->addr + reg), host->addr + reg); } -static void mmcif_dma_complete(void *arg) +static void sh_mmcif_dma_complete(void *arg) { struct sh_mmcif_host *host = arg; struct mmc_request *mrq = host->mrq; + struct device *dev = sh_mmcif_host_to_dev(host); - dev_dbg(&host->pd->dev, "Command completed\n"); + dev_dbg(dev, "Command completed\n"); if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n", - dev_name(&host->pd->dev))) + dev_name(dev))) return; complete(&host->dma_complete); @@ -289,6 +302,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) struct scatterlist *sg = data->sg; struct dma_async_tx_descriptor *desc = NULL; struct dma_chan *chan = host->chan_rx; + struct device *dev = sh_mmcif_host_to_dev(host); dma_cookie_t cookie = -EINVAL; int ret; @@ -301,13 +315,13 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) } if (desc) { - desc->callback = mmcif_dma_complete; + desc->callback = sh_mmcif_dma_complete; desc->callback_param = host; cookie = dmaengine_submit(desc); sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); dma_async_issue_pending(chan); } - dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", + dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n", __func__, data->sg_len, ret, cookie); if (!desc) { @@ -323,12 +337,12 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) host->chan_tx = NULL; dma_release_channel(chan); } - dev_warn(&host->pd->dev, + 
dev_warn(dev, "DMA failed: %d, falling back to PIO\n", ret); sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); } - dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, + dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, desc, cookie, data->sg_len); } @@ -338,6 +352,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) struct scatterlist *sg = data->sg; struct dma_async_tx_descriptor *desc = NULL; struct dma_chan *chan = host->chan_tx; + struct device *dev = sh_mmcif_host_to_dev(host); dma_cookie_t cookie = -EINVAL; int ret; @@ -350,13 +365,13 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) } if (desc) { - desc->callback = mmcif_dma_complete; + desc->callback = sh_mmcif_dma_complete; desc->callback_param = host; cookie = dmaengine_submit(desc); sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); dma_async_issue_pending(chan); } - dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", + dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n", __func__, data->sg_len, ret, cookie); if (!desc) { @@ -372,12 +387,12 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) host->chan_rx = NULL; dma_release_channel(chan); } - dev_warn(&host->pd->dev, + dev_warn(dev, "DMA failed: %d, falling back to PIO\n", ret); sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); } - dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__, + dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__, desc, cookie); } @@ -390,6 +405,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, struct dma_chan *chan; void *slave_data = NULL; struct resource *res; + struct device *dev = sh_mmcif_host_to_dev(host); dma_cap_mask_t mask; int ret; @@ -402,10 +418,10 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, (void *)pdata->slave_id_rx; chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, - slave_data, &host->pd->dev, + slave_data, dev, direction == DMA_MEM_TO_DEV ? "tx" : "rx"); - dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, + dev_dbg(dev, "%s: %s: got channel %p\n", __func__, direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan); if (!chan) @@ -435,12 +451,13 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, static void sh_mmcif_request_dma(struct sh_mmcif_host *host, struct sh_mmcif_plat_data *pdata) { + struct device *dev = sh_mmcif_host_to_dev(host); host->dma_active = false; if (pdata) { if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) return; - } else if (!host->pd->dev.of_node) { + } else if (!dev->of_node) { return; } @@ -476,21 +493,59 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host) static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) { - struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + struct device *dev = sh_mmcif_host_to_dev(host); + struct sh_mmcif_plat_data *p = dev->platform_data; bool sup_pclk = p ? 
p->sup_pclk : false; + unsigned int current_clk = clk_get_rate(host->clk); + unsigned int clkdiv; sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); if (!clk) return; - if (sup_pclk && clk == host->clk) - sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); - else - sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & - ((fls(DIV_ROUND_UP(host->clk, - clk) - 1) - 1) << 16)); + if (host->clkdiv_map) { + unsigned int freq, best_freq, myclk, div, diff_min, diff; + int i; + + clkdiv = 0; + diff_min = ~0; + best_freq = 0; + for (i = 31; i >= 0; i--) { + if (!((1 << i) & host->clkdiv_map)) + continue; + + /* + * clk = parent_freq / div + * -> parent_freq = clk x div + */ + + div = 1 << (i + 1); + freq = clk_round_rate(host->clk, clk * div); + myclk = freq / div; + diff = (myclk > clk) ? myclk - clk : clk - myclk; + + if (diff <= diff_min) { + best_freq = freq; + clkdiv = i; + diff_min = diff; + } + } + + dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n", + (best_freq / (1 << (clkdiv + 1))), clk, + best_freq, clkdiv); + + clk_set_rate(host->clk, best_freq); + clkdiv = clkdiv << 16; + } else if (sup_pclk && clk == current_clk) { + clkdiv = CLK_SUP_PCLK; + } else { + clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16; + } + + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv); sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); } @@ -514,6 +569,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) static int sh_mmcif_error_manage(struct sh_mmcif_host *host) { + struct device *dev = sh_mmcif_host_to_dev(host); u32 state1, state2; int ret, timeout; @@ -521,8 +577,8 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); - dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); - dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2); + dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1); + dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2); if (state1 & STS1_CMDSEQ) { sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); @@ -534,25 +590,25 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) mdelay(1); } if (!timeout) { - dev_err(&host->pd->dev, + dev_err(dev, "Forced end of command sequence timeout err\n"); return -EIO; } sh_mmcif_sync_reset(host); - dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); + dev_dbg(dev, "Forced end of command sequence\n"); return -EIO; } if (state2 & STS2_CRC_ERR) { - dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", + dev_err(dev, " CRC error: state %u, wait %u\n", host->state, host->wait_for); ret = -EIO; } else if (state2 & STS2_TIMEOUT_ERR) { - dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", + dev_err(dev, " Timeout: state %u, wait %u\n", host->state, host->wait_for); ret = -ETIMEDOUT; } else { - dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", + dev_dbg(dev, " End/Index error: state %u, wait %u\n", host->state, host->wait_for); ret = -EIO; } @@ -593,13 +649,14 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host, static bool sh_mmcif_read_block(struct sh_mmcif_host *host) { + struct device *dev = sh_mmcif_host_to_dev(host); struct mmc_data *data = host->mrq->data; u32 *p = sg_virt(data->sg); int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); return false; } @@ -634,13 
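The clkdiv_map search above deserves a worked example: for every divisor 2^(i+1) the SoC allows, the driver asks clk_round_rate() for the closest achievable parent rate to target * divisor and keeps the pair whose parent/divisor lands nearest the target; the <= comparison means the last, i.e. smallest, divisor achieving the minimum error wins. With a parent that tops out at 104 MHz and a 26 MHz target, div = 2 with a 52 MHz parent is exact and is what the loop settles on. A standalone user-space rendition (rate_round() stands in for clk_round_rate(); all numbers are made up):

#include <stdio.h>

/* stand-in for clk_round_rate(): this parent can do at most 104 MHz */
static unsigned long rate_round(unsigned long long rate)
{
        return rate > 104000000ULL ? 104000000UL : (unsigned long)rate;
}

int main(void)
{
        unsigned long target = 26000000, best_freq = 0;
        unsigned long diff_min = ~0UL;
        unsigned int clkdiv_map = 0x3ff, clkdiv = 0;    /* divisors 2..1024 */
        int i;

        for (i = 31; i >= 0; i--) {
                unsigned long div, freq, myclk, diff;

                if (!((1u << i) & clkdiv_map))
                        continue;

                div = 1UL << (i + 1);
                freq = rate_round((unsigned long long)target * div);
                myclk = freq / div;
                diff = myclk > target ? myclk - target : target - myclk;

                if (diff <= diff_min) { /* <= keeps the smallest divisor */
                        best_freq = freq;
                        clkdiv = i;
                        diff_min = diff;
                }
        }

        printf("parent %lu Hz / %lu -> %lu Hz\n", best_freq,
               1UL << (clkdiv + 1), best_freq / (1UL << (clkdiv + 1)));
        return 0;
}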
+691,14 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host, static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) { + struct device *dev = sh_mmcif_host_to_dev(host); struct mmc_data *data = host->mrq->data; u32 *p = host->pio_ptr; int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); return false; } @@ -671,13 +729,14 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host, static bool sh_mmcif_write_block(struct sh_mmcif_host *host) { + struct device *dev = sh_mmcif_host_to_dev(host); struct mmc_data *data = host->mrq->data; u32 *p = sg_virt(data->sg); int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); return false; } @@ -712,13 +771,14 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host, static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) { + struct device *dev = sh_mmcif_host_to_dev(host); struct mmc_data *data = host->mrq->data; u32 *p = host->pio_ptr; int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); + dev_dbg(dev, "%s(): %d\n", __func__, data->error); return false; } @@ -756,6 +816,7 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, struct mmc_request *mrq) { + struct device *dev = sh_mmcif_host_to_dev(host); struct mmc_data *data = mrq->data; struct mmc_command *cmd = mrq->cmd; u32 opc = cmd->opcode; @@ -775,7 +836,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, tmp |= CMD_SET_RTYP_17B; break; default: - dev_err(&host->pd->dev, "Unsupported response type.\n"); + dev_err(dev, "Unsupported response type.\n"); break; } switch (opc) { @@ -803,7 +864,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, tmp |= CMD_SET_DATW_8; break; default: - dev_err(&host->pd->dev, "Unsupported bus width.\n"); + dev_err(dev, "Unsupported bus width.\n"); break; } switch (host->timing) { @@ -846,6 +907,8 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, static int sh_mmcif_data_trans(struct sh_mmcif_host *host, struct mmc_request *mrq, u32 opc) { + struct device *dev = sh_mmcif_host_to_dev(host); + switch (opc) { case MMC_READ_MULTIPLE_BLOCK: sh_mmcif_multi_read(host, mrq); @@ -861,7 +924,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host, sh_mmcif_single_read(host, mrq); return 0; default: - dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc); + dev_err(dev, "Unsupported CMD%d\n", opc); return -EINVAL; } } @@ -918,6 +981,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, struct mmc_request *mrq) { + struct device *dev = sh_mmcif_host_to_dev(host); + switch (mrq->cmd->opcode) { case MMC_READ_MULTIPLE_BLOCK: sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); @@ -926,7 +991,7 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); break; default: - dev_err(&host->pd->dev, "unsupported stop cmd\n"); + dev_err(dev, "unsupported stop cmd\n"); mrq->stop->error = sh_mmcif_error_manage(host); return; } @@ -937,11 +1002,13 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) { 
struct sh_mmcif_host *host = mmc_priv(mmc); + struct device *dev = sh_mmcif_host_to_dev(host); unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (host->state != STATE_IDLE) { - dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); + dev_dbg(dev, "%s() rejected, state %u\n", + __func__, host->state); spin_unlock_irqrestore(&host->lock, flags); mrq->cmd->error = -EAGAIN; mmc_request_done(mmc, mrq); @@ -972,17 +1039,37 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) sh_mmcif_start_cmd(host, mrq); } -static int sh_mmcif_clk_update(struct sh_mmcif_host *host) +static void sh_mmcif_clk_setup(struct sh_mmcif_host *host) { - int ret = clk_prepare_enable(host->hclk); + struct device *dev = sh_mmcif_host_to_dev(host); + + if (host->mmc->f_max) { + unsigned int f_max, f_min = 0, f_min_old; + + f_max = host->mmc->f_max; + for (f_min_old = f_max; f_min_old > 2;) { + f_min = clk_round_rate(host->clk, f_min_old / 2); + if (f_min == f_min_old) + break; + f_min_old = f_min; + } + + /* + * This driver assumes this SoC is R-Car Gen2 or later + */ + host->clkdiv_map = 0x3ff; + + host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map)); + host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map)); + } else { + unsigned int clk = clk_get_rate(host->clk); - if (!ret) { - host->clk = clk_get_rate(host->hclk); - host->mmc->f_max = host->clk / 2; - host->mmc->f_min = host->clk / 512; + host->mmc->f_max = clk / 2; + host->mmc->f_min = clk / 512; } - return ret; + dev_dbg(dev, "clk max/min = %d/%d\n", + host->mmc->f_max, host->mmc->f_min); } static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) @@ -998,11 +1085,13 @@ static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sh_mmcif_host *host = mmc_priv(mmc); + struct device *dev = sh_mmcif_host_to_dev(host); unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (host->state != STATE_IDLE) { - dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); + dev_dbg(dev, "%s() rejected, state %u\n", + __func__, host->state); spin_unlock_irqrestore(&host->lock, flags); return; } @@ -1013,7 +1102,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->power_mode == MMC_POWER_UP) { if (!host->card_present) { /* See if we also get DMA */ - sh_mmcif_request_dma(host, host->pd->dev.platform_data); + sh_mmcif_request_dma(host, dev->platform_data); host->card_present = true; } sh_mmcif_set_power(host, ios); @@ -1027,8 +1116,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } if (host->power) { - pm_runtime_put_sync(&host->pd->dev); - clk_disable_unprepare(host->hclk); + pm_runtime_put_sync(dev); + clk_disable_unprepare(host->clk); host->power = false; if (ios->power_mode == MMC_POWER_OFF) sh_mmcif_set_power(host, ios); @@ -1039,8 +1128,9 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->clock) { if (!host->power) { - sh_mmcif_clk_update(host); - pm_runtime_get_sync(&host->pd->dev); + clk_prepare_enable(host->clk); + + pm_runtime_get_sync(dev); host->power = true; sh_mmcif_sync_reset(host); } @@ -1055,7 +1145,8 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) static int sh_mmcif_get_cd(struct mmc_host *mmc) { struct sh_mmcif_host *host = mmc_priv(mmc); - struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + struct device *dev = 
sh_mmcif_host_to_dev(host); + struct sh_mmcif_plat_data *p = dev->platform_data; int ret = mmc_gpio_get_cd(mmc); if (ret >= 0) @@ -1077,6 +1168,7 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) { struct mmc_command *cmd = host->mrq->cmd; struct mmc_data *data = host->mrq->data; + struct device *dev = sh_mmcif_host_to_dev(host); long time; if (host->sd_error) { @@ -1090,7 +1182,7 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) cmd->error = sh_mmcif_error_manage(host); break; } - dev_dbg(&host->pd->dev, "CMD%d error %d\n", + dev_dbg(dev, "CMD%d error %d\n", cmd->opcode, cmd->error); host->sd_error = false; return false; @@ -1170,6 +1262,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) { struct sh_mmcif_host *host = dev_id; struct mmc_request *mrq; + struct device *dev = sh_mmcif_host_to_dev(host); bool wait = false; unsigned long flags; int wait_work; @@ -1184,7 +1277,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) mrq = host->mrq; if (!mrq) { - dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n", + dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n", host->state, host->wait_for); mutex_unlock(&host->thread_lock); return IRQ_HANDLED; @@ -1222,7 +1315,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) case MMCIF_WAIT_FOR_STOP: if (host->sd_error) { mrq->stop->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error); + dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error); break; } sh_mmcif_get_cmd12response(host, mrq->stop); @@ -1232,7 +1325,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) case MMCIF_WAIT_FOR_WRITE_END: if (host->sd_error) { mrq->data->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error); + dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error); } break; default: @@ -1275,6 +1368,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) { struct sh_mmcif_host *host = dev_id; + struct device *dev = sh_mmcif_host_to_dev(host); u32 state, mask; state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); @@ -1286,32 +1380,33 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); if (state & ~MASK_CLEAN) - dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n", + dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n", state); if (state & INT_ERR_STS || state & ~INT_ALL) { host->sd_error = true; - dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state); + dev_dbg(dev, "int err state = 0x%08x\n", state); } if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { if (!host->mrq) - dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state); + dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state); if (!host->dma_active) return IRQ_WAKE_THREAD; else if (host->sd_error) - mmcif_dma_complete(host); + sh_mmcif_dma_complete(host); } else { - dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state); + dev_dbg(dev, "Unexpected IRQ 0x%x\n", state); } return IRQ_HANDLED; } -static void mmcif_timeout_work(struct work_struct *work) +static void sh_mmcif_timeout_work(struct work_struct *work) { struct delayed_work *d = container_of(work, struct delayed_work, work); struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); struct mmc_request *mrq = host->mrq; + struct device *dev = sh_mmcif_host_to_dev(host); unsigned long flags; if (host->dying) @@ -1324,7 +1419,7 @@ 
static void mmcif_timeout_work(struct work_struct *work) return; } - dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n", + dev_err(dev, "Timeout waiting for %u on CMD%u\n", host->wait_for, mrq->cmd->opcode); host->state = STATE_TIMEOUT; @@ -1361,7 +1456,8 @@ static void mmcif_timeout_work(struct work_struct *work) static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) { - struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; + struct device *dev = sh_mmcif_host_to_dev(host); + struct sh_mmcif_plat_data *pd = dev->platform_data; struct mmc_host *mmc = host->mmc; mmc_regulator_get_supply(mmc); @@ -1380,7 +1476,8 @@ static int sh_mmcif_probe(struct platform_device *pdev) int ret = 0, irq[2]; struct mmc_host *mmc; struct sh_mmcif_host *host; - struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; + struct device *dev = &pdev->dev; + struct sh_mmcif_plat_data *pd = dev->platform_data; struct resource *res; void __iomem *reg; const char *name; @@ -1388,16 +1485,16 @@ static int sh_mmcif_probe(struct platform_device *pdev) irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq(pdev, 1); if (irq[0] < 0) { - dev_err(&pdev->dev, "Get irq error\n"); + dev_err(dev, "Get irq error\n"); return -ENXIO; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(&pdev->dev, res); + reg = devm_ioremap_resource(dev, res); if (IS_ERR(reg)) return PTR_ERR(reg); - mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); + mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev); if (!mmc) return -ENOMEM; @@ -1430,41 +1527,44 @@ static int sh_mmcif_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - pm_runtime_enable(&pdev->dev); + pm_runtime_enable(dev); host->power = false; - host->hclk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(host->hclk)) { - ret = PTR_ERR(host->hclk); - dev_err(&pdev->dev, "cannot get clock: %d\n", ret); + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + dev_err(dev, "cannot get clock: %d\n", ret); goto err_pm; } - ret = sh_mmcif_clk_update(host); + + ret = clk_prepare_enable(host->clk); if (ret < 0) goto err_pm; - ret = pm_runtime_resume(&pdev->dev); + sh_mmcif_clk_setup(host); + + ret = pm_runtime_resume(dev); if (ret < 0) goto err_clk; - INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); + INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work); sh_mmcif_sync_reset(host); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error"; - ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr, + name = irq[1] < 0 ? 
dev_name(dev) : "sh_mmc:error"; + ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host); if (ret) { - dev_err(&pdev->dev, "request_irq error (%s)\n", name); + dev_err(dev, "request_irq error (%s)\n", name); goto err_clk; } if (irq[1] >= 0) { - ret = devm_request_threaded_irq(&pdev->dev, irq[1], + ret = devm_request_threaded_irq(dev, irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); if (ret) { - dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); + dev_err(dev, "request_irq error (sh_mmc:int)\n"); goto err_clk; } } @@ -1481,19 +1581,19 @@ static int sh_mmcif_probe(struct platform_device *pdev) if (ret < 0) goto err_clk; - dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + dev_pm_qos_expose_latency_limit(dev, 100); - dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n", + dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n", sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, - clk_get_rate(host->hclk) / 1000000UL); + clk_get_rate(host->clk) / 1000000UL); - clk_disable_unprepare(host->hclk); + clk_disable_unprepare(host->clk); return ret; err_clk: - clk_disable_unprepare(host->hclk); + clk_disable_unprepare(host->clk); err_pm: - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); err_host: mmc_free_host(mmc); return ret; @@ -1504,7 +1604,7 @@ static int sh_mmcif_remove(struct platform_device *pdev) struct sh_mmcif_host *host = platform_get_drvdata(pdev); host->dying = true; - clk_prepare_enable(host->hclk); + clk_prepare_enable(host->clk); pm_runtime_get_sync(&pdev->dev); dev_pm_qos_hide_latency_limit(&pdev->dev); @@ -1519,7 +1619,7 @@ static int sh_mmcif_remove(struct platform_device *pdev) */ cancel_delayed_work_sync(&host->timeout_work); - clk_disable_unprepare(host->hclk); + clk_disable_unprepare(host->clk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -1543,12 +1643,6 @@ static int sh_mmcif_resume(struct device *dev) } #endif -static const struct of_device_id mmcif_of_match[] = { - { .compatible = "renesas,sh-mmcif" }, - { } -}; -MODULE_DEVICE_TABLE(of, mmcif_of_match); - static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume) }; @@ -1559,7 +1653,7 @@ static struct platform_driver sh_mmcif_driver = { .driver = { .name = DRIVER_NAME, .pm = &sh_mmcif_dev_pm_ops, - .of_match_table = mmcif_of_match, + .of_match_table = sh_mmcif_of_match, }, }; diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index f746df493892..e897e7fc3b14 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -85,8 +85,10 @@ static int tmio_mmc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -EINVAL; + if (!res) { + ret = -EINVAL; + goto cell_disable; + } pdata->flags |= TMIO_MMC_HAVE_HIGH_REG; @@ -101,7 +103,8 @@ static int tmio_mmc_probe(struct platform_device *pdev) if (ret) goto host_free; - ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING, + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host); if (ret) goto host_remove; @@ -129,7 +132,6 @@ static int tmio_mmc_remove(struct platform_device *pdev) if (mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); - free_irq(platform_get_irq(pdev, 0), host); tmio_mmc_host_remove(host); if (cell->disable) cell->disable(pdev); diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 
dba7e1c19dd7..e3dcf31a8bd6 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -1108,7 +1108,8 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, if (ret < 0) goto host_free; - _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); + _host->ctl = devm_ioremap(&pdev->dev, + res_ctl->start, resource_size(res_ctl)); if (!_host->ctl) { ret = -ENOMEM; goto host_free; @@ -1230,8 +1231,6 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - - iounmap(host->ctl); } EXPORT_SYMBOL(tmio_mmc_host_remove); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 714905384900..6d2c702c8e4a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -168,13 +168,8 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata) #ifdef CONFIG_ACPI static int xgbe_acpi_support(struct xgbe_prv_data *pdata) { - struct acpi_device *adev = pdata->adev; struct device *dev = pdata->dev; u32 property; - acpi_handle handle; - acpi_status status; - unsigned long long data; - int cca; int ret; /* Obtain the system clock setting */ @@ -195,24 +190,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata) } pdata->ptpclk_rate = property; - /* Retrieve the device cache coherency value */ - handle = adev->handle; - do { - status = acpi_evaluate_integer(handle, "_CCA", NULL, &data); - if (!ACPI_FAILURE(status)) { - cca = data; - break; - } - - status = acpi_get_parent(handle, &handle); - } while (!ACPI_FAILURE(status)); - - if (ACPI_FAILURE(status)) { - dev_err(dev, "error obtaining acpi coherency value\n"); - return -EINVAL; - } - pdata->coherent = !!cca; - return 0; } #else /* CONFIG_ACPI */ @@ -243,9 +220,6 @@ static int xgbe_of_support(struct xgbe_prv_data *pdata) } pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk); - /* Retrieve the device cache coherency value */ - pdata->coherent = of_dma_is_coherent(dev->of_node); - return 0; } #else /* CONFIG_OF */ @@ -364,6 +338,7 @@ static int xgbe_probe(struct platform_device *pdev) goto err_io; /* Set the DMA coherency values */ + pdata->coherent = device_dma_is_coherent(pdata->dev); if (pdata->coherent) { pdata->axdomain = XGBE_DMA_OS_AXDOMAIN; pdata->arcache = XGBE_DMA_OS_ARCACHE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 524d11098c56..e052f05558c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1185,6 +1185,7 @@ enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, + int user, u64 *pbar2_qoffset, unsigned int *pbar2_qid); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 803d91beec6f..a9355593e65e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2145,6 +2145,7 @@ EXPORT_SYMBOL(cxgb4_read_sge_timestamp); int cxgb4_bar2_sge_qregs(struct net_device *dev, unsigned int qid, enum cxgb4_bar2_qtype qtype, + int user, u64 *pbar2_qoffset, unsigned int *pbar2_qid) { @@ -2153,6 +2154,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, (qtype == CXGB4_BAR2_QTYPE_EGRESS ? 
T4_BAR2_QTYPE_EGRESS : T4_BAR2_QTYPE_INGRESS), + user, pbar2_qoffset, pbar2_qid); } @@ -2351,7 +2353,7 @@ static void process_db_drop(struct work_struct *work) int ret; ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, - &bar2_qoffset, &bar2_qid); + 0, &bar2_qoffset, &bar2_qid); if (ret) dev_err(adap->pdev_dev, "doorbell drop recovery: " "qid=%d, pidx_inc=%d\n", qid, pidx_inc); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 78ab4d406ce2..e33934a6f59e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -306,6 +306,7 @@ enum cxgb4_bar2_qtype { CXGB4_BAR2_QTYPE_EGRESS, CXGB4_BAR2_QTYPE_INGRESS }; int cxgb4_bar2_sge_qregs(struct net_device *dev, unsigned int qid, enum cxgb4_bar2_qtype qtype, + int user, u64 *pbar2_qoffset, unsigned int *pbar2_qid); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 0d2eddab04ef..1b99aecde736 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2429,8 +2429,8 @@ static void __iomem *bar2_address(struct adapter *adapter, u64 bar2_qoffset; int ret; - ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, - &bar2_qoffset, pbar2_qid); + ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, 0, + &bar2_qoffset, pbar2_qid); if (ret) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8578a742f2a..61d8b3ec959e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5102,6 +5102,7 @@ int t4_prep_adapter(struct adapter *adapter) * @adapter: the adapter * @qid: the Queue ID * @qtype: the Ingress or Egress type for @qid + * @user: true if this request is for a user mode queue * @pbar2_qoffset: BAR2 Queue Offset * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues * @@ -5125,6 +5126,7 @@ int t4_prep_adapter(struct adapter *adapter) int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, + int user, u64 *pbar2_qoffset, unsigned int *pbar2_qid) { @@ -5132,9 +5134,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, u64 bar2_page_offset, bar2_qoffset; unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred; - /* T4 doesn't support BAR2 SGE Queue registers. - */ - if (is_t4(adapter->params.chip)) + /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */ + if (!user && is_t4(adapter->params.chip)) return -EINVAL; /* Get our SGE Page Size parameters. 
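The new "user" argument now threads through every BAR2 doorbell lookup above: kernel-mode callers pass 0 and keep the old T4 restriction, while user-mode queues may use BAR2 registers even on T4. A minimal sketch of a user-mode caller, assuming only the signature and semantics shown in these hunks (the helper name and body are hypothetical):

	/* Hypothetical caller: look up the BAR2 doorbell registers for a
	 * user-mode egress queue. Only cxgb4_t4_bar2_sge_qregs() and its
	 * arguments come from the patch above; the rest is illustrative. */
	static int example_user_egress_qregs(struct adapter *adap, unsigned int qid)
	{
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		/* user = 1: allowed even on T4, which rejects kernel-mode
		 * (user = 0) requests with -EINVAL */
		ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					      1, &bar2_qoffset, &bar2_qid);
		if (ret)
			return ret;

		/* bar2_qoffset is the offset into BAR2 for this queue's
		 * registers; bar2_qid is 0 for Queue-ID-inferred SGE queues */
		return 0;
	}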
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index ced5ecab5aa7..70de39c6a397 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1674,6 +1674,25 @@ static int map_internal_clock(struct mlx4_dev *dev) return 0; } +int mlx4_get_internal_clock_params(struct mlx4_dev *dev, + struct mlx4_clock_params *params) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (mlx4_is_slave(dev)) + return -ENOTSUPP; + + if (!params) + return -EINVAL; + + params->bar = priv->fw.clock_bar; + params->offset = priv->fw.clock_offset; + params->size = MLX4_CLOCK_SIZE; + + return 0; +} +EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); + static void unmap_internal_clock(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c index ee1b0b965f34..1368dac00da0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mad.c @@ -36,7 +36,7 @@ #include <linux/mlx5/cmd.h> #include "mlx5_core.h" -int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port) { struct mlx5_mad_ifc_mbox_in *in = NULL; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4b00545a3ace..65944dd8bf6b 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1304,7 +1304,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) if (!cpumask_test_cpu(cpu, thread_mask)) { ++count; cpumask_or(thread_mask, thread_mask, - topology_thread_cpumask(cpu)); + topology_sibling_cpumask(cpu)); } } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 3dc1f68b322d..6ce973187225 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -3058,7 +3058,6 @@ static void cas_init_mac(struct cas *cp) /* setup core arbitration weight register */ writel(CAWR_RR_DIS, cp->regs + REG_CAWR); - /* XXX Use pci_dma_burst_advice() */ #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) /* set the infinite burst register for chips that don't have * pci issues. 
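The new mlx4_get_internal_clock_params() export lets a consumer (such as the mlx4 InfiniBand driver) locate and map the device's free-running hardware clock on its own. A hedged usage sketch, assuming only the exported function and the bar/offset/size fields shown above (the helper and its pdev argument are hypothetical):

	/* Illustrative only: map the internal clock of a mlx4 PF.
	 * "pdev" is the device's struct pci_dev, supplied by the caller. */
	static void __iomem *example_map_internal_clock(struct mlx4_dev *dev,
							struct pci_dev *pdev)
	{
		struct mlx4_clock_params params;

		/* Fails with -ENOTSUPP on slave (SR-IOV VF) devices */
		if (mlx4_get_internal_clock_params(dev, &params))
			return NULL;

		return ioremap(pci_resource_start(pdev, params.bar) + params.offset,
			       params.size);
	}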
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index ea091bc5ff09..1e09243d5449 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/if_ether.h> +#include <linux/vmalloc.h> #include <asm/sync_bitops.h> #include "hyperv_net.h" diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9118cea91882..35a482d526d9 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -27,6 +27,7 @@ #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/nls.h> +#include <linux/vmalloc.h> #include "hyperv_net.h" diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 66edd99bc302..7ddb1ab70891 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -35,7 +35,8 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, for (n = 0; n < s->gpios->ndescs; n++) values[n] = (desired_child >> n) & 1; - gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values); + gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc, + values); return 0; } diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index 15f9b7c9e4d3..3f6738612f45 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -1313,8 +1313,6 @@ static int ntb_setup_intx(struct ntb_device *ndev) struct pci_dev *pdev = ndev->pdev; int rc; - pci_msi_off(pdev); - /* Verify intx is enabled */ pci_intx(pdev, 1); diff --git a/drivers/of/address.c b/drivers/of/address.c index 78a7dcbec7d8..6906a3f61bd8 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -765,7 +765,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) spin_lock(&io_range_lock); list_for_each_entry(res, &io_range_list, list) { if (address >= res->start && address < res->start + res->size) { - addr = res->start - address + offset; + addr = address - res->start + offset; break; } offset += res->size; diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 7a8f1c5e65af..73de4efcbe6e 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -1,6 +1,10 @@ # # PCI configuration # +config PCI_BUS_ADDR_T_64BIT + def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) + depends on PCI + config PCI_MSI bool "Message Signaled Interrupts (MSI and MSI-X)" depends on PCI diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 90fa3a78fb7c..6fbd3f2b5992 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -92,11 +92,11 @@ void pci_bus_remove_resources(struct pci_bus *bus) } static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT static struct pci_bus_region pci_64_bit = {0, - (dma_addr_t) 0xffffffffffffffffULL}; -static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL, - (dma_addr_t) 0xffffffffffffffffULL}; + (pci_bus_addr_t) 0xffffffffffffffffULL}; +static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL, + (pci_bus_addr_t) 0xffffffffffffffffULL}; #endif /* @@ -200,7 +200,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, resource_size_t), void *alignf_data) { -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT int rc; if (res->flags & IORESOURCE_MEM_64) { diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 1dfb567b3522..c132bddc03f3 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ 
-89,11 +89,20 @@ config PCI_XGENE depends on ARCH_XGENE depends on OF select PCIEPORTBUS + select PCI_MSI_IRQ_DOMAIN if PCI_MSI help Say Y here if you want internal PCI support on APM X-Gene SoC. There are 5 internal PCIe ports available. Each port is GEN3 capable and has varied lanes from x1 to x8. +config PCI_XGENE_MSI + bool "X-Gene v1 PCIe MSI feature" + depends on PCI_XGENE && PCI_MSI + default y + help + Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. + This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. + config PCI_LAYERSCAPE bool "Freescale Layerscape PCIe controller" depends on OF && ARM @@ -125,4 +134,15 @@ config PCIE_IPROC_PLATFORM Say Y here if you want to use the Broadcom iProc PCIe controller through the generic platform bus interface +config PCIE_IPROC_BCMA + bool "Broadcom iProc PCIe BCMA bus driver" + depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST) + select PCIE_IPROC + select BCMA + select PCI_DOMAINS + default ARCH_BCM_5301X + help + Say Y here if you want to use the Broadcom iProc PCIe controller + through the BCMA bus interface + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index f733b4e27642..140d66f796e4 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -11,7 +11,9 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCI_XGENE) += pci-xgene.o +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o +obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index 2d57e19a2cd4..80db09e47800 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -93,9 +93,9 @@ static int dra7xx_pcie_link_up(struct pcie_port *pp) static int dra7xx_pcie_establish_link(struct pcie_port *pp) { - u32 reg; - unsigned int retries = 1000; struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); + u32 reg; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "link is already up\n"); @@ -106,19 +106,14 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp) reg |= LTSSM_EN; dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); - while (retries--) { - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); - if (reg & LINK_UP) - break; + for (retries = 0; retries < 1000; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(10, 20); } - if (retries == 0) { - dev_err(pp->dev, "link is not up\n"); - return -ETIMEDOUT; - } - - return 0; + dev_err(pp->dev, "link is not up\n"); + return -EINVAL; } static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index c139237e0e52..f9f468d9a819 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c @@ -316,9 +316,9 @@ static void exynos_pcie_assert_reset(struct pcie_port *pp) static int exynos_pcie_establish_link(struct pcie_port *pp) { - u32 val; - int count = 0; struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); + u32 val; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "Link already up\n"); @@ -357,27 +357,23 @@ static int exynos_pcie_establish_link(struct pcie_port *pp) PCIE_APP_LTSSM_ENABLE); /* check if 
the link is up or not */ - while (!dw_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - while (exynos_phy_readl(exynos_pcie, - PCIE_PHY_PLL_LOCKED) == 0) { - val = exynos_blk_readl(exynos_pcie, - PCIE_PHY_PLL_LOCKED); - dev_info(pp->dev, "PLL Locked: 0x%x\n", val); - } - /* power off phy */ - exynos_pcie_power_off_phy(pp); - - dev_err(pp->dev, "PCIe Link Fail\n"); - return -EINVAL; + for (retries = 0; retries < 10; retries++) { + if (dw_pcie_link_up(pp)) { + dev_info(pp->dev, "Link up\n"); + return 0; } + mdelay(100); } - dev_info(pp->dev, "Link up\n"); + while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) { + val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED); + dev_info(pp->dev, "PLL Locked: 0x%x\n", val); + } + /* power off phy */ + exynos_pcie_power_off_phy(pp); - return 0; + dev_err(pp->dev, "PCIe Link Fail\n"); + return -EINVAL; } static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp) diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index fdb95367721e..233a196c6e66 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -47,6 +47,8 @@ struct imx6_pcie { #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf +#define PCIE_RC_LCSR 0x80 + /* PCIe Port Logic registers (memory-mapped) */ #define PL_OFFSET 0x700 #define PCIE_PL_PFLR (PL_OFFSET + 0x08) @@ -335,21 +337,36 @@ static void imx6_pcie_init_phy(struct pcie_port *pp) static int imx6_pcie_wait_for_link(struct pcie_port *pp) { - int count = 200; + unsigned int retries; - while (!dw_pcie_link_up(pp)) { + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(100, 1000); - if (--count) - continue; - - dev_err(pp->dev, "phy link never came up\n"); - dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); - return -EINVAL; } - return 0; + dev_err(pp->dev, "phy link never came up\n"); + dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", + readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), + readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); + return -EINVAL; +} + +static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp) +{ + u32 tmp; + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); + /* Test if the speed change finished. */ + if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) + return 0; + usleep_range(100, 1000); + } + + dev_err(pp->dev, "Speed change timeout\n"); + return -EINVAL; } static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) @@ -359,11 +376,11 @@ static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) return dw_handle_msi_irq(pp); } -static int imx6_pcie_start_link(struct pcie_port *pp) +static int imx6_pcie_establish_link(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); - uint32_t tmp; - int ret, count; + u32 tmp; + int ret; /* * Force Gen1 operation when starting the link. In case the link is @@ -397,29 +414,22 @@ static int imx6_pcie_start_link(struct pcie_port *pp) tmp |= PORT_LOGIC_SPEED_CHANGE; writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); - count = 200; - while (count--) { - tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); - /* Test if the speed change finished. 
*/ - if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) - break; - usleep_range(100, 1000); + ret = imx6_pcie_wait_for_speed_change(pp); + if (ret) { + dev_err(pp->dev, "Failed to bring link up!\n"); + return ret; } /* Make sure link training is finished as well! */ - if (count) - ret = imx6_pcie_wait_for_link(pp); - else - ret = -EINVAL; - + ret = imx6_pcie_wait_for_link(pp); if (ret) { dev_err(pp->dev, "Failed to bring link up!\n"); - } else { - tmp = readl(pp->dbi_base + 0x80); - dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); + return ret; } - return ret; + tmp = readl(pp->dbi_base + PCIE_RC_LCSR); + dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); + return 0; } static void imx6_pcie_host_init(struct pcie_port *pp) @@ -432,7 +442,7 @@ static void imx6_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); - imx6_pcie_start_link(pp); + imx6_pcie_establish_link(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); @@ -440,19 +450,19 @@ static void imx6_pcie_host_init(struct pcie_port *pp) static void imx6_pcie_reset_phy(struct pcie_port *pp) { - uint32_t temp; + u32 tmp; - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); - temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | - PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | + PHY_RX_OVRD_IN_LO_RX_PLL_EN); + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); usleep_range(2000, 3000); - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); - temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); + tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); } static int imx6_pcie_link_up(struct pcie_port *pp) diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index 75333b0c4f0a..b75d684aefcd 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -88,7 +88,7 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) { struct pcie_port *pp = &ks_pcie->pp; - int count = 200; + unsigned int retries; dw_pcie_setup_rc(pp); @@ -99,17 +99,15 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) ks_dw_pcie_initiate_link_train(ks_pcie); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; usleep_range(100, 1000); - if (--count) { - ks_dw_pcie_initiate_link_train(ks_pcie); - continue; - } - dev_err(pp->dev, "phy link never came up\n"); - return -EINVAL; + ks_dw_pcie_initiate_link_train(ks_pcie); } - return 0; + dev_err(pp->dev, "phy link never came up\n"); + return -EINVAL; } static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 4a6e62f67579..b2328ea13dcf 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -62,22 +62,27 @@ static int ls_pcie_link_up(struct pcie_port *pp) return 1; } +static int ls_pcie_establish_link(struct pcie_port *pp) +{ + unsigned int retries; + + for (retries = 0; retries < 200; retries++) { + if (dw_pcie_link_up(pp)) + return 0; + usleep_range(100, 1000); + } + + dev_err(pp->dev, "phy link never came up\n"); + return 
-EINVAL; +} + static void ls_pcie_host_init(struct pcie_port *pp) { struct ls_pcie *pcie = to_ls_pcie(pp); - int count = 0; u32 val; dw_pcie_setup_rc(pp); - - while (!ls_pcie_link_up(pp)) { - usleep_range(100, 1000); - count++; - if (count >= 200) { - dev_err(pp->dev, "phy link never came up\n"); - return; - } - } + ls_pcie_establish_link(pp); /* * LS1021A Workaround for internal TKT228622 diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 1ab863551920..70aa09556ec5 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -751,21 +751,6 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) return 1; } -static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct mvebu_pcie *pcie = sys_to_pcie(sys); - struct pci_bus *bus; - - bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr, - &mvebu_pcie_ops, sys, &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - return bus; -} - static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, const struct resource *res, resource_size_t start, @@ -809,12 +794,11 @@ static void mvebu_pcie_enable(struct mvebu_pcie *pcie) hw.nr_controllers = 1; hw.private_data = (void **)&pcie; hw.setup = mvebu_pcie_setup; - hw.scan = mvebu_pcie_scan_bus; hw.map_irq = of_irq_parse_and_map_pci; hw.ops = &mvebu_pcie_ops; hw.align_resource = mvebu_pcie_align_resource; - pci_common_init(&hw); + pci_common_init_dev(&pcie->pdev->dev, &hw); } /* diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 00e92720d7f7..10c05718dbfd 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -630,21 +630,6 @@ static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) return irq; } -static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys) -{ - struct tegra_pcie *pcie = sys_to_pcie(sys); - struct pci_bus *bus; - - bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys, - &sys->resources); - if (!bus) - return NULL; - - pci_scan_child_bus(bus); - - return bus; -} - static irqreturn_t tegra_pcie_isr(int irq, void *arg) { const char *err_msg[] = { @@ -1831,7 +1816,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie) hw.private_data = (void **)&pcie; hw.setup = tegra_pcie_setup; hw.map_irq = tegra_pcie_map_irq; - hw.scan = tegra_pcie_scan_bus; hw.ops = &tegra_pcie_ops; pci_common_init_dev(pcie->dev, &hw); diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c new file mode 100644 index 000000000000..2d31d4d6fd08 --- /dev/null +++ b/drivers/pci/host/pci-xgene-msi.c @@ -0,0 +1,596 @@ +/* + * APM X-Gene MSI Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Tanmay Inamdar <tinamdar@apm.com> + * Duc Dang <dhdang@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/of_pci.h> + +#define MSI_IR0 0x000000 +#define MSI_INT0 0x800000 +#define IDX_PER_GROUP 8 +#define IRQS_PER_IDX 16 +#define NR_HW_IRQS 16 +#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) + +struct xgene_msi_group { + struct xgene_msi *msi; + int gic_irq; + u32 msi_grp; +}; + +struct xgene_msi { + struct device_node *node; + struct msi_controller mchip; + struct irq_domain *domain; + u64 msi_addr; + void __iomem *msi_regs; + unsigned long *bitmap; + struct mutex bitmap_lock; + struct xgene_msi_group *msi_groups; + int num_cpus; +}; + +/* Global data */ +static struct xgene_msi xgene_msi_ctrl; + +static struct irq_chip xgene_msi_top_irq_chip = { + .name = "X-Gene1 MSI", + .irq_enable = pci_msi_unmask_irq, + .irq_disable = pci_msi_mask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_unmask = pci_msi_unmask_irq, +}; + +static struct msi_domain_info xgene_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &xgene_msi_top_irq_chip, +}; + +/* + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where + * n is the group number (0..F) and x is the index of a register within + * each group (0..7). The register layout is as follows: + * MSI0IR0 base_addr + * MSI0IR1 base_addr + 0x10000 + * ... ... + * MSI0IR6 base_addr + 0x60000 + * MSI0IR7 base_addr + 0x70000 + * MSI1IR0 base_addr + 0x80000 + * MSI1IR1 base_addr + 0x90000 + * ... ... + * MSI1IR7 base_addr + 0xF0000 + * MSI2IR0 base_addr + 0x100000 + * ... ... + * MSIFIR0 base_addr + 0x780000 + * MSIFIR1 base_addr + 0x790000 + * ... ... + * MSIFIR7 base_addr + 0x7F0000 + * MSIINT0 base_addr + 0x800000 + * MSIINT1 base_addr + 0x810000 + * ... ... + * MSIINTF base_addr + 0x8F0000 + * + * Each index register supports 16 MSI vectors (0..15) to generate interrupts. + * There are a total of 16 GIC IRQs assigned to these 16 groups of MSI + * termination registers. + * + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate + * the MSI pending status caused by 1 of its 8 index registers. + */ + +/* MSInIRx read helper */ +static u32 xgene_msi_ir_read(struct xgene_msi *msi, + u32 msi_grp, u32 msir_idx) +{ + return readl_relaxed(msi->msi_regs + MSI_IR0 + + (msi_grp << 19) + (msir_idx << 16)); +} + +/* MSIINTn read helper */ +static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) +{ + return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); +} + +/* + * With 2048 MSI vectors supported, the MSI message can be constructed using + * the following scheme: + * - Divide into 8 256-vector groups + * Group 0: 0-255 + * Group 1: 256-511 + * Group 2: 512-767 + * ... + * Group 7: 1792-2047 + * - Each 256-vector group is divided into 16 16-vector groups + * As an example: the 16 16-vector groups for 256-vector group 0-255 are + * Group 0: 0-15 + * Group 1: 16-31 + * ... 
+ * Group 15: 240-255 + * - The termination address of MSI vector in 256-vector group n and 16-vector + * group x is the address of MSIxIRn + * - The data for MSI vector in 16-vector group x is x + */ +static u32 hwirq_to_reg_set(unsigned long hwirq) +{ + return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); +} + +static u32 hwirq_to_group(unsigned long hwirq) +{ + return (hwirq % NR_HW_IRQS); +} + +static u32 hwirq_to_msi_data(unsigned long hwirq) +{ + return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); +} + +static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct xgene_msi *msi = irq_data_get_irq_chip_data(data); + u32 reg_set = hwirq_to_reg_set(data->hwirq); + u32 group = hwirq_to_group(data->hwirq); + u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); + + msg->address_hi = upper_32_bits(target_addr); + msg->address_lo = lower_32_bits(target_addr); + msg->data = hwirq_to_msi_data(data->hwirq); +} + +/* + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain + * the expected behaviour of .set_affinity for each MSI interrupt, the 16 + * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs + * for each core). The MSI vector is moved from one MSI GIC IRQ to another + * MSI GIC IRQ to steer its MSI interrupt to the correct X-Gene v1 core. As a + * consequence, the total MSI vectors that X-Gene v1 supports will be + * reduced to 256 (2048/8) vectors. + */ +static int hwirq_to_cpu(unsigned long hwirq) +{ + return (hwirq % xgene_msi_ctrl.num_cpus); +} + +static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) +{ + return (hwirq - hwirq_to_cpu(hwirq)); +} + +static int xgene_msi_set_affinity(struct irq_data *irqdata, + const struct cpumask *mask, bool force) +{ + int target_cpu = cpumask_first(mask); + int curr_cpu; + + curr_cpu = hwirq_to_cpu(irqdata->hwirq); + if (curr_cpu == target_cpu) + return IRQ_SET_MASK_OK_DONE; + + /* Update MSI number to target the new CPU */ + irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; + + return IRQ_SET_MASK_OK; +} + +static struct irq_chip xgene_msi_bottom_irq_chip = { + .name = "MSI", + .irq_set_affinity = xgene_msi_set_affinity, + .irq_compose_msi_msg = xgene_compose_msi_msg, +}; + +static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct xgene_msi *msi = domain->host_data; + int msi_irq; + + mutex_lock(&msi->bitmap_lock); + + msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, + msi->num_cpus, 0); + if (msi_irq < NR_MSI_VEC) + bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); + else + msi_irq = -ENOSPC; + + mutex_unlock(&msi->bitmap_lock); + + if (msi_irq < 0) + return msi_irq; + + irq_domain_set_info(domain, virq, msi_irq, + &xgene_msi_bottom_irq_chip, domain->host_data, + handle_simple_irq, NULL, NULL); + set_irq_flags(virq, IRQF_VALID); + + return 0; +} + +static void xgene_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct xgene_msi *msi = irq_data_get_irq_chip_data(d); + u32 hwirq; + + mutex_lock(&msi->bitmap_lock); + + hwirq = hwirq_to_canonical_hwirq(d->hwirq); + bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = xgene_irq_domain_alloc, + .free = xgene_irq_domain_free, +};
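To make the address/data scheme concrete, here is a worked example derived purely from the helpers and the register layout comment above (illustrative, not part of the patch):

	/*
	 * For hwirq 291 (0x123):
	 *	reg_set = 291 / 256       = 1
	 *	group   = 291 % 16        = 3
	 *	data    = (291 / 16) % 16 = 2
	 * so the composed message targets MSI3IR1, i.e.
	 * msi_addr + ((8 * 3 + 1) << 16) = msi_addr + 0x190000, with payload 2,
	 * and the ISR below reverses it: ((1 * 16 + 2) * 16) + 3 = 291.
	 */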
+ +static int xgene_allocate_domains(struct xgene_msi *msi) +{ + msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, + &msi_domain_ops, msi); + if (!msi->domain) + return -ENOMEM; + + msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, + &xgene_msi_domain_info, + msi->domain); + + if (!msi->mchip.domain) { + irq_domain_remove(msi->domain); + return -ENOMEM; + } + + return 0; +} + +static void xgene_free_domains(struct xgene_msi *msi) +{ + if (msi->mchip.domain) + irq_domain_remove(msi->mchip.domain); + if (msi->domain) + irq_domain_remove(msi->domain); +} + +static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) +{ + int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); + + xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); + if (!xgene_msi->bitmap) + return -ENOMEM; + + mutex_init(&xgene_msi->bitmap_lock); + + xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, + sizeof(struct xgene_msi_group), + GFP_KERNEL); + if (!xgene_msi->msi_groups) + return -ENOMEM; + + return 0; +} + +static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct xgene_msi_group *msi_groups; + struct xgene_msi *xgene_msi; + unsigned int virq; + int msir_index, msir_val, hw_irq; + u32 intr_index, grp_select, msi_grp; + + chained_irq_enter(chip, desc); + + msi_groups = irq_desc_get_handler_data(desc); + xgene_msi = msi_groups->msi; + msi_grp = msi_groups->msi_grp; + + /* + * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt. + * If bit x of this register is set (x is 0..7), one or more interrupts + * corresponding to MSInIRx are set. + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + while (grp_select) { + msir_index = ffs(grp_select) - 1; + /* + * Calculate the MSInIRx address to read to check for interrupts + * (refer to the termination address and data assignment + * described in xgene_compose_msi_msg()) + */ + msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); + while (msir_val) { + intr_index = ffs(msir_val) - 1; + /* + * Calculate the MSI vector number (refer to the termination + * address and data assignment described in + * the xgene_compose_msi_msg() function) + */ + hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * + NR_HW_IRQS) + msi_grp; + /* + * As we have multiple hw_irqs that map to a single MSI, + * always look up the virq using the hw_irq as seen from + * CPU0 + */ + hw_irq = hwirq_to_canonical_hwirq(hw_irq); + virq = irq_find_mapping(xgene_msi->domain, hw_irq); + WARN_ON(!virq); + if (virq != 0) + generic_handle_irq(virq); + msir_val &= ~(1 << intr_index); + } + grp_select &= ~(1 << msir_index); + + if (!grp_select) { + /* + * We handled all interrupts that happened in this group; + * resample this group's MSIINTn register in case + * something else has been made pending in the meantime + */ + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); + } + } + + chained_irq_exit(chip, desc); +} + +static int xgene_msi_remove(struct platform_device *pdev) +{ + int virq, i; + struct xgene_msi *msi = platform_get_drvdata(pdev); + + for (i = 0; i < NR_HW_IRQS; i++) { + virq = msi->msi_groups[i].gic_irq; + if (virq != 0) { + irq_set_chained_handler(virq, NULL); + irq_set_handler_data(virq, NULL); + } + } + kfree(msi->msi_groups); + + kfree(msi->bitmap); + msi->bitmap = NULL; + + xgene_free_domains(msi); + + return 0; +} + +static int xgene_msi_hwirq_alloc(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + cpumask_var_t mask; + int i; + int err; + + for (i = cpu; i < NR_HW_IRQS; 
i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler(msi_group->gic_irq, + xgene_msi_isr); + err = irq_set_handler_data(msi_group->gic_irq, msi_group); + if (err) { + pr_err("failed to register GIC IRQ handler\n"); + return -EINVAL; + } + /* + * Statically allocate MSI GIC IRQs to each CPU core. + * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated + * to each core. + */ + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + err = irq_set_affinity(msi_group->gic_irq, mask); + if (err) + pr_err("failed to set affinity for GIC IRQ"); + free_cpumask_var(mask); + } else { + pr_err("failed to alloc CPU mask for affinity\n"); + err = -EINVAL; + } + + if (err) { + irq_set_chained_handler(msi_group->gic_irq, NULL); + irq_set_handler_data(msi_group->gic_irq, NULL); + return err; + } + } + + return 0; +} + +static void xgene_msi_hwirq_free(unsigned int cpu) +{ + struct xgene_msi *msi = &xgene_msi_ctrl; + struct xgene_msi_group *msi_group; + int i; + + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { + msi_group = &msi->msi_groups[i]; + if (!msi_group->gic_irq) + continue; + + irq_set_chained_handler(msi_group->gic_irq, NULL); + irq_set_handler_data(msi_group->gic_irq, NULL); + } +} + +static int xgene_msi_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + xgene_msi_hwirq_alloc(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + xgene_msi_hwirq_free(cpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block xgene_msi_cpu_notifier = { + .notifier_call = xgene_msi_cpu_callback, +}; + +static const struct of_device_id xgene_msi_match_table[] = { + {.compatible = "apm,xgene1-msi"}, + {}, +}; + +static int xgene_msi_probe(struct platform_device *pdev) +{ + struct resource *res; + int rc, irq_index; + struct xgene_msi *xgene_msi; + unsigned int cpu; + int virt_msir; + u32 msi_val, msi_idx; + + xgene_msi = &xgene_msi_ctrl; + + platform_set_drvdata(pdev, xgene_msi); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xgene_msi->msi_regs)) { + dev_err(&pdev->dev, "no reg space\n"); + rc = -EINVAL; + goto error; + } + xgene_msi->msi_addr = res->start; + + xgene_msi->num_cpus = num_possible_cpus(); + + rc = xgene_msi_init_allocator(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); + goto error; + } + + rc = xgene_allocate_domains(xgene_msi); + if (rc) { + dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); + goto error; + } + + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + virt_msir = platform_get_irq(pdev, irq_index); + if (virt_msir < 0) { + dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", + irq_index); + rc = -EINVAL; + goto error; + } + xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; + xgene_msi->msi_groups[irq_index].msi_grp = irq_index; + xgene_msi->msi_groups[irq_index].msi = xgene_msi; + } + + /* + * MSInIRx registers are read-to-clear; before registering + * interrupt handlers, read all of them to clear spurious + * interrupts that may occur before the driver is probed. 
+ */ + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { + for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) + msi_val = xgene_msi_ir_read(xgene_msi, irq_index, + msi_idx); + /* Read MSIINTn to confirm */ + msi_val = xgene_msi_int_read(xgene_msi, irq_index); + if (msi_val) { + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); + rc = -EINVAL; + goto error; + } + } + + cpu_notifier_register_begin(); + + for_each_online_cpu(cpu) + if (xgene_msi_hwirq_alloc(cpu)) { + dev_err(&pdev->dev, "failed to register MSI handlers\n"); + cpu_notifier_register_done(); + goto error; + } + + rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); + if (rc) { + dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); + cpu_notifier_register_done(); + goto error; + } + + cpu_notifier_register_done(); + + xgene_msi->mchip.of_node = pdev->dev.of_node; + rc = of_pci_msi_chip_add(&xgene_msi->mchip); + if (rc) { + dev_err(&pdev->dev, "failed to add MSI controller chip\n"); + goto error_notifier; + } + + dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); + + return 0; + +error_notifier: + unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); +error: + xgene_msi_remove(pdev); + return rc; +} + +static struct platform_driver xgene_msi_driver = { + .driver = { + .name = "xgene-msi", + .owner = THIS_MODULE, + .of_match_table = xgene_msi_match_table, + }, + .probe = xgene_msi_probe, + .remove = xgene_msi_remove, +}; + +static int __init xgene_pcie_msi_init(void) +{ + return platform_driver_register(&xgene_msi_driver); +} +subsys_initcall(xgene_pcie_msi_init); diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index ee082c0366ec..a9dfb70d623a 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -59,6 +59,12 @@ #define SZ_1T (SZ_1G*1024ULL) #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) +#define ROOT_CAP_AND_CTRL 0x5C + +/* PCIe IP version */ +#define XGENE_PCIE_IP_VER_UNKN 0 +#define XGENE_PCIE_IP_VER_1 1 + struct xgene_pcie_port { struct device_node *node; struct device *dev; @@ -67,6 +73,7 @@ struct xgene_pcie_port { void __iomem *cfg_base; unsigned long cfg_addr; bool link_up; + u32 version; }; static inline u32 pcie_bar_low_val(u32 addr, u32 flags) @@ -130,9 +137,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { - struct xgene_pcie_port *port = bus->sysdata; - - if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up || + if ((pci_is_root_bus(bus) && devfn != 0) || xgene_pcie_hide_rc_bars(bus, offset)) return NULL; @@ -140,9 +145,37 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, return xgene_pcie_get_cfg_base(bus) + offset; } +static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct xgene_pcie_port *port = bus->sysdata; + + if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != + PCIBIOS_SUCCESSFUL) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* + * The v1 controller has a bug in its Configuration Request + * Retry Status (CRS) logic: when CRS is enabled and we read the + * Vendor and Device ID of a non-existent device, the controller + * fabricates return data of 0xFFFF0001 ("device exists but is not + * ready") instead of 0xFFFFFFFF ("device does not exist"). This + * causes the PCI core to retry the read until it times out. + * Avoid this by not claiming to support CRS. 
+ */ + if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && + ((where & ~0x3) == ROOT_CAP_AND_CTRL)) + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + + if (size <= 2) + *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + + return PCIBIOS_SUCCESSFUL; +} + static struct pci_ops xgene_pcie_ops = { .map_bus = xgene_pcie_map_bus, - .read = pci_generic_config_read32, + .read = xgene_pcie_config_read32, .write = pci_generic_config_write32, }; @@ -468,6 +501,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, return 0; } +static int xgene_pcie_msi_enable(struct pci_bus *bus) +{ + struct device_node *msi_node; + + msi_node = of_parse_phandle(bus->dev.of_node, + "msi-parent", 0); + if (!msi_node) + return -ENODEV; + + bus->msi = of_pci_find_msi_chip_by_node(msi_node); + if (!bus->msi) + return -ENODEV; + + bus->msi->dev = &bus->dev; + return 0; +} + static int xgene_pcie_probe_bridge(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; @@ -483,6 +533,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) port->node = of_node_get(pdev->dev.of_node); port->dev = &pdev->dev; + port->version = XGENE_PCIE_IP_VER_UNKN; + if (of_device_is_compatible(port->node, "apm,xgene-pcie")) + port->version = XGENE_PCIE_IP_VER_1; + ret = xgene_pcie_map_reg(port, pdev); if (ret) return ret; @@ -504,6 +558,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (!bus) return -ENOMEM; + if (IS_ENABLED(CONFIG_PCI_MSI)) + if (xgene_pcie_msi_enable(bus)) + dev_info(port->dev, "failed to enable MSI\n"); + pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); pci_bus_add_devices(bus); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 2e9f84fdd9ce..69486be7181e 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -31,6 +31,7 @@ #define PORT_LINK_MODE_1_LANES (0x1 << 16) #define PORT_LINK_MODE_2_LANES (0x3 << 16) #define PORT_LINK_MODE_4_LANES (0x7 << 16) +#define PORT_LINK_MODE_8_LANES (0xf << 16) #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) @@ -38,6 +39,7 @@ #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) +#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) #define PCIE_MSI_ADDR_LO 0x820 #define PCIE_MSI_ADDR_HI 0x824 @@ -150,6 +152,21 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, return ret; } +static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, + int type, u64 cpu_addr, u64 pci_addr, u32 size) +{ + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, + PCIE_ATU_VIEWPORT); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); + dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), + PCIE_ATU_LIMIT); + dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); + dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); +} + static struct irq_chip dw_msi_irq_chip = { .name = "PCI-MSI", .irq_enable = pci_msi_unmask_irq, @@ -493,6 +510,11 @@ int dw_pcie_host_init(struct pcie_port *pp) if (pp->ops->host_init) pp->ops->host_init(pp); + if (!pp->ops->rd_other_conf) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_MEM, 
pp->mem_mod_base, + pp->mem_bus_addr, pp->mem_size); + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); /* program correct class for RC */ @@ -515,115 +537,73 @@ int dw_pcie_host_init(struct pcie_port *pp) return 0; } -static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) -{ - /* Program viewport 0 : OUTBOUND : CFG0 */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - -static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) -{ - /* Program viewport 1 : OUTBOUND : CFG1 */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - -static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) -{ - /* Program viewport 0 : OUTBOUND : MEM */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr), - PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - -static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) -{ - /* Program viewport 1 : OUTBOUND : IO */ - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, - PCIE_ATU_VIEWPORT); - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); - dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); - dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, - PCIE_ATU_LIMIT); - dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); - dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), - PCIE_ATU_UPPER_TARGET); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -} - static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { - int ret = PCIBIOS_SUCCESSFUL; - u32 address, busdev; + int ret, type; + u32 address, busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - dw_pcie_prog_viewport_cfg0(pp, busdev); - ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, - val); - dw_pcie_prog_viewport_mem_outbound(pp); + type = 
PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_mod_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; } else { - dw_pcie_prog_viewport_cfg1(pp, busdev); - ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, - val); - dw_pcie_prog_viewport_io_outbound(pp); + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_mod_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; } + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_cfg_read(va_cfg_base + address, where, size, val); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pp->io_mod_base, + pp->io_bus_addr, pp->io_size); + return ret; } static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - int ret = PCIBIOS_SUCCESSFUL; - u32 address, busdev; + int ret, type; + u32 address, busdev, cfg_size; + u64 cpu_addr; + void __iomem *va_cfg_base; busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); address = where & ~0x3; if (bus->parent->number == pp->root_bus_nr) { - dw_pcie_prog_viewport_cfg0(pp, busdev); - ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, - val); - dw_pcie_prog_viewport_mem_outbound(pp); + type = PCIE_ATU_TYPE_CFG0; + cpu_addr = pp->cfg0_mod_base; + cfg_size = pp->cfg0_size; + va_cfg_base = pp->va_cfg0_base; } else { - dw_pcie_prog_viewport_cfg1(pp, busdev); - ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, - val); - dw_pcie_prog_viewport_io_outbound(pp); + type = PCIE_ATU_TYPE_CFG1; + cpu_addr = pp->cfg1_mod_base; + cfg_size = pp->cfg1_size; + va_cfg_base = pp->va_cfg1_base; } + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + type, cpu_addr, + busdev, cfg_size); + ret = dw_pcie_cfg_write(va_cfg_base + address, where, size, val); + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, + PCIE_ATU_TYPE_IO, pp->io_mod_base, + pp->io_bus_addr, pp->io_size); + return ret; } @@ -728,13 +708,11 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) struct pcie_port *pp = sys_to_pcie(sys); pp->root_bus_nr = sys->busnr; - bus = pci_create_root_bus(pp->dev, sys->busnr, + bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops, sys, &sys->resources); if (!bus) return NULL; - pci_scan_child_bus(bus); - if (bus && pp->ops->scan_bus) pp->ops->scan_bus(pp); @@ -778,6 +756,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) case 4: val |= PORT_LINK_MODE_4_LANES; break; + case 8: + val |= PORT_LINK_MODE_8_LANES; + break; } dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); @@ -794,6 +775,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) case 4: val |= PORT_LOGIC_LINK_WIDTH_4_LANES; break; + case 8: + val |= PORT_LOGIC_LINK_WIDTH_8_LANES; + break; } dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c new file mode 100644 index 000000000000..96a7d999fd5e --- /dev/null +++ b/drivers/pci/host/pcie-iproc-bcma.c @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/phy/phy.h> +#include <linux/bcma/bcma.h> +#include <linux/ioport.h> + +#include "pcie-iproc.h" + + +/* NS: CLASS field is R/O, and set to wrong 0x200 value */ +static void bcma_pcie2_fixup_class(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_PCI << 8; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); + +static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_sys_data *sys = dev->sysdata; + struct iproc_pcie *pcie = sys->private_data; + struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); + + return bcma_core_irq(bdev, 5); +} + +static int iproc_pcie_bcma_probe(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie; + LIST_HEAD(res); + struct resource res_mem; + int ret; + + pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = &bdev->dev; + bcma_set_drvdata(bdev, pcie); + + pcie->base = bdev->io_addr; + + res_mem.start = bdev->addr_s[0]; + res_mem.end = bdev->addr_s[0] + SZ_128M - 1; + res_mem.name = "PCIe MEM space"; + res_mem.flags = IORESOURCE_MEM; + pci_add_resource(&res, &res_mem); + + pcie->map_irq = iproc_pcie_bcma_map_irq; + + ret = iproc_pcie_setup(pcie, &res); + if (ret) + dev_err(pcie->dev, "PCIe controller setup failed\n"); + + pci_free_resource_list(&res); + + return ret; +} + +static void iproc_pcie_bcma_remove(struct bcma_device *bdev) +{ + struct iproc_pcie *pcie = bcma_get_drvdata(bdev); + + iproc_pcie_remove(pcie); +} + +static const struct bcma_device_id iproc_pcie_bcma_table[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), + {}, +}; +MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); + +static struct bcma_driver iproc_pcie_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = iproc_pcie_bcma_table, + .probe = iproc_pcie_bcma_probe, + .remove = iproc_pcie_bcma_remove, +}; + +static int __init iproc_pcie_bcma_init(void) +{ + return bcma_driver_register(&iproc_pcie_bcma_driver); +} +module_init(iproc_pcie_bcma_init); + +static void __exit iproc_pcie_bcma_exit(void) +{ + bcma_driver_unregister(&iproc_pcie_bcma_driver); +} +module_exit(iproc_pcie_bcma_exit); + +MODULE_AUTHOR("Hauke Mehrtens"); +MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index afad6c21fcfa..9aedc8eb2c6e 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -69,15 +69,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) return ret; } - pcie->resources = &res; + pcie->map_irq = of_irq_parse_and_map_pci; - ret = iproc_pcie_setup(pcie); - if (ret) { + ret = iproc_pcie_setup(pcie, &res); + if (ret) dev_err(pcie->dev, "PCIe controller setup failed\n"); - return ret; - } - return 0; + pci_free_resource_list(&res); + + return ret; } static int iproc_pcie_pltfm_remove(struct platform_device *pdev) diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c 
index 329e1b54528b..d77481ea553e 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -183,7 +183,7 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie) writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); } -int iproc_pcie_setup(struct iproc_pcie *pcie) +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { int ret; struct pci_bus *bus; @@ -211,7 +211,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie) pcie->sysdata.private_data = pcie; bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, - &pcie->sysdata, pcie->resources); + &pcie->sysdata, res); if (!bus) { dev_err(pcie->dev, "unable to create PCI root bus\n"); ret = -ENOMEM; @@ -229,7 +229,7 @@ int iproc_pcie_setup(struct iproc_pcie *pcie) pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); + pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); pci_bus_add_devices(bus); return 0; diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index e28075ed1856..ba0a108309cc 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -29,14 +29,14 @@ struct iproc_pcie { struct device *dev; void __iomem *base; - struct list_head *resources; struct pci_sys_data sysdata; struct pci_bus *root_bus; struct phy *phy; int irqs[IPROC_PCIE_MAX_NUM_IRQS]; + int (*map_irq)(const struct pci_dev *, u8, u8); }; -int iproc_pcie_setup(struct iproc_pcie *pcie); +int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); int iproc_pcie_remove(struct iproc_pcie *pcie); #endif /* _PCIE_IPROC_H */ diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c index 020d78890719..dfec4281bd50 100644 --- a/drivers/pci/host/pcie-spear13xx.c +++ b/drivers/pci/host/pcie-spear13xx.c @@ -146,10 +146,10 @@ struct pcie_app_reg { static int spear13xx_pcie_establish_link(struct pcie_port *pp) { u32 val; - int count = 0; struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; u32 exp_cap_off = EXP_CAP_ID_OFFSET; + unsigned int retries; if (dw_pcie_link_up(pp)) { dev_err(pp->dev, "link already up\n"); @@ -201,17 +201,16 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp) &app_reg->app_ctrl_0); /* check if the link is up or not */ - while (!dw_pcie_link_up(pp)) { - mdelay(100); - count++; - if (count == 10) { - dev_err(pp->dev, "link Fail\n"); - return -EINVAL; + for (retries = 0; retries < 10; retries++) { + if (dw_pcie_link_up(pp)) { + dev_info(pp->dev, "link up\n"); + return 0; } + mdelay(100); } - dev_info(pp->dev, "link up\n"); - return 0; + dev_err(pp->dev, "link Fail\n"); + return -EINVAL; } static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile index 4a9aa08b08f1..b616e7588ff4 100644 --- a/drivers/pci/hotplug/Makefile +++ b/drivers/pci/hotplug/Makefile @@ -61,9 +61,6 @@ pciehp-objs := pciehp_core.o \ pciehp_ctrl.o \ pciehp_pci.o \ pciehp_hpc.o -ifdef CONFIG_ACPI -pciehp-objs += pciehp_acpi.o -endif shpchp-objs := shpchp_core.o \ shpchp_ctrl.o \ diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index bcb90e4888dd..ff538568a617 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -632,15 +632,14 @@ static void trim_stale_devices(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); struct pci_bus *bus = 
dev->subordinate; - bool alive = false; + bool alive = dev->ignore_hotplug; if (adev) { acpi_status status; unsigned long long sta; status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); - alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) - || dev->ignore_hotplug; + alive = alive || (ACPI_SUCCESS(status) && device_status_valid(sta)); } if (!alive) alive = pci_device_is_present(dev); diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index b11521953485..57cd1327346f 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -132,11 +132,7 @@ struct controller { int pciehp_sysfs_enable_slot(struct slot *slot); int pciehp_sysfs_disable_slot(struct slot *slot); -u8 pciehp_handle_attention_button(struct slot *p_slot); -u8 pciehp_handle_switch_change(struct slot *p_slot); -u8 pciehp_handle_presence_change(struct slot *p_slot); -u8 pciehp_handle_power_fault(struct slot *p_slot); -void pciehp_handle_linkstate_change(struct slot *p_slot); +void pciehp_queue_interrupt_event(struct slot *slot, u32 event_type); int pciehp_configure_device(struct slot *p_slot); int pciehp_unconfigure_device(struct slot *p_slot); void pciehp_queue_pushbutton_work(struct work_struct *work); @@ -167,21 +163,4 @@ static inline const char *slot_name(struct slot *slot) return hotplug_slot_name(slot->hotplug_slot); } -#ifdef CONFIG_ACPI -#include <linux/pci-acpi.h> - -void __init pciehp_acpi_slot_detection_init(void); -int pciehp_acpi_slot_detection_check(struct pci_dev *dev); - -static inline void pciehp_firmware_init(void) -{ - pciehp_acpi_slot_detection_init(); -} -#else -#define pciehp_firmware_init() do {} while (0) -static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev) -{ - return 0; -} -#endif /* CONFIG_ACPI */ #endif /* _PCIEHP_H */ diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c deleted file mode 100644 index 93cc9266e8cb..000000000000 --- a/drivers/pci/hotplug/pciehp_acpi.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * ACPI related functions for PCI Express Hot Plug driver. - * - * Copyright (C) 2008 Kenji Kaneshige - * Copyright (C) 2008 Fujitsu Limited. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - */ - -#include <linux/acpi.h> -#include <linux/pci.h> -#include <linux/pci_hotplug.h> -#include <linux/slab.h> -#include <linux/module.h> -#include "pciehp.h" - -#define PCIEHP_DETECT_PCIE (0) -#define PCIEHP_DETECT_ACPI (1) -#define PCIEHP_DETECT_AUTO (2) -#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO - -struct dummy_slot { - u32 number; - struct list_head list; -}; - -static int slot_detection_mode; -static char *pciehp_detect_mode; -module_param(pciehp_detect_mode, charp, 0444); -MODULE_PARM_DESC(pciehp_detect_mode, - "Slot detection mode: pcie, acpi, auto\n" - " pcie - Use PCIe based slot detection\n" - " acpi - Use ACPI for slot detection\n" - " auto(default) - Auto select mode. Use acpi option if duplicate\n" - " slot ids are found. Otherwise, use pcie option\n"); - -int pciehp_acpi_slot_detection_check(struct pci_dev *dev) -{ - if (slot_detection_mode != PCIEHP_DETECT_ACPI) - return 0; - if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev))) - return 0; - return -ENODEV; -} - -static int __init parse_detect_mode(void) -{ - if (!pciehp_detect_mode) - return PCIEHP_DETECT_DEFAULT; - if (!strcmp(pciehp_detect_mode, "pcie")) - return PCIEHP_DETECT_PCIE; - if (!strcmp(pciehp_detect_mode, "acpi")) - return PCIEHP_DETECT_ACPI; - if (!strcmp(pciehp_detect_mode, "auto")) - return PCIEHP_DETECT_AUTO; - warn("bad specifier '%s' for pciehp_detect_mode. Use default\n", - pciehp_detect_mode); - return PCIEHP_DETECT_DEFAULT; -} - -static int __initdata dup_slot_id; -static int __initdata acpi_slot_detected; -static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots); - -/* Dummy driver for duplicate name detection */ -static int __init dummy_probe(struct pcie_device *dev) -{ - u32 slot_cap; - acpi_handle handle; - struct dummy_slot *slot, *tmp; - struct pci_dev *pdev = dev->port; - - pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); - slot = kzalloc(sizeof(*slot), GFP_KERNEL); - if (!slot) - return -ENOMEM; - slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19; - list_for_each_entry(tmp, &dummy_slots, list) { - if (tmp->number == slot->number) - dup_slot_id++; - } - list_add_tail(&slot->list, &dummy_slots); - handle = ACPI_HANDLE(&pdev->dev); - if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle)) - acpi_slot_detected = 1; - return -ENODEV; /* dummy driver always returns error */ -} - -static struct pcie_port_service_driver __initdata dummy_driver = { - .name = "pciehp_dummy", - .port_type = PCIE_ANY_PORT, - .service = PCIE_PORT_SERVICE_HP, - .probe = dummy_probe, -}; - -static int __init select_detection_mode(void) -{ - struct dummy_slot *slot, *tmp; - - if (pcie_port_service_register(&dummy_driver)) - return PCIEHP_DETECT_ACPI; - pcie_port_service_unregister(&dummy_driver); - list_for_each_entry_safe(slot, tmp, &dummy_slots, list) { - list_del(&slot->list); - kfree(slot); - } - if (acpi_slot_detected && dup_slot_id) - return PCIEHP_DETECT_ACPI; - return PCIEHP_DETECT_PCIE; -} - -void __init pciehp_acpi_slot_detection_init(void) -{ - slot_detection_mode = parse_detect_mode(); - if (slot_detection_mode != PCIEHP_DETECT_AUTO) - goto out; - slot_detection_mode = select_detection_mode(); -out: - if (slot_detection_mode == PCIEHP_DETECT_ACPI) - info("Using ACPI for slot detection.\n"); -} diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 07aa722bb12c..612b21a14df5 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -77,11 +77,6 @@ static int reset_slot (struct hotplug_slot *slot, 
int probe); */ static void release_slot(struct hotplug_slot *hotplug_slot) { - struct slot *slot = hotplug_slot->private; - - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, hotplug_slot_name(hotplug_slot)); - kfree(hotplug_slot->ops); kfree(hotplug_slot->info); kfree(hotplug_slot); @@ -129,14 +124,10 @@ static int init_slot(struct controller *ctrl) slot->hotplug_slot = hotplug; snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); - ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n", - pci_domain_nr(ctrl->pcie->port->subordinate), - ctrl->pcie->port->subordinate->number, PSN(ctrl)); retval = pci_hp_register(hotplug, ctrl->pcie->port->subordinate, 0, name); if (retval) - ctrl_err(ctrl, - "pci_hp_register failed with error %d\n", retval); + ctrl_err(ctrl, "pci_hp_register failed: error %d\n", retval); out: if (retval) { kfree(ops); @@ -158,9 +149,6 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - pciehp_set_attention_status(slot, status); return 0; } @@ -170,9 +158,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - return pciehp_sysfs_enable_slot(slot); } @@ -181,9 +166,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - return pciehp_sysfs_disable_slot(slot); } @@ -191,9 +173,6 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - pciehp_get_power_status(slot, value); return 0; } @@ -202,9 +181,6 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - pciehp_get_attention_status(slot, value); return 0; } @@ -213,9 +189,6 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - pciehp_get_latch_status(slot, value); return 0; } @@ -224,9 +197,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - pciehp_get_adapter_status(slot, value); return 0; } @@ -235,9 +205,6 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe) { struct slot *slot = hotplug_slot->private; - ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", - __func__, slot_name(slot)); - return pciehp_reset_slot(slot, probe); } @@ -248,24 +215,21 @@ static int pciehp_probe(struct pcie_device *dev) struct slot *slot; u8 occupied, poweron; - if (pciehp_force) - dev_info(&dev->device, - "Bypassing BIOS check for pciehp use on %s\n", - pci_name(dev->port)); - else if (pciehp_acpi_slot_detection_check(dev->port)) - goto err_out_none; + /* If this is not a "hotplug" service, we have no business here. 
*/ + if (dev->service != PCIE_PORT_SERVICE_HP) + return -ENODEV; if (!dev->port->subordinate) { /* Can happen if we run out of bus numbers during probe */ dev_err(&dev->device, "Hotplug bridge without secondary bus, ignoring\n"); - goto err_out_none; + return -ENODEV; } ctrl = pcie_init(dev); if (!ctrl) { dev_err(&dev->device, "Controller initialization failed\n"); - goto err_out_none; + return -ENODEV; } set_service_data(dev, ctrl); @@ -275,14 +239,14 @@ static int pciehp_probe(struct pcie_device *dev) if (rc == -EBUSY) ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n"); else - ctrl_err(ctrl, "Slot initialization failed\n"); + ctrl_err(ctrl, "Slot initialization failed (%d)\n", rc); goto err_out_release_ctlr; } /* Enable events after we have setup the data structures */ rc = pcie_init_notification(ctrl); if (rc) { - ctrl_err(ctrl, "Notification initialization failed\n"); + ctrl_err(ctrl, "Notification initialization failed (%d)\n", rc); goto err_out_free_ctrl_slot; } @@ -305,7 +269,6 @@ err_out_free_ctrl_slot: cleanup_slot(ctrl); err_out_release_ctlr: pciehp_release_ctrl(ctrl); -err_out_none: return -ENODEV; } @@ -366,7 +329,6 @@ static int __init pcied_init(void) { int retval = 0; - pciehp_firmware_init(); retval = pcie_port_service_register(&hpdriver_portdrv); dbg("pcie_port_service_register = %d\n", retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index f052e951b23e..f3796124ad7c 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -37,138 +37,20 @@ static void interrupt_event_handler(struct work_struct *work); -static int queue_interrupt_event(struct slot *p_slot, u32 event_type) +void pciehp_queue_interrupt_event(struct slot *p_slot, u32 event_type) { struct event_info *info; info = kmalloc(sizeof(*info), GFP_ATOMIC); - if (!info) - return -ENOMEM; + if (!info) { + ctrl_err(p_slot->ctrl, "dropped event %d (ENOMEM)\n", event_type); + return; + } + INIT_WORK(&info->work, interrupt_event_handler); info->event_type = event_type; info->p_slot = p_slot; - INIT_WORK(&info->work, interrupt_event_handler); - queue_work(p_slot->wq, &info->work); - - return 0; -} - -u8 pciehp_handle_attention_button(struct slot *p_slot) -{ - u32 event_type; - struct controller *ctrl = p_slot->ctrl; - - /* Attention Button Change */ - ctrl_dbg(ctrl, "Attention button interrupt received\n"); - - /* - * Button pressed - See if need to TAKE ACTION!!! 
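pciehp_queue_interrupt_event() above runs in interrupt context, which dictates its two quirks: the allocation must be GFP_ATOMIC, and an allocation failure can only be logged and the event dropped, since there is no caller left to propagate an error to. The heavy lifting then happens later in the work handler, in process context. A stripped-down sketch of the same shape, with invented names:

        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct example_event {
                struct work_struct work;
                u32 type;
        };

        static void example_event_fn(struct work_struct *work)
        {
                struct example_event *ev =
                        container_of(work, struct example_event, work);

                /* slow-path handling runs here, in process context */
                kfree(ev);
        }

        static void example_queue_event(struct workqueue_struct *wq, u32 type)
        {
                struct example_event *ev;

                ev = kmalloc(sizeof(*ev), GFP_ATOMIC); /* IRQ context: no sleep */
                if (!ev)
                        return;                        /* drop it, never block */

                INIT_WORK(&ev->work, example_event_fn);
                ev->type = type;
                queue_work(wq, &ev->work);
        }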
- */ - ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot)); - event_type = INT_BUTTON_PRESS; - - queue_interrupt_event(p_slot, event_type); - - return 0; -} - -u8 pciehp_handle_switch_change(struct slot *p_slot) -{ - u8 getstatus; - u32 event_type; - struct controller *ctrl = p_slot->ctrl; - - /* Switch Change */ - ctrl_dbg(ctrl, "Switch interrupt received\n"); - - pciehp_get_latch_status(p_slot, &getstatus); - if (getstatus) { - /* - * Switch opened - */ - ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot)); - event_type = INT_SWITCH_OPEN; - } else { - /* - * Switch closed - */ - ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot)); - event_type = INT_SWITCH_CLOSE; - } - - queue_interrupt_event(p_slot, event_type); - - return 1; -} - -u8 pciehp_handle_presence_change(struct slot *p_slot) -{ - u32 event_type; - u8 presence_save; - struct controller *ctrl = p_slot->ctrl; - - /* Presence Change */ - ctrl_dbg(ctrl, "Presence/Notify input change\n"); - - /* Switch is open, assume a presence change - * Save the presence state - */ - pciehp_get_adapter_status(p_slot, &presence_save); - if (presence_save) { - /* - * Card Present - */ - ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot)); - event_type = INT_PRESENCE_ON; - } else { - /* - * Not Present - */ - ctrl_info(ctrl, "Card not present on Slot(%s)\n", - slot_name(p_slot)); - event_type = INT_PRESENCE_OFF; - } - - queue_interrupt_event(p_slot, event_type); - - return 1; -} - -u8 pciehp_handle_power_fault(struct slot *p_slot) -{ - u32 event_type; - struct controller *ctrl = p_slot->ctrl; - - /* power fault */ - ctrl_dbg(ctrl, "Power fault interrupt received\n"); - ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); - event_type = INT_POWER_FAULT; - ctrl_info(ctrl, "Power fault bit %x set\n", 0); - queue_interrupt_event(p_slot, event_type); - - return 1; -} - -void pciehp_handle_linkstate_change(struct slot *p_slot) -{ - u32 event_type; - struct controller *ctrl = p_slot->ctrl; - - /* Link Status Change */ - ctrl_dbg(ctrl, "Data Link Layer State change\n"); - - if (pciehp_check_link_active(ctrl)) { - ctrl_info(ctrl, "slot(%s): Link Up event\n", - slot_name(p_slot)); - event_type = INT_LINK_UP; - } else { - ctrl_info(ctrl, "slot(%s): Link Down event\n", - slot_name(p_slot)); - event_type = INT_LINK_DOWN; - } - - queue_interrupt_event(p_slot, event_type); } /* The following routines constitute the bulk of the @@ -298,10 +180,6 @@ static void pciehp_power_thread(struct work_struct *work) switch (info->req) { case DISABLE_REQ: - ctrl_dbg(p_slot->ctrl, - "Disabling domain:bus:device=%04x:%02x:00\n", - pci_domain_nr(p_slot->ctrl->pcie->port->subordinate), - p_slot->ctrl->pcie->port->subordinate->number); mutex_lock(&p_slot->hotplug_lock); pciehp_disable_slot(p_slot); mutex_unlock(&p_slot->hotplug_lock); @@ -310,10 +188,6 @@ static void pciehp_power_thread(struct work_struct *work) mutex_unlock(&p_slot->lock); break; case ENABLE_REQ: - ctrl_dbg(p_slot->ctrl, - "Enabling domain:bus:device=%04x:%02x:00\n", - pci_domain_nr(p_slot->ctrl->pcie->port->subordinate), - p_slot->ctrl->pcie->port->subordinate->number); mutex_lock(&p_slot->hotplug_lock); ret = pciehp_enable_slot(p_slot); mutex_unlock(&p_slot->hotplug_lock); @@ -416,7 +290,7 @@ static void handle_button_press_event(struct slot *p_slot) ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); break; default: - ctrl_warn(ctrl, "Not a valid state\n"); + ctrl_warn(ctrl, "ignoring invalid state %#x\n", p_slot->state); break; } } @@ 
-507,8 +381,8 @@ static void handle_link_event(struct slot *p_slot, u32 event) } break; default: - ctrl_err(ctrl, "Not a valid state on slot(%s)\n", - slot_name(p_slot)); + ctrl_err(ctrl, "ignoring invalid state %#x on slot(%s)\n", + p_slot->state, slot_name(p_slot)); kfree(info); break; } @@ -532,7 +406,6 @@ static void interrupt_event_handler(struct work_struct *work) pciehp_green_led_off(p_slot); break; case INT_PRESENCE_ON: - ctrl_dbg(ctrl, "Surprise Insertion\n"); handle_surprise_event(p_slot); break; case INT_PRESENCE_OFF: @@ -540,7 +413,6 @@ static void interrupt_event_handler(struct work_struct *work) * Regardless of surprise capability, we need to * definitely remove a card that has been pulled out! */ - ctrl_dbg(ctrl, "Surprise Removal\n"); handle_surprise_event(p_slot); break; case INT_LINK_UP: @@ -647,8 +519,8 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) slot_name(p_slot)); break; default: - ctrl_err(ctrl, "Not a valid state on slot %s\n", - slot_name(p_slot)); + ctrl_err(ctrl, "invalid state %#x on slot %s\n", + p_slot->state, slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); @@ -682,8 +554,8 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot) slot_name(p_slot)); break; default: - ctrl_err(ctrl, "Not a valid state on slot %s\n", - slot_name(p_slot)); + ctrl_err(ctrl, "invalid state %#x on slot %s\n", + p_slot->state, slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 0ebf754fc177..2913f7e68a10 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -176,20 +176,17 @@ static void pcie_wait_cmd(struct controller *ctrl) jiffies_to_msecs(jiffies - ctrl->cmd_started)); } -/** - * pcie_write_cmd - Issue controller command - * @ctrl: controller to which the command is issued - * @cmd: command value written to slot control register - * @mask: bitmask of slot control register to be modified - */ -static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) +static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, + u16 mask, bool wait) { struct pci_dev *pdev = ctrl_dev(ctrl); u16 slot_ctrl; mutex_lock(&ctrl->ctrl_lock); - /* Wait for any previous command that might still be in progress */ + /* + * Always wait for any previous command that might still be in progress + */ pcie_wait_cmd(ctrl); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl); @@ -201,9 +198,33 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) ctrl->cmd_started = jiffies; ctrl->slot_ctrl = slot_ctrl; + /* + * Optionally wait for the hardware to be ready for a new command, + * indicating completion of the above issued command. 
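The pcie_do_write_cmd() refactoring just above is the standard two-wrapper pattern: one worker takes the behavioural flag, and thin named wrappers keep bare true/false arguments out of call sites, which matters once many callers pick between the synchronous and fire-and-forget variants. The shape, reduced to its bones with placeholder names:

        struct example_ctrl;                       /* hypothetical controller */
        static void example_wait_cmd(struct example_ctrl *ctrl);

        static void example_do_write_cmd(struct example_ctrl *ctrl, u16 cmd,
                                         u16 mask, bool wait)
        {
                /* ... read-modify-write the control register ... */
                if (wait)
                        example_wait_cmd(ctrl);    /* block until it latches */
        }

        static void example_write_cmd(struct example_ctrl *ctrl, u16 cmd, u16 mask)
        {
                example_do_write_cmd(ctrl, cmd, mask, true);
        }

        /* Same, but do not wait for the hardware to latch the command. */
        static void example_write_cmd_nowait(struct example_ctrl *ctrl, u16 cmd,
                                             u16 mask)
        {
                example_do_write_cmd(ctrl, cmd, mask, false);
        }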
+ */ + if (wait) + pcie_wait_cmd(ctrl); + mutex_unlock(&ctrl->ctrl_lock); } +/** + * pcie_write_cmd - Issue controller command + * @ctrl: controller to which the command is issued + * @cmd: command value written to slot control register + * @mask: bitmask of slot control register to be modified + */ +static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) +{ + pcie_do_write_cmd(ctrl, cmd, mask, true); +} + +/* Same as above without waiting for the hardware to latch */ +static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask) +{ + pcie_do_write_cmd(ctrl, cmd, mask, false); +} + bool pciehp_check_link_active(struct controller *ctrl) { struct pci_dev *pdev = ctrl_dev(ctrl); @@ -291,7 +312,8 @@ int pciehp_check_link_status(struct controller *ctrl) ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); if ((lnk_status & PCI_EXP_LNKSTA_LT) || !(lnk_status & PCI_EXP_LNKSTA_NLW)) { - ctrl_err(ctrl, "Link Training Error occurs\n"); + ctrl_err(ctrl, "link training error: status %#06x\n", + lnk_status); return -1; } @@ -422,7 +444,7 @@ void pciehp_set_attention_status(struct slot *slot, u8 value) default: return; } - pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); + pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); } @@ -434,7 +456,8 @@ void pciehp_green_led_on(struct slot *slot) if (!PWR_LED(ctrl)) return; - pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC); + pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, + PCI_EXP_SLTCTL_PIC); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PWR_IND_ON); @@ -447,7 +470,8 @@ void pciehp_green_led_off(struct slot *slot) if (!PWR_LED(ctrl)) return; - pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC); + pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + PCI_EXP_SLTCTL_PIC); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PWR_IND_OFF); @@ -460,7 +484,8 @@ void pciehp_green_led_blink(struct slot *slot) if (!PWR_LED(ctrl)) return; - pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC); + pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, + PCI_EXP_SLTCTL_PIC); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, PCI_EXP_SLTCTL_PWR_IND_BLINK); @@ -510,6 +535,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) struct pci_dev *dev; struct slot *slot = ctrl->slot; u16 detected, intr_loc; + u8 open, present; + bool link; /* * In order to guarantee that all interrupt events are @@ -532,7 +559,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) intr_loc); } while (detected); - ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", intr_loc); /* Check Command Complete Interrupt Pending */ if (intr_loc & PCI_EXP_SLTSTA_CC) { @@ -555,25 +582,44 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) return IRQ_HANDLED; /* Check MRL Sensor Changed */ - if (intr_loc & PCI_EXP_SLTSTA_MRLSC) - pciehp_handle_switch_change(slot); + if (intr_loc & PCI_EXP_SLTSTA_MRLSC) { + pciehp_get_latch_status(slot, &open); + ctrl_info(ctrl, "Latch %s on Slot(%s)\n", + open ? "open" : "close", slot_name(slot)); + pciehp_queue_interrupt_event(slot, open ? 
INT_SWITCH_OPEN : + INT_SWITCH_CLOSE); + } /* Check Attention Button Pressed */ - if (intr_loc & PCI_EXP_SLTSTA_ABP) - pciehp_handle_attention_button(slot); + if (intr_loc & PCI_EXP_SLTSTA_ABP) { + ctrl_info(ctrl, "Button pressed on Slot(%s)\n", + slot_name(slot)); + pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS); + } /* Check Presence Detect Changed */ - if (intr_loc & PCI_EXP_SLTSTA_PDC) - pciehp_handle_presence_change(slot); + if (intr_loc & PCI_EXP_SLTSTA_PDC) { + pciehp_get_adapter_status(slot, &present); + ctrl_info(ctrl, "Card %spresent on Slot(%s)\n", + present ? "" : "not ", slot_name(slot)); + pciehp_queue_interrupt_event(slot, present ? INT_PRESENCE_ON : + INT_PRESENCE_OFF); + } /* Check Power Fault Detected */ if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { ctrl->power_fault_detected = 1; - pciehp_handle_power_fault(slot); + ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(slot)); + pciehp_queue_interrupt_event(slot, INT_POWER_FAULT); } - if (intr_loc & PCI_EXP_SLTSTA_DLLSC) - pciehp_handle_linkstate_change(slot); + if (intr_loc & PCI_EXP_SLTSTA_DLLSC) { + link = pciehp_check_link_active(ctrl); + ctrl_info(ctrl, "slot(%s): Link %s event\n", + slot_name(slot), link ? "Up" : "Down"); + pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP : + INT_LINK_DOWN); + } return IRQ_HANDLED; } @@ -613,7 +659,7 @@ void pcie_enable_notification(struct controller *ctrl) PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_DLLSCE); - pcie_write_cmd(ctrl, cmd, mask); + pcie_write_cmd_nowait(ctrl, cmd, mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd); } @@ -664,7 +710,7 @@ int pciehp_reset_slot(struct slot *slot, int probe) pci_reset_bridge_secondary_bus(ctrl->pcie->port); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); - pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask); + pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask); ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask); if (pciehp_poll_mode) @@ -724,48 +770,13 @@ static void pcie_cleanup_slot(struct controller *ctrl) static inline void dbg_ctrl(struct controller *ctrl) { - int i; - u16 reg16; struct pci_dev *pdev = ctrl->pcie->port; + u16 reg16; if (!pciehp_debug) return; - ctrl_info(ctrl, "Hotplug Controller:\n"); - ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", - pci_name(pdev), pdev->irq); - ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor); - ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device); - ctrl_info(ctrl, " Subsystem ID : 0x%04x\n", - pdev->subsystem_device); - ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", - pdev->subsystem_vendor); - ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", - pci_pcie_cap(pdev)); - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { - if (!pci_resource_len(pdev, i)) - continue; - ctrl_info(ctrl, " PCI resource [%d] : %pR\n", - i, &pdev->resource[i]); - } ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); - ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl)); - ctrl_info(ctrl, " Attention Button : %3s\n", - ATTN_BUTTN(ctrl) ? "yes" : "no"); - ctrl_info(ctrl, " Power Controller : %3s\n", - POWER_CTRL(ctrl) ? "yes" : "no"); - ctrl_info(ctrl, " MRL Sensor : %3s\n", - MRL_SENS(ctrl) ? "yes" : "no"); - ctrl_info(ctrl, " Attention Indicator : %3s\n", - ATTN_LED(ctrl) ? "yes" : "no"); - ctrl_info(ctrl, " Power Indicator : %3s\n", - PWR_LED(ctrl) ? 
"yes" : "no"); - ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n", - HP_SUPR_RM(ctrl) ? "yes" : "no"); - ctrl_info(ctrl, " EMI Present : %3s\n", - EMI(ctrl) ? "yes" : "no"); - ctrl_info(ctrl, " Command Completed : %3s\n", - NO_CMD_CMPL(ctrl) ? "no" : "yes"); pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, ®16); ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, ®16); @@ -794,10 +805,8 @@ struct controller *pcie_init(struct pcie_device *dev) /* Check if Data Link Layer Link Active Reporting is implemented */ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); - if (link_cap & PCI_EXP_LNKCAP_DLLLARC) { - ctrl_dbg(ctrl, "Link Active Reporting supported\n"); + if (link_cap & PCI_EXP_LNKCAP_DLLLARC) ctrl->link_active_reporting = 1; - } /* Clear all remaining event bits in Slot Status register */ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, @@ -805,13 +814,15 @@ struct controller *pcie_init(struct pcie_device *dev) PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); - ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n", + ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n", (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19, FLAG(slot_cap, PCI_EXP_SLTCAP_ABP), - FLAG(slot_cap, PCI_EXP_SLTCAP_AIP), - FLAG(slot_cap, PCI_EXP_SLTCAP_PIP), FLAG(slot_cap, PCI_EXP_SLTCAP_PCP), FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP), + FLAG(slot_cap, PCI_EXP_SLTCAP_AIP), + FLAG(slot_cap, PCI_EXP_SLTCAP_PIP), + FLAG(slot_cap, PCI_EXP_SLTCAP_HPC), + FLAG(slot_cap, PCI_EXP_SLTCAP_HPS), FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC)); diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index a94dd2c4183a..7eb4109a3df4 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c @@ -23,20 +23,11 @@ */ static DEFINE_SPINLOCK(ht_irq_lock); -struct ht_irq_cfg { - struct pci_dev *dev; - /* Update callback used to cope with buggy hardware */ - ht_irq_update_t *update; - unsigned pos; - unsigned idx; - struct ht_irq_msg msg; -}; - - void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) { struct ht_irq_cfg *cfg = irq_get_handler_data(irq); unsigned long flags; + spin_lock_irqsave(&ht_irq_lock, flags); if (cfg->msg.address_lo != msg->address_lo) { pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx); @@ -55,6 +46,7 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) { struct ht_irq_cfg *cfg = irq_get_handler_data(irq); + *msg = cfg->msg; } @@ -86,7 +78,6 @@ void unmask_ht_irq(struct irq_data *data) */ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) { - struct ht_irq_cfg *cfg; int max_irq, pos, irq; unsigned long flags; u32 data; @@ -105,29 +96,9 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) if (idx > max_irq) return -EINVAL; - cfg = kmalloc(sizeof(*cfg), GFP_KERNEL); - if (!cfg) - return -ENOMEM; - - cfg->dev = dev; - cfg->update = update; - cfg->pos = pos; - cfg->idx = 0x10 + (idx * 2); - /* Initialize msg to a value that will never match the first write. 
*/ - cfg->msg.address_lo = 0xffffffff; - cfg->msg.address_hi = 0xffffffff; - - irq = irq_alloc_hwirq(dev_to_node(&dev->dev)); - if (!irq) { - kfree(cfg); - return -EBUSY; - } - irq_set_handler_data(irq, cfg); - - if (arch_setup_ht_irq(irq, dev) < 0) { - ht_destroy_irq(irq); - return -EBUSY; - } + irq = arch_setup_ht_irq(idx, pos, dev, update); + if (irq > 0) + dev_dbg(&dev->dev, "irq %d for HT\n", irq); return irq; } @@ -158,13 +129,6 @@ EXPORT_SYMBOL(ht_create_irq); */ void ht_destroy_irq(unsigned int irq) { - struct ht_irq_cfg *cfg; - - cfg = irq_get_handler_data(irq); - irq_set_chip(irq, NULL); - irq_set_handler_data(irq, NULL); - irq_free_hwirq(irq); - - kfree(cfg); + arch_teardown_ht_irq(irq); } EXPORT_SYMBOL(ht_destroy_irq); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index c3e7dfcf9ff5..f66be868ad21 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -185,27 +185,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev) return default_restore_msi_irqs(dev); } -static void msi_set_enable(struct pci_dev *dev, int enable) -{ - u16 control; - - pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); - control &= ~PCI_MSI_FLAGS_ENABLE; - if (enable) - control |= PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); -} - -static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) -{ - u16 ctrl; - - pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); - ctrl &= ~clear; - ctrl |= set; - pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); -} - static inline __attribute_const__ u32 msi_mask(unsigned x) { /* Don't shift by >= width of type */ @@ -452,7 +431,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) entry = irq_get_msi_desc(dev->irq); pci_intx_for_msi(dev, 0); - msi_set_enable(dev, 0); + pci_msi_set_enable(dev, 0); arch_restore_msi_irqs(dev); pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); @@ -473,14 +452,14 @@ static void __pci_restore_msix_state(struct pci_dev *dev) /* route the table */ pci_intx_for_msi(dev, 0); - msix_clear_and_set_ctrl(dev, 0, + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); arch_restore_msi_irqs(dev); list_for_each_entry(entry, &dev->msi_list, list) msix_mask_irq(entry, entry->masked); - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); } void pci_restore_msi_state(struct pci_dev *dev) @@ -647,7 +626,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) int ret; unsigned mask; - msi_set_enable(dev, 0); /* Disable MSI during set up */ + pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ entry = msi_setup_entry(dev, nvec); if (!entry) @@ -683,7 +662,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); - msi_set_enable(dev, 1); + pci_msi_set_enable(dev, 1); dev->msi_enabled = 1; dev->irq = entry->irq; @@ -775,7 +754,7 @@ static int msix_capability_init(struct pci_dev *dev, void __iomem *base; /* Ensure MSI-X is disabled while it is set up */ - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ @@ -801,7 +780,7 @@ static int msix_capability_init(struct pci_dev *dev, * MSI-X registers. 
We need to mask all the vectors to prevent * interrupts coming in before they're fully set up. */ - msix_clear_and_set_ctrl(dev, 0, + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); msix_program_entries(dev, entries); @@ -814,7 +793,7 @@ static int msix_capability_init(struct pci_dev *dev, pci_intx_for_msi(dev, 0); dev->msix_enabled = 1; - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); return 0; @@ -919,7 +898,7 @@ void pci_msi_shutdown(struct pci_dev *dev) BUG_ON(list_empty(&dev->msi_list)); desc = list_first_entry(&dev->msi_list, struct msi_desc, list); - msi_set_enable(dev, 0); + pci_msi_set_enable(dev, 0); pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; @@ -1027,7 +1006,7 @@ void pci_msix_shutdown(struct pci_dev *dev) __pci_msix_desc_mask_irq(entry, 1); } - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; } @@ -1062,18 +1041,6 @@ EXPORT_SYMBOL(pci_msi_enabled); void pci_msi_init_pci_dev(struct pci_dev *dev) { INIT_LIST_HEAD(&dev->msi_list); - - /* Disable the msi hardware to avoid screaming interrupts - * during boot. This is the power on reset default so - * usually this should be a noop. - */ - dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); - if (dev->msi_cap) - msi_set_enable(dev, 0); - - dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); - if (dev->msix_cap) - msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); } /** diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 6f6f175f51f7..314a625b78d6 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -420,7 +420,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, [PCI_D2] = ACPI_STATE_D2, - [PCI_D3hot] = ACPI_STATE_D3_COLD, + [PCI_D3hot] = ACPI_STATE_D3_HOT, [PCI_D3cold] = ACPI_STATE_D3_COLD, }; int error = -EINVAL; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index acc4b6ef78c4..0008c950452c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3101,39 +3101,6 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); -/** - * pci_msi_off - disables any MSI or MSI-X capabilities - * @dev: the PCI device to operate on - * - * If you want to use MSI, see pci_enable_msi() and friends. - * This is a lower-level primitive that allows us to disable - * MSI operation at the device level. - */ -void pci_msi_off(struct pci_dev *dev) -{ - int pos; - u16 control; - - /* - * This looks like it could go in msi.c, but we need it even when - * CONFIG_PCI_MSI=n. For the same reason, we can't use - * dev->msi_cap or dev->msix_cap here. 
- */ - pos = pci_find_capability(dev, PCI_CAP_ID_MSI); - if (pos) { - pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); - control &= ~PCI_MSI_FLAGS_ENABLE; - pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); - } - pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); - control &= ~PCI_MSIX_FLAGS_ENABLE; - pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); - } -} -EXPORT_SYMBOL_GPL(pci_msi_off); - int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size) { return dma_set_max_seg_size(&dev->dev, size); @@ -4324,6 +4291,17 @@ bool pci_device_is_present(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(pci_device_is_present); +void pci_ignore_hotplug(struct pci_dev *dev) +{ + struct pci_dev *bridge = dev->bus->self; + + dev->ignore_hotplug = 1; + /* Propagate the "ignore hotplug" setting to the parent bridge. */ + if (bridge) + bridge->ignore_hotplug = 1; +} +EXPORT_SYMBOL_GPL(pci_ignore_hotplug); + #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; static DEFINE_SPINLOCK(resource_alignment_lock); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 9bd762c237ab..4ff0ff1c4088 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -146,6 +146,27 @@ static inline void pci_no_msi(void) { } static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } #endif +static inline void pci_msi_set_enable(struct pci_dev *dev, int enable) +{ + u16 control; + + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); + control &= ~PCI_MSI_FLAGS_ENABLE; + if (enable) + control |= PCI_MSI_FLAGS_ENABLE; + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); +} + +static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) +{ + u16 ctrl; + + pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + ctrl &= ~clear; + ctrl |= set; + pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); +} + void pci_realloc_get_opt(char *); static inline int pci_no_d1d2(struct pci_dev *dev) @@ -216,17 +237,6 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *fail_head); bool pci_bus_clip_resource(struct pci_dev *dev, int idx); -/** - * pci_ari_enabled - query ARI forwarding status - * @bus: the PCI bus - * - * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; - */ -static inline int pci_ari_enabled(struct pci_bus *bus) -{ - return bus->self && bus->self->ari_enabled; -} - void pci_reassigndev_resource_alignment(struct pci_dev *dev); void pci_disable_bridge_window(struct pci_dev *dev); diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 5653ea94547f..9803e3d039fe 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -425,8 +425,7 @@ static pci_ers_result_t reset_link(struct pci_dev *dev) if (driver && driver->reset_link) { status = driver->reset_link(udev); - } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM || - pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) { + } else if (udev->has_secondary_link) { status = default_reset_link(udev); } else { dev_printk(KERN_DEBUG, &dev->dev, diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 7d4fcdc512aa..317e3558a35e 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -127,15 +127,12 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) { struct pci_dev 
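The two inlines added to pci.h above give the MSI code one canonical read-modify-write per control word instead of scattered open-coded sequences. The clear-then-set form is the general one; the msix_capability_init() hunks earlier in this diff use it for the classic mask-all/program/unmask bracket, roughly as follows (fragment, dev assumed to be the struct pci_dev being configured):

        /* Mask every vector (and enable MSI-X) while the table is programmed;
         * clear=0 makes this a pure set. */
        pci_msix_clear_and_set_ctrl(dev, 0,
                        PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

        /* ... program the per-vector table entries ... */

        /* Drop the function-wide mask; set=0 makes this a pure clear. */
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);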
*child; struct pci_bus *linkbus = link->pdev->subordinate; + u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0; - list_for_each_entry(child, &linkbus->devices, bus_list) { - if (enable) - pcie_capability_set_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); - else - pcie_capability_clear_word(child, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); - } + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN, + val); link->clkpm_enabled = !!enable; } @@ -525,7 +522,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev) INIT_LIST_HEAD(&link->children); INIT_LIST_HEAD(&link->link); link->pdev = pdev; - if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) { + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { struct pcie_link_state *parent; parent = pdev->bus->parent->self->link_state; if (!parent) { @@ -559,10 +556,15 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) if (!aspm_support_enabled) return; - if (!pci_is_pcie(pdev) || pdev->link_state) + if (pdev->link_state) return; - if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) + + /* + * We allocate pcie_link_state for the component on the upstream + * end of a Link, so there's nothing to do unless this device has a + * Link on its secondary side. + */ + if (!pdev->has_secondary_link) return; /* VIA has a strange chipset, root port is under a bridge */ @@ -675,10 +677,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; - if (aspm_disabled || !pci_is_pcie(pdev) || !link) - return; - if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && - (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) + if (aspm_disabled || !link) return; /* * Devices changed PM state, we should recheck if latency @@ -696,16 +695,12 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; - if (aspm_disabled || !pci_is_pcie(pdev) || !link) + if (aspm_disabled || !link) return; if (aspm_policy != POLICY_POWERSAVE) return; - if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && - (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)) - return; - down_read(&pci_bus_sem); mutex_lock(&aspm_lock); pcie_config_aspm_path(link); @@ -714,8 +709,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) up_read(&pci_bus_sem); } -static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, - bool force) +static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) { struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link; @@ -723,8 +717,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, if (!pci_is_pcie(pdev)) return; - if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) + if (pdev->has_secondary_link) parent = pdev; if (!parent || !parent->link_state) return; @@ -737,7 +730,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, * a similar mechanism using "PciASPMOptOut", which is also * ignored in this situation. 
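The clkpm hunk at the top of this aspm.c diff applies the same consolidation through the PCIe capability accessors: computing the target value once and calling pcie_capability_clear_and_set_word() collapses the if/else set-or-clear pair into a single read-modify-write per child device. In isolation, the resulting per-device body is:

        u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

        /* One RMW accessor instead of branching between set and clear. */
        pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
                                           PCI_EXP_LNKCTL_CLKREQ_EN, val);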
*/ - if (aspm_disabled && !force) { + if (aspm_disabled) { dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n"); return; } @@ -763,7 +756,7 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, void pci_disable_link_state_locked(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, false, false); + __pci_disable_link_state(pdev, state, false); } EXPORT_SYMBOL(pci_disable_link_state_locked); @@ -778,7 +771,7 @@ EXPORT_SYMBOL(pci_disable_link_state_locked); */ void pci_disable_link_state(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, true, false); + __pci_disable_link_state(pdev, state, true); } EXPORT_SYMBOL(pci_disable_link_state); @@ -907,9 +900,7 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; - if (!pci_is_pcie(pdev) || - (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) + if (!link_state) return; if (link_state->aspm_support) @@ -924,9 +915,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { struct pcie_link_state *link_state = pdev->link_state; - if (!pci_is_pcie(pdev) || - (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT && - pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) + if (!link_state) return; if (link_state->aspm_support) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6675a7a1b9fc..cefd636681b6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -254,8 +254,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, } if (res->flags & IORESOURCE_MEM_64) { - if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) && - sz64 > 0x100000000ULL) { + if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8) + && sz64 > 0x100000000ULL) { res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; res->start = 0; res->end = 0; @@ -264,7 +264,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, goto out; } - if ((sizeof(dma_addr_t) < 8) && l) { + if ((sizeof(pci_bus_addr_t) < 8) && l) { /* Above 32-bit boundary; try to reallocate */ res->flags |= IORESOURCE_UNSET; res->start = 0; @@ -399,7 +399,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; u64 base64, limit64; - dma_addr_t base, limit; + pci_bus_addr_t base, limit; struct pci_bus_region region; struct resource *res; @@ -426,8 +426,8 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) } } - base = (dma_addr_t) base64; - limit = (dma_addr_t) limit64; + base = (pci_bus_addr_t) base64; + limit = (pci_bus_addr_t) limit64; if (base != base64) { dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", @@ -973,6 +973,8 @@ void set_pcie_port_type(struct pci_dev *pdev) { int pos; u16 reg16; + int type; + struct pci_dev *parent; pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) @@ -982,6 +984,22 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_flags_reg = reg16; pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; + + /* + * A Root Port is always the upstream end of a Link. No PCIe + * component has two Links. Two Links are connected by a Switch + * that has a Port on each Link and internal logic to connect the + * two Ports. 
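The comment above is the rationale for the new has_secondary_link flag: only the upstream component of a Link owns link-level state, and the old way of identifying it, matching on port type, misfires on topologies where, say, a Downstream Port sits at the upstream end. Call sites throughout this diff (ASPM, AER, VC) therefore change from a type test to the cached topology fact, roughly:

        /* Before: infer "upstream end of a Link" from the port type. */
        if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
            pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
                manage_link(pdev);        /* hypothetical consumer */

        /* After: ask the question directly via the enumeration-time flag. */
        if (pdev->has_secondary_link)
                manage_link(pdev);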
+ */ + type = pci_pcie_type(pdev); + if (type == PCI_EXP_TYPE_ROOT_PORT) + pdev->has_secondary_link = 1; + else if (type == PCI_EXP_TYPE_UPSTREAM || + type == PCI_EXP_TYPE_DOWNSTREAM) { + parent = pci_upstream_bridge(pdev); + if (!parent->has_secondary_link) + pdev->has_secondary_link = 1; + } } void set_pcie_hotplug_bridge(struct pci_dev *pdev) @@ -1085,6 +1103,22 @@ int pci_cfg_space_size(struct pci_dev *dev) #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) +static void pci_msi_setup_pci_dev(struct pci_dev *dev) +{ + /* + * Disable the MSI hardware to avoid screaming interrupts + * during boot. This is the power on reset default so + * usually this should be a noop. + */ + dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (dev->msi_cap) + pci_msi_set_enable(dev, 0); + + dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); + if (dev->msix_cap) + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); +} + /** * pci_setup_device - fill in class and map information of a device * @dev: the device structure to fill @@ -1140,6 +1174,8 @@ int pci_setup_device(struct pci_dev *dev) /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; + pci_msi_setup_pci_dev(dev); + /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); /* device class may be changed after fixup */ @@ -1611,7 +1647,7 @@ static int only_one_child(struct pci_bus *bus) return 0; if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT) return 1; - if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM && + if (parent->has_secondary_link && !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) return 1; return 0; @@ -2094,25 +2130,6 @@ struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, } EXPORT_SYMBOL(pci_scan_root_bus); -/* Deprecated; use pci_scan_root_bus() instead */ -struct pci_bus *pci_scan_bus_parented(struct device *parent, - int bus, struct pci_ops *ops, void *sysdata) -{ - LIST_HEAD(resources); - struct pci_bus *b; - - pci_add_resource(&resources, &ioport_resource); - pci_add_resource(&resources, &iomem_resource); - pci_add_resource(&resources, &busn_resource); - b = pci_create_root_bus(parent, bus, ops, sysdata, &resources); - if (b) - pci_scan_child_bus(b); - else - pci_free_resource_list(&resources); - return b; -} -EXPORT_SYMBOL(pci_scan_bus_parented); - struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) { diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c6dc1dfd25d5..e9fd0e90fa3b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -819,13 +819,6 @@ static void quirk_amd_ioapic(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); - -static void quirk_ioapic_rmw(struct pci_dev *dev) -{ - if (dev->devfn == 0 && dev->bus->number == 0) - sis_apic_bug = 1; -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw); #endif /* CONFIG_X86_IO_APIC */ /* @@ -1600,7 +1593,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a static void quirk_pcie_mch(struct pci_dev *pdev) { - pci_msi_off(pdev); pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); @@ -1614,7 +1606,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quir */ static void quirk_pcie_pxh(struct pci_dev *dev) { - pci_msi_off(dev); dev->no_msi = 1; dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n"); } @@ -3572,6 +3563,8 @@ static void 
quirk_dma_func1_alias(struct pci_dev *dev) * SKUs this function is not present, making this a ghost requester. * https://bugzilla.kernel.org/show_bug.cgi?id=42679 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ @@ -3740,6 +3733,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = { /* Wellsburg (X99) PCH */ 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17, 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e, + /* Lynx Point (9 series) PCH */ + 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e, }; static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index 7e1304d2e389..dfbab61a1b47 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c @@ -108,8 +108,7 @@ static void pci_vc_enable(struct pci_dev *dev, int pos, int res) struct pci_dev *link = NULL; /* Enable VCs from the downstream device */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) + if (!dev->has_secondary_link) return; ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 7cfd2db02deb..240f38872085 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -446,9 +446,15 @@ static int pcifront_scan_root(struct pcifront_device *pdev, unsigned int domain, unsigned int bus) { struct pci_bus *b; + LIST_HEAD(resources); struct pcifront_sd *sd = NULL; struct pci_bus_entry *bus_entry = NULL; int err = 0; + static struct resource busn_res = { + .start = 0, + .end = 255, + .flags = IORESOURCE_BUS, + }; #ifndef CONFIG_PCI_DOMAINS if (domain != 0) { @@ -470,17 +476,21 @@ static int pcifront_scan_root(struct pcifront_device *pdev, err = -ENOMEM; goto err_out; } + pci_add_resource(&resources, &ioport_resource); + pci_add_resource(&resources, &iomem_resource); + pci_add_resource(&resources, &busn_res); pcifront_init_sd(sd, domain, bus, pdev); pci_lock_rescan_remove(); - b = pci_scan_bus_parented(&pdev->xdev->dev, bus, - &pcifront_bus_ops, sd); + b = pci_scan_root_bus(&pdev->xdev->dev, bus, + &pcifront_bus_ops, sd, &resources); if (!b) { dev_err(&pdev->xdev->dev, "Error creating PCI Frontend Bus!\n"); err = -ENOMEM; pci_unlock_rescan_remove(); + pci_free_resource_list(&resources); goto err_out; } @@ -488,7 +498,7 @@ static int pcifront_scan_root(struct pcifront_device *pdev, list_add(&bus_entry->list, &pdev->root_buses); - /* pci_scan_bus_parented skips devices which do not have a have + /* pci_scan_root_bus skips devices which do not have a * devfn==0. The pcifront_scan_bus enumerates all devfn. 
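The xen-pcifront hunk above is the template for converting away from the removed pci_scan_bus_parented(): the caller now owns the root-bus windows, including an explicit bus-number aperture, passes them to pci_scan_root_bus(), and releases the list itself if the scan fails. Condensed into a hypothetical helper (ops and sysdata supplied by the caller):

        static struct pci_bus *example_scan_root(struct device *parent,
                                                 struct pci_ops *ops,
                                                 void *sysdata)
        {
                static struct resource busn_res = {
                        .start = 0,
                        .end   = 255,
                        .flags = IORESOURCE_BUS,  /* usable bus numbers */
                };
                LIST_HEAD(resources);
                struct pci_bus *b;

                pci_add_resource(&resources, &ioport_resource);
                pci_add_resource(&resources, &iomem_resource);
                pci_add_resource(&resources, &busn_res);

                b = pci_scan_root_bus(parent, 0, ops, sysdata, &resources);
                if (!b)
                        pci_free_resource_list(&resources); /* scan failed */
                return b;
        }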
*/ err = pcifront_scan_bus(pdev, domain, bus, b); diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 64d0515b76bd..55ef7d1fd8da 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -94,8 +94,7 @@ static void __iomem *set_cis_map(struct pcmcia_socket *s, mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s); if (mem->res == NULL) { - dev_printk(KERN_NOTICE, &s->dev, - "cs: unable to map card memory!\n"); + dev_notice(&s->dev, "cs: unable to map card memory!\n"); return NULL; } s->cis_virt = NULL; @@ -381,8 +380,7 @@ int verify_cis_cache(struct pcmcia_socket *s) buf = kmalloc(256, GFP_KERNEL); if (buf == NULL) { - dev_printk(KERN_WARNING, &s->dev, - "no memory for verifying CIS\n"); + dev_warn(&s->dev, "no memory for verifying CIS\n"); return -ENOMEM; } mutex_lock(&s->ops_mutex); @@ -414,14 +412,14 @@ int pcmcia_replace_cis(struct pcmcia_socket *s, const u8 *data, const size_t len) { if (len > CISTPL_MAX_CIS_SIZE) { - dev_printk(KERN_WARNING, &s->dev, "replacement CIS too big\n"); + dev_warn(&s->dev, "replacement CIS too big\n"); return -EINVAL; } mutex_lock(&s->ops_mutex); kfree(s->fake_cis); s->fake_cis = kmalloc(len, GFP_KERNEL); if (s->fake_cis == NULL) { - dev_printk(KERN_WARNING, &s->dev, "no memory to replace CIS\n"); + dev_warn(&s->dev, "no memory to replace CIS\n"); mutex_unlock(&s->ops_mutex); return -ENOMEM; } @@ -434,17 +432,17 @@ int pcmcia_replace_cis(struct pcmcia_socket *s, /* The high-level CIS tuple services */ -typedef struct tuple_flags { +struct tuple_flags { u_int link_space:4; u_int has_link:1; u_int mfc_fn:3; u_int space:4; -} tuple_flags; +}; -#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space) -#define HAS_LINK(f) (((tuple_flags *)(&(f)))->has_link) -#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn) -#define SPACE(f) (((tuple_flags *)(&(f)))->space) +#define LINK_SPACE(f) (((struct tuple_flags *)(&(f)))->link_space) +#define HAS_LINK(f) (((struct tuple_flags *)(&(f)))->has_link) +#define MFC_FN(f) (((struct tuple_flags *)(&(f)))->mfc_fn) +#define SPACE(f) (((struct tuple_flags *)(&(f)))->space) int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) @@ -1451,26 +1449,16 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) done: /* invalidate CIS cache on failure */ if (!dev_ok || !ident_ok || !count) { -#if defined(CONFIG_MTD_PCMCIA_ANONYMOUS) - /* Set up as an anonymous card. If we don't have anonymous - memory support then just error the card as there is no - point trying to second guess. 
- - Note: some cards have just a device entry, it may be - worth extending support to cover these in future */ - if (!dev_ok || !ident_ok) { - dev_info(&s->dev, "no CIS, assuming an anonymous memory card.\n"); - pcmcia_replace_cis(s, "\xFF", 1); - count = 1; - ret = 0; - } else -#endif - { - mutex_lock(&s->ops_mutex); - destroy_cis_cache(s); - mutex_unlock(&s->ops_mutex); + mutex_lock(&s->ops_mutex); + destroy_cis_cache(s); + mutex_unlock(&s->ops_mutex); + /* We differentiate between dev_ok, ident_ok and count + failures to allow for an override for anonymous cards + in ds.c */ + if (!dev_ok || !ident_ok) ret = -EIO; - } + else + ret = -EFAULT; } if (info) diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 5292db69c426..8007bfda720a 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -177,8 +177,8 @@ int pcmcia_register_socket(struct pcmcia_socket *socket) wait_for_completion(&socket->thread_done); if (!socket->thread) { - dev_printk(KERN_WARNING, &socket->dev, - "PCMCIA: warning: socket thread did not start\n"); + dev_warn(&socket->dev, + "PCMCIA: warning: socket thread did not start\n"); return -EIO; } @@ -275,7 +275,7 @@ static int socket_reset(struct pcmcia_socket *skt) msleep(unreset_check * 10); } - dev_printk(KERN_ERR, &skt->dev, "time out after reset.\n"); + dev_err(&skt->dev, "time out after reset\n"); return -ETIMEDOUT; } @@ -325,8 +325,8 @@ static void socket_shutdown(struct pcmcia_socket *s) s->ops->get_status(s, &status); if (status & SS_POWERON) { - dev_printk(KERN_ERR, &s->dev, - "*** DANGER *** unable to remove socket power\n"); + dev_err(&s->dev, + "*** DANGER *** unable to remove socket power\n"); } s->state &= ~SOCKET_INUSE; @@ -356,15 +356,13 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) } if (status & SS_PENDING) { - dev_printk(KERN_ERR, &skt->dev, - "voltage interrogation timed out.\n"); + dev_err(&skt->dev, "voltage interrogation timed out\n"); return -ETIMEDOUT; } if (status & SS_CARDBUS) { if (!(skt->features & SS_CAP_CARDBUS)) { - dev_printk(KERN_ERR, &skt->dev, - "cardbus cards are not supported.\n"); + dev_err(&skt->dev, "cardbus cards are not supported\n"); return -EINVAL; } skt->state |= SOCKET_CARDBUS; @@ -379,7 +377,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) else if (!(status & SS_XVCARD)) skt->socket.Vcc = skt->socket.Vpp = 50; else { - dev_printk(KERN_ERR, &skt->dev, "unsupported voltage key.\n"); + dev_err(&skt->dev, "unsupported voltage key\n"); return -EIO; } @@ -396,7 +394,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay) skt->ops->get_status(skt, &status); if (!(status & SS_POWERON)) { - dev_printk(KERN_ERR, &skt->dev, "unable to apply power.\n"); + dev_err(&skt->dev, "unable to apply power\n"); return -EIO; } @@ -429,8 +427,7 @@ static int socket_insert(struct pcmcia_socket *skt) if (ret == 0) { skt->state |= SOCKET_PRESENT; - dev_printk(KERN_NOTICE, &skt->dev, - "pccard: %s card inserted into slot %d\n", + dev_notice(&skt->dev, "pccard: %s card inserted into slot %d\n", (skt->state & SOCKET_CARDBUS) ? 
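The PCMCIA churn above is a mechanical logging conversion: every dev_printk(KERN_<LEVEL>, ...) has a dedicated per-level helper that is shorter and keeps the message greppable on one line. One representative before/after:

        /* Before */
        dev_printk(KERN_ERR, &skt->dev, "unable to apply power\n");

        /* After: identical output via the per-severity helper. */
        dev_err(&skt->dev, "unable to apply power\n");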
"CardBus" : "PCMCIA", skt->sock); @@ -558,8 +555,7 @@ static int socket_resume(struct pcmcia_socket *skt) static void socket_remove(struct pcmcia_socket *skt) { - dev_printk(KERN_NOTICE, &skt->dev, - "pccard: card ejected from slot %d\n", skt->sock); + dev_notice(&skt->dev, "pccard: card ejected from slot %d\n", skt->sock); socket_shutdown(skt); } @@ -605,8 +601,7 @@ static int pccardd(void *__skt) /* register with the device core */ ret = device_register(&skt->dev); if (ret) { - dev_printk(KERN_WARNING, &skt->dev, - "PCMCIA: unable to register socket\n"); + dev_warn(&skt->dev, "PCMCIA: unable to register socket\n"); skt->thread = NULL; complete(&skt->thread_done); return 0; diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index d3baf0bfca9f..0decee6c556e 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -81,8 +81,8 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) struct pcmcia_dynid { - struct list_head node; - struct pcmcia_device_id id; + struct list_head node; + struct pcmcia_device_id id; }; /** @@ -284,8 +284,8 @@ static int pcmcia_device_probe(struct device *dev) dev_dbg(dev, "base %x, regs %x", p_dev->config_base, p_dev->config_regs); } else { - dev_printk(KERN_INFO, dev, - "pcmcia: could not parse base and rmask0 of CIS\n"); + dev_info(dev, + "pcmcia: could not parse base and rmask0 of CIS\n"); p_dev->config_base = 0; p_dev->config_regs = 0; } @@ -382,15 +382,15 @@ static int pcmcia_device_remove(struct device *dev) /* check for proper unloading */ if (p_dev->_irq || p_dev->_io || p_dev->_locked) - dev_printk(KERN_INFO, dev, - "pcmcia: driver %s did not release config properly\n", - p_drv->name); + dev_info(dev, + "pcmcia: driver %s did not release config properly\n", + p_drv->name); for (i = 0; i < MAX_WIN; i++) if (p_dev->_win & CLIENT_WIN_REQ(i)) - dev_printk(KERN_INFO, dev, - "pcmcia: driver %s did not release window properly\n", - p_drv->name); + dev_info(dev, + "pcmcia: driver %s did not release window properly\n", + p_drv->name); /* references from pcmcia_probe_device */ pcmcia_put_dev(p_dev); @@ -566,7 +566,7 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, c->io[i].name = p_dev->devname; c->io[i].flags = IORESOURCE_IO; } - for (i = 0; i< MAX_WIN; i++) { + for (i = 0; i < MAX_WIN; i++) { c->mem[i].name = p_dev->devname; c->mem[i].flags = IORESOURCE_MEM; } @@ -578,8 +578,7 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, mutex_unlock(&s->ops_mutex); - dev_printk(KERN_NOTICE, &p_dev->dev, - "pcmcia: registering new device %s (IRQ: %d)\n", + dev_notice(&p_dev->dev, "pcmcia: registering new device %s (IRQ: %d)\n", p_dev->devname, p_dev->irq); pcmcia_device_query(p_dev); @@ -634,8 +633,24 @@ static int pcmcia_card_add(struct pcmcia_socket *s) ret = pccard_validate_cis(s, &no_chains); if (ret || !no_chains) { - dev_dbg(&s->dev, "invalid CIS or invalid resources\n"); - return -ENODEV; +#if defined(CONFIG_MTD_PCMCIA_ANONYMOUS) + /* Set up as an anonymous card. If we don't have anonymous + memory support then just error the card as there is no + point trying to second guess. 
+ + Note: some cards have just a device entry, it may be + worth extending support to cover these in future */ + if (ret == -EIO) { + dev_info(&s->dev, "no CIS, assuming an anonymous memory card.\n"); + pcmcia_replace_cis(s, "\xFF", 1); + no_chains = 1; + ret = 0; + } else +#endif + { + dev_dbg(&s->dev, "invalid CIS or invalid resources\n"); + return -ENODEV; + } } if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc)) @@ -651,7 +666,7 @@ static int pcmcia_card_add(struct pcmcia_socket *s) } -static int pcmcia_requery_callback(struct device *dev, void * _data) +static int pcmcia_requery_callback(struct device *dev, void *_data) { struct pcmcia_device *p_dev = to_pcmcia_dev(dev); if (!p_dev->dev.driver) { @@ -729,7 +744,7 @@ static void pcmcia_requery(struct pcmcia_socket *s) * the one provided by the card is broken. The firmware files reside in * /lib/firmware/ in userspace. */ -static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) +static int pcmcia_load_firmware(struct pcmcia_device *dev, char *filename) { struct pcmcia_socket *s = dev->socket; const struct firmware *fw; @@ -745,16 +760,14 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) if (request_firmware(&fw, filename, &dev->dev) == 0) { if (fw->size >= CISTPL_MAX_CIS_SIZE) { ret = -EINVAL; - dev_printk(KERN_ERR, &dev->dev, - "pcmcia: CIS override is too big\n"); + dev_err(&dev->dev, "pcmcia: CIS override is too big\n"); goto release; } if (!pcmcia_replace_cis(s, fw->data, fw->size)) ret = 0; else { - dev_printk(KERN_ERR, &dev->dev, - "pcmcia: CIS override failed\n"); + dev_err(&dev->dev, "pcmcia: CIS override failed\n"); goto release; } @@ -781,7 +794,8 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) #else /* !CONFIG_PCMCIA_LOAD_CIS */ -static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename) +static inline int pcmcia_load_firmware(struct pcmcia_device *dev, + char *filename) { return -ENODEV; } @@ -1148,10 +1162,9 @@ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) if (p_drv->suspend) { ret = p_drv->suspend(p_dev); if (ret) { - dev_printk(KERN_ERR, dev, - "pcmcia: device %s (driver %s) did " - "not want to go to sleep (%d)\n", - p_dev->devname, p_drv->name, ret); + dev_err(dev, + "pcmcia: device %s (driver %s) did not want to go to sleep (%d)\n", + p_dev->devname, p_drv->name, ret); mutex_lock(&p_dev->socket->ops_mutex); p_dev->suspended = 0; mutex_unlock(&p_dev->socket->ops_mutex); @@ -1206,7 +1219,7 @@ static int pcmcia_dev_resume(struct device *dev) } -static int pcmcia_bus_suspend_callback(struct device *dev, void * _data) +static int pcmcia_bus_suspend_callback(struct device *dev, void *_data) { struct pcmcia_socket *skt = _data; struct pcmcia_device *p_dev = to_pcmcia_dev(dev); @@ -1217,7 +1230,7 @@ static int pcmcia_bus_suspend_callback(struct device *dev, void * _data) return runtime_suspend(dev); } -static int pcmcia_bus_resume_callback(struct device *dev, void * _data) +static int pcmcia_bus_resume_callback(struct device *dev, void *_data) { struct pcmcia_socket *skt = _data; struct pcmcia_device *p_dev = to_pcmcia_dev(dev); @@ -1342,14 +1355,13 @@ static int pcmcia_bus_add_socket(struct device *dev, socket = pcmcia_get_socket(socket); if (!socket) { - dev_printk(KERN_ERR, dev, - "PCMCIA obtaining reference to socket failed\n"); + dev_err(dev, "PCMCIA obtaining reference to socket failed\n"); return -ENODEV; } ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr); if (ret) { - 
dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); + dev_err(dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); return ret; } @@ -1361,7 +1373,7 @@ static int pcmcia_bus_add_socket(struct device *dev, ret = pccard_register_pcmcia(socket, &pcmcia_bus_callback); if (ret) { - dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n"); + dev_err(dev, "PCMCIA registration failed\n"); pcmcia_put_socket(socket); return ret; } diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c index 7f9950d324df..61cf61ac621e 100644 --- a/drivers/pcmcia/electra_cf.c +++ b/drivers/pcmcia/electra_cf.c @@ -48,14 +48,14 @@ struct electra_cf_socket { struct platform_device *ofdev; unsigned long mem_phys; - void __iomem * mem_base; + void __iomem *mem_base; unsigned long mem_size; - void __iomem * io_virt; + void __iomem *io_virt; unsigned int io_base; unsigned int io_size; u_int irq; struct resource iomem; - void __iomem * gpio_base; + void __iomem *gpio_base; int gpio_detect; int gpio_vsense; int gpio_3v; @@ -202,7 +202,7 @@ static int electra_cf_probe(struct platform_device *ofdev) if (err) return -EINVAL; - cf = kzalloc(sizeof *cf, GFP_KERNEL); + cf = kzalloc(sizeof(*cf), GFP_KERNEL); if (!cf) return -ENOMEM; @@ -216,8 +216,10 @@ static int electra_cf_probe(struct platform_device *ofdev) cf->io_size = PAGE_ALIGN(resource_size(&io)); area = __get_vm_area(cf->io_size, 0, PHB_IO_BASE, PHB_IO_END); - if (area == NULL) - return -ENOMEM; + if (area == NULL) { + status = -ENOMEM; + goto fail1; + } cf->io_virt = (void __iomem *)(area->addr); @@ -320,7 +322,8 @@ fail1: iounmap(cf->mem_base); if (cf->gpio_base) iounmap(cf->gpio_base); - device_init_wakeup(&ofdev->dev, 0); + if (area) + device_init_wakeup(&ofdev->dev, 0); kfree(cf); return status; @@ -369,5 +372,5 @@ static struct platform_driver electra_cf_driver = { module_platform_driver(electra_cf_driver); MODULE_LICENSE("GPL"); -MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); +MODULE_AUTHOR("Olof Johansson <olof@lixom.net>"); MODULE_DESCRIPTION("PA Semi Electra CF driver"); diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index a2c138719bac..eb0d80a429e4 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c @@ -132,14 +132,14 @@ module_param(recov_time, int, 0444); /*====================================================================*/ -typedef struct cirrus_state_t { +struct cirrus_state { u_char misc1, misc2; u_char timer[6]; -} cirrus_state_t; +}; -typedef struct vg46x_state_t { +struct vg46x_state { u_char ctl, ema; -} vg46x_state_t; +}; struct i82365_socket { u_short type, flags; @@ -149,8 +149,8 @@ struct i82365_socket { u_short psock; u_char cs_irq, intr; union { - cirrus_state_t cirrus; - vg46x_state_t vg46x; + struct cirrus_state cirrus; + struct vg46x_state vg46x; } state; }; @@ -173,11 +173,11 @@ static struct timer_list poll_timer; /*====================================================================*/ /* These definitions must match the pcic table! 
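Each enumerator indexes the pcic[] array directly, so the two orderings have to stay in sync.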
*/ -typedef enum pcic_id { +enum pcic_id { IS_I82365A, IS_I82365B, IS_I82365DF, IS_IBM, IS_RF5Cx96, IS_VLSI, IS_VG468, IS_VG469, IS_PD6710, IS_PD672X, IS_VT83C469, -} pcic_id; +}; /* Flags for classifying groups of controllers */ #define IS_VADEM 0x0001 @@ -189,12 +189,12 @@ typedef enum pcic_id { #define IS_REGISTERED 0x2000 #define IS_ALIVE 0x8000 -typedef struct pcic_t { +struct pcic { char *name; u_short flags; -} pcic_t; +}; -static pcic_t pcic[] = { +static struct pcic pcic[] = { { "Intel i82365sl A step", 0 }, { "Intel i82365sl B step", 0 }, { "Intel i82365sl DF", IS_DF_PWR }, @@ -208,7 +208,7 @@ static pcic_t pcic[] = { { "VIA VT83C469", IS_CIRRUS|IS_VIA }, }; -#define PCIC_COUNT (sizeof(pcic)/sizeof(pcic_t)) +#define PCIC_COUNT ARRAY_SIZE(pcic) /*====================================================================*/ @@ -294,7 +294,7 @@ static void i365_set_pair(u_short sock, u_short reg, u_short data) static void cirrus_get_state(u_short s) { int i; - cirrus_state_t *p = &socket[s].state.cirrus; + struct cirrus_state *p = &socket[s].state.cirrus; p->misc1 = i365_get(s, PD67_MISC_CTL_1); p->misc1 &= (PD67_MC1_MEDIA_ENA | PD67_MC1_INPACK_ENA); p->misc2 = i365_get(s, PD67_MISC_CTL_2); @@ -306,7 +306,7 @@ static void cirrus_set_state(u_short s) { int i; u_char misc; - cirrus_state_t *p = &socket[s].state.cirrus; + struct cirrus_state *p = &socket[s].state.cirrus; misc = i365_get(s, PD67_MISC_CTL_2); i365_set(s, PD67_MISC_CTL_2, p->misc2); @@ -321,7 +321,7 @@ static void cirrus_set_state(u_short s) static u_int __init cirrus_set_opts(u_short s, char *buf) { struct i82365_socket *t = &socket[s]; - cirrus_state_t *p = &socket[s].state.cirrus; + struct cirrus_state *p = &socket[s].state.cirrus; u_int mask = 0xffff; if (has_ring == -1) has_ring = 1; @@ -377,7 +377,7 @@ static u_int __init cirrus_set_opts(u_short s, char *buf) static void vg46x_get_state(u_short s) { - vg46x_state_t *p = &socket[s].state.vg46x; + struct vg46x_state *p = &socket[s].state.vg46x; p->ctl = i365_get(s, VG468_CTL); if (socket[s].type == IS_VG469) p->ema = i365_get(s, VG469_EXT_MODE); @@ -385,7 +385,7 @@ static void vg46x_get_state(u_short s) static void vg46x_set_state(u_short s) { - vg46x_state_t *p = &socket[s].state.vg46x; + struct vg46x_state *p = &socket[s].state.vg46x; i365_set(s, VG468_CTL, p->ctl); if (socket[s].type == IS_VG469) i365_set(s, VG469_EXT_MODE, p->ema); @@ -393,7 +393,7 @@ static void vg46x_set_state(u_short s) static u_int __init vg46x_set_opts(u_short s, char *buf) { - vg46x_state_t *p = &socket[s].state.vg46x; + struct vg46x_state *p = &socket[s].state.vg46x; flip(p->ctl, VG468_CTL_ASYNC, async_clock); flip(p->ema, VG469_MODE_CABLE, cable_mode); @@ -1285,13 +1285,6 @@ static int __init init_i82365(void) ret = pcmcia_register_socket(&socket[i].socket); if (!ret) socket[i].flags |= IS_REGISTERED; - -#if 0 /* driver model ordering issue */ - class_device_create_file(&socket[i].socket.dev, - &class_device_attr_info); - class_device_create_file(&socket[i].socket.dev, - &class_device_attr_exca); -#endif } /* Finally, schedule a polling interrupt */ diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c index 0075bd7162ed..70b089430fcc 100644 --- a/drivers/pcmcia/m32r_cfc.c +++ b/drivers/pcmcia/m32r_cfc.c @@ -754,13 +754,6 @@ static int __init init_m32r_pcc(void) ret = pcmcia_register_socket(&socket[i].socket); if (!ret) socket[i].flags |= IS_REGISTERED; - -#if 0 /* driver model ordering issue */ - class_device_create_file(&socket[i].socket.dev, - &class_device_attr_info); - 
class_device_create_file(&socket[i].socket.dev, - &class_device_attr_exca); -#endif } /* Finally, schedule a polling interrupt */ diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index a77e571b08b8..eb126b98ed8a 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c @@ -716,13 +716,6 @@ static int __init init_m32r_pcc(void) ret = pcmcia_register_socket(&socket[i].socket); if (!ret) socket[i].flags |= IS_REGISTERED; - -#if 0 /* driver model ordering issue */ - class_device_create_file(&socket[i].socket.dev, - &class_device_attr_info); - class_device_create_file(&socket[i].socket.dev, - &class_device_attr_exca); -#endif } /* Finally, schedule a polling interrupt */ diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c index e2c92415b892..1c05d74e850d 100644 --- a/drivers/pcmcia/pcmcia_cis.c +++ b/drivers/pcmcia/pcmcia_cis.c @@ -44,7 +44,7 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, buf = kmalloc(256, GFP_KERNEL); if (buf == NULL) { - dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n"); + dev_warn(&s->dev, "no memory to read tuple\n"); return -ENOMEM; } tuple.DesiredTuple = code; @@ -94,7 +94,7 @@ int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function, buf = kzalloc(256, GFP_KERNEL); if (buf == NULL) { - dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n"); + dev_warn(&s->dev, "no memory to read tuple\n"); return -ENOMEM; } diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index e8c19def1b0f..34aad895a239 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -508,8 +508,7 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev) s->socket.Vpp = p_dev->vpp; if (s->ops->set_socket(s, &s->socket)) { mutex_unlock(&s->ops_mutex); - dev_printk(KERN_WARNING, &p_dev->dev, - "Unable to set socket state\n"); + dev_warn(&p_dev->dev, "Unable to set socket state\n"); return -EINVAL; } @@ -736,13 +735,11 @@ __pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev, ret = request_irq(p_dev->irq, handler, 0, p_dev->devname, p_dev->priv); if (ret) { ret = pcmcia_request_irq(p_dev, handler); - dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: " - "request for exclusive IRQ could not be fulfilled.\n"); - dev_printk(KERN_WARNING, &p_dev->dev, "pcmcia: the driver " - "needs updating to supported shared IRQ lines.\n"); + dev_warn(&p_dev->dev, "pcmcia: request for exclusive IRQ could not be fulfilled\n"); + dev_warn(&p_dev->dev, "pcmcia: the driver needs updating to support shared IRQ lines\n"); } if (ret) - dev_printk(KERN_INFO, &p_dev->dev, "request_irq() failed\n"); + dev_info(&p_dev->dev, "request_irq() failed\n"); else p_dev->_irq = 1; diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 065704c605d5..5ef7b46a2578 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -191,15 +191,13 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, int any; u_char *b, hole, most; - dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:", - base, base+num-1); + dev_info(&s->dev, "cs: IO port probe %#x-%#x:", base, base+num-1); /* First, what does a floating port look like?
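The first pass below samples the range to learn what value floating ports return; the second pass then excludes 8-port windows that do not probe cleanly against it.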
*/ b = kzalloc(256, GFP_KERNEL); if (!b) { - printk("\n"); - dev_printk(KERN_ERR, &s->dev, - "do_io_probe: unable to kmalloc 256 bytes"); + pr_cont("\n"); + dev_err(&s->dev, "do_io_probe: unable to kmalloc 256 bytes\n"); return; } for (i = base, most = 0; i < base+num; i += 8) { @@ -223,7 +221,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) { if (!any) - printk(" excluding"); + pr_cont(" excluding"); if (!bad) bad = any = i; continue; @@ -234,13 +232,13 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, free_region(res); if (j < 8) { if (!any) - printk(" excluding"); + pr_cont(" excluding"); if (!bad) bad = any = i; } else { if (bad) { sub_interval(&s_data->io_db, bad, i-bad); - printk(" %#x-%#x", bad, i-1); + pr_cont(" %#x-%#x", bad, i-1); bad = 0; } } @@ -248,15 +246,15 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, if (bad) { if ((num > 16) && (bad == base) && (i == base+num)) { sub_interval(&s_data->io_db, bad, i-bad); - printk(" nothing: probe failed.\n"); + pr_cont(" nothing: probe failed.\n"); return; } else { sub_interval(&s_data->io_db, bad, i-bad); - printk(" %#x-%#x", bad, i-1); + pr_cont(" %#x-%#x", bad, i-1); } } - printk(any ? "\n" : " clean.\n"); + pr_cont("%s\n", !any ? " clean" : ""); } #endif @@ -413,8 +411,8 @@ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, struct socket_data *s_data = s->resource_data; u_long i, j, bad, fail, step; - dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:", - base, base+num-1); + dev_info(&s->dev, "cs: memory probe 0x%06lx-0x%06lx:", + base, base+num-1); bad = fail = 0; step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff); /* don't allow too large steps */ @@ -438,13 +436,13 @@ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, } if (i != j) { if (!bad) - printk(" excluding"); - printk(" %#05lx-%#05lx", i, j-1); + pr_cont(" excluding"); + pr_cont(" %#05lx-%#05lx", i, j-1); sub_interval(&s_data->mem_db, i, j-i); bad += j-i; } } - printk(bad ? "\n" : " clean.\n"); + pr_cont("%s\n", !bad ? 
" clean" : ""); return num - bad; } @@ -495,7 +493,7 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) return 0; if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; - dev_printk(KERN_NOTICE, &s->dev, + dev_notice(&s->dev, "cs: warning: no high memory space available!\n"); return -ENODEV; } @@ -975,9 +973,9 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) if (res == &ioport_resource) continue; - dev_printk(KERN_INFO, &s->cb_dev->dev, - "pcmcia: parent PCI bridge window: %pR\n", - res); + dev_info(&s->cb_dev->dev, + "pcmcia: parent PCI bridge window: %pR\n", + res); if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_IO; @@ -990,9 +988,9 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) if (res == &iomem_resource) continue; - dev_printk(KERN_INFO, &s->cb_dev->dev, - "pcmcia: parent PCI bridge window: %pR\n", - res); + dev_info(&s->cb_dev->dev, + "pcmcia: parent PCI bridge window: %pR\n", + res); if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_MEM; } diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h index a71789486cdf..5cb670e037a0 100644 --- a/drivers/pcmcia/ti113x.h +++ b/drivers/pcmcia/ti113x.h @@ -372,8 +372,8 @@ static void ti12xx_irqroute_func0(struct yenta_socket *socket) mfunc = mfunc_old = config_readl(socket, TI122X_MFUNC); devctl = config_readb(socket, TI113X_DEVICE_CONTROL); - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: mfunc 0x%08x, devctl 0x%02x\n", mfunc, devctl); + dev_info(&socket->dev->dev, "TI: mfunc 0x%08x, devctl 0x%02x\n", + mfunc, devctl); /* make sure PCI interrupts are enabled before probing */ ti_init(socket); @@ -387,8 +387,8 @@ static void ti12xx_irqroute_func0(struct yenta_socket *socket) * We're here which means PCI interrupts are _not_ delivered. 
try to * find the right setting (all serial or parallel) */ - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: probing PCI interrupt failed, trying to fix\n"); + dev_info(&socket->dev->dev, + "TI: probing PCI interrupt failed, trying to fix\n"); /* for serial PCI make sure MFUNC3 is set to IRQSER */ if ((devctl & TI113X_DCR_IMODE_MASK) == TI12XX_DCR_IMODE_ALL_SERIAL) { @@ -412,8 +412,8 @@ static void ti12xx_irqroute_func0(struct yenta_socket *socket) pci_irq_status = yenta_probe_cb_irq(socket); if (pci_irq_status == 1) { - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: all-serial interrupts ok\n"); + dev_info(&socket->dev->dev, + "TI: all-serial interrupts ok\n"); mfunc_old = mfunc; goto out; } @@ -428,8 +428,8 @@ static void ti12xx_irqroute_func0(struct yenta_socket *socket) } /* serial PCI interrupts not working fall back to parallel */ - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: falling back to parallel PCI interrupts\n"); + dev_info(&socket->dev->dev, + "TI: falling back to parallel PCI interrupts\n"); devctl &= ~TI113X_DCR_IMODE_MASK; devctl |= TI113X_DCR_IMODE_SERIAL; /* serial ISA could be right */ config_writeb(socket, TI113X_DEVICE_CONTROL, devctl); @@ -460,8 +460,7 @@ static void ti12xx_irqroute_func0(struct yenta_socket *socket) pci_irq_status = yenta_probe_cb_irq(socket); if (pci_irq_status == 1) { mfunc_old = mfunc; - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: parallel PCI interrupts ok\n"); + dev_info(&socket->dev->dev, "TI: parallel PCI interrupts ok\n"); } else { /* not working, back to old value */ mfunc = mfunc_old; @@ -473,9 +472,8 @@ static void ti12xx_irqroute_func0(struct yenta_socket *socket) out: if (pci_irq_status < 1) { socket->cb_irq = 0; - dev_printk(KERN_INFO, &socket->dev->dev, - "Yenta TI: no PCI interrupts. Fish. " - "Please report.\n"); + dev_info(&socket->dev->dev, + "Yenta TI: no PCI interrupts. Fish. Please report.\n"); } } @@ -547,9 +545,8 @@ static void ti12xx_irqroute_func1(struct yenta_socket *socket) mfunc = mfunc_old = config_readl(socket, TI122X_MFUNC); devctl = config_readb(socket, TI113X_DEVICE_CONTROL); - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: mfunc 0x%08x, devctl 0x%02x\n", - mfunc, devctl); + dev_info(&socket->dev->dev, "TI: mfunc 0x%08x, devctl 0x%02x\n", + mfunc, devctl); /* if IRQs are configured as tied, align irq of func1 with func0 */ sysctl = config_readl(socket, TI113X_SYSTEM_CONTROL); @@ -568,8 +565,8 @@ static void ti12xx_irqroute_func1(struct yenta_socket *socket) * We're here which means PCI interrupts are _not_ delivered. 
try to * find the right setting */ - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: probing PCI interrupt failed, trying to fix\n"); + dev_info(&socket->dev->dev, + "TI: probing PCI interrupt failed, trying to fix\n"); /* if all serial: set INTRTIE, probe again */ if ((devctl & TI113X_DCR_IMODE_MASK) == TI12XX_DCR_IMODE_ALL_SERIAL) { @@ -578,8 +575,8 @@ static void ti12xx_irqroute_func1(struct yenta_socket *socket) if (ti12xx_tie_interrupts(socket, &old_irq)) { pci_irq_status = yenta_probe_cb_irq(socket); if (pci_irq_status == 1) { - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: all-serial interrupts, tied ok\n"); + dev_info(&socket->dev->dev, + "TI: all-serial interrupts, tied ok\n"); goto out; } @@ -616,8 +613,8 @@ static void ti12xx_irqroute_func1(struct yenta_socket *socket) pci_irq_status = yenta_probe_cb_irq(socket); if (pci_irq_status == 1) { - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: parallel PCI interrupts ok\n"); + dev_info(&socket->dev->dev, + "TI: parallel PCI interrupts ok\n"); goto out; } @@ -632,8 +629,8 @@ static void ti12xx_irqroute_func1(struct yenta_socket *socket) if (ti12xx_tie_interrupts(socket, &old_irq)) { pci_irq_status = yenta_probe_cb_irq(socket); if (pci_irq_status == 1) { - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: parallel PCI interrupts, tied ok\n"); + dev_info(&socket->dev->dev, + "TI: parallel PCI interrupts, tied ok\n"); goto out; } @@ -644,8 +641,8 @@ static void ti12xx_irqroute_func1(struct yenta_socket *socket) out: if (pci_irq_status < 1) { socket->cb_irq = 0; - dev_printk(KERN_INFO, &socket->dev->dev, - "TI: no PCI interrupts. Fish. Please report.\n"); + dev_info(&socket->dev->dev, + "TI: no PCI interrupts. Fish. Please report.\n"); } } @@ -849,13 +846,12 @@ static int ti12xx_override(struct yenta_socket *socket) /* make sure that memory burst is active */ val_orig = val = config_readl(socket, TI113X_SYSTEM_CONTROL); if (disable_clkrun && PCI_FUNC(socket->dev->devfn) == 0) { - dev_printk(KERN_INFO, &socket->dev->dev, - "Disabling CLKRUN feature\n"); + dev_info(&socket->dev->dev, "Disabling CLKRUN feature\n"); val |= TI113X_SCR_KEEPCLK; } if (!(val & TI122X_SCR_MRBURSTUP)) { - dev_printk(KERN_INFO, &socket->dev->dev, - "Enabling burst memory read transactions\n"); + dev_info(&socket->dev->dev, + "Enabling burst memory read transactions\n"); val |= TI122X_SCR_MRBURSTUP; } if (val_orig != val) @@ -866,12 +862,10 @@ static int ti12xx_override(struct yenta_socket *socket) * CSC interrupts to PCI rather than INTVAL. */ val = config_readb(socket, TI1250_DIAGNOSTIC); - dev_printk(KERN_INFO, &socket->dev->dev, - "Using %s to route CSC interrupts to PCI\n", - (val & TI1250_DIAG_PCI_CSC) ? "CSCINT" : "INTVAL"); - dev_printk(KERN_INFO, &socket->dev->dev, - "Routing CardBus interrupts to %s\n", - (val & TI1250_DIAG_PCI_IREQ) ? "PCI" : "ISA"); + dev_info(&socket->dev->dev, "Using %s to route CSC interrupts to PCI\n", + (val & TI1250_DIAG_PCI_CSC) ? "CSCINT" : "INTVAL"); + dev_info(&socket->dev->dev, "Routing CardBus interrupts to %s\n", + (val & TI1250_DIAG_PCI_IREQ) ? 
"PCI" : "ISA"); /* do irqrouting, depending on function */ if (PCI_FUNC(socket->dev->devfn) == 0) @@ -896,9 +890,9 @@ static int ti1250_override(struct yenta_socket *socket) diag |= TI1250_DIAG_PCI_CSC | TI1250_DIAG_PCI_IREQ; if (diag != old) { - dev_printk(KERN_INFO, &socket->dev->dev, - "adjusting diagnostic: %02x -> %02x\n", - old, diag); + dev_info(&socket->dev->dev, + "adjusting diagnostic: %02x -> %02x\n", + old, diag); config_writeb(socket, TI1250_DIAGNOSTIC, diag); } @@ -963,9 +957,9 @@ static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus) /* default to clear TLTEnable bit, old behaviour */ test_c9 &= ~ENE_TEST_C9_TLTENABLE; - dev_printk(KERN_INFO, &socket->dev->dev, - "EnE: chaning testregister 0xC9, %02x -> %02x\n", - old_c9, test_c9); + dev_info(&socket->dev->dev, + "EnE: changing testregister 0xC9, %02x -> %02x\n", + old_c9, test_c9); config_writeb(socket, ENE_TEST_C9, test_c9); } diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h index 615a45a8fe86..582688fe7505 100644 --- a/drivers/pcmcia/topic.h +++ b/drivers/pcmcia/topic.h @@ -104,6 +104,9 @@ #define TOPIC_EXCA_IF_CONTROL 0x3e /* 8 bit */ #define TOPIC_EXCA_IFC_33V_ENA 0x01 +#define TOPIC_PCI_CFG_PPBCN 0x3e /* 16-bit */ +#define TOPIC_PCI_CFG_PPBCN_WBEN 0x0400 + static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff) { struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); @@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket) static int topic95_override(struct yenta_socket *socket) { u8 fctrl; + u16 ppbcn; /* enable 3.3V support for 16bit cards */ fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL); @@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket) /* tell yenta to use exca registers to power 16bit cards */ socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF; + /* Disable write buffers to prevent lockups under load with numerous + Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the + net. This is not a power-on default according to the datasheet + but some BIOSes seem to set it. 
*/ + if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0 + && socket->dev->revision <= 7 + && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) { + ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN; + pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn); + dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n"); + } + return 0; } diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index 21973d55a055..1e5fa211fb70 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c @@ -84,32 +84,32 @@ MODULE_LICENSE("GPL"); #define IO_MAX_MAPS 2 #define MEM_MAX_MAPS 5 -typedef enum { +enum vrc4171_slot { SLOT_PROBE = 0, SLOT_NOPROBE_IO, SLOT_NOPROBE_MEM, SLOT_NOPROBE_ALL, SLOT_INITIALIZED, -} vrc4171_slot_t; +}; -typedef enum { +enum vrc4171_slotb { SLOTB_IS_NONE, SLOTB_IS_PCCARD, SLOTB_IS_CF, SLOTB_IS_FLASHROM, -} vrc4171_slotb_t; +}; -typedef struct vrc4171_socket { - vrc4171_slot_t slot; +struct vrc4171_socket { + enum vrc4171_slot slot; struct pcmcia_socket pcmcia_socket; char name[24]; int csc_irq; int io_irq; spinlock_t lock; -} vrc4171_socket_t; +}; -static vrc4171_socket_t vrc4171_sockets[CARD_MAX_SLOTS]; -static vrc4171_slotb_t vrc4171_slotb = SLOTB_IS_NONE; +static struct vrc4171_socket vrc4171_sockets[CARD_MAX_SLOTS]; +static enum vrc4171_slotb vrc4171_slotb = SLOTB_IS_NONE; static char vrc4171_card_name[] = "NEC VRC4171 Card Controller"; static unsigned int vrc4171_irq; static uint16_t vrc4171_irq_mask = 0xdeb8; @@ -141,7 +141,7 @@ static inline uint16_t vrc4171_get_irq_status(void) return inw(INTERRUPT_STATUS); } -static inline void vrc4171_set_multifunction_pin(vrc4171_slotb_t config) +static inline void vrc4171_set_multifunction_pin(enum vrc4171_slotb config) { uint16_t config1; @@ -234,7 +234,7 @@ static inline int search_nonuse_irq(void) static int pccard_init(struct pcmcia_socket *sock) { - vrc4171_socket_t *socket; + struct vrc4171_socket *socket; unsigned int slot; sock->features |= SS_CAP_PCCARD | SS_CAP_PAGE_REGS; @@ -317,7 +317,7 @@ static inline uint8_t set_Vcc_value(u_char Vcc) static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state) { - vrc4171_socket_t *socket; + struct vrc4171_socket *socket; unsigned int slot; uint8_t voltage, power, control, cscint; @@ -517,7 +517,7 @@ static inline unsigned int get_events(int slot) static irqreturn_t pccard_interrupt(int irq, void *dev_id) { - vrc4171_socket_t *socket; + struct vrc4171_socket *socket; unsigned int events; irqreturn_t retval = IRQ_NONE; uint16_t status; @@ -567,7 +567,7 @@ static inline void reserve_using_irq(int slot) static int vrc4171_add_sockets(void) { - vrc4171_socket_t *socket; + struct vrc4171_socket *socket; int slot, retval; for (slot = 0; slot < CARD_MAX_SLOTS; slot++) { @@ -617,7 +617,7 @@ static int vrc4171_add_sockets(void) static void vrc4171_remove_sockets(void) { - vrc4171_socket_t *socket; + struct vrc4171_socket *socket; int slot; for (slot = 0; slot < CARD_MAX_SLOTS; slot++) { diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 965bd8491233..5d6d9b1549bc 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -712,10 +712,9 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type pcibios_bus_to_resource(dev->bus, res, ®ion); if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0) return 0; - dev_printk(KERN_INFO, &dev->dev, - "Preassigned resource %d busy or not available, " - "reconfiguring...\n", - nr); + dev_info(&dev->dev, + 
"Preassigned resource %d busy or not available, reconfiguring...\n", + nr); } if (type & IORESOURCE_IO) { @@ -738,9 +737,9 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type return 1; } - dev_printk(KERN_INFO, &dev->dev, - "no resource of type %x available, trying to continue...\n", - type); + dev_info(&dev->dev, + "no resource of type %x available, trying to continue...\n", + type); res->start = res->end = res->flags = 0; return 0; } @@ -802,13 +801,13 @@ static void yenta_close(struct pci_dev *dev) else del_timer_sync(&sock->poll_timer); - if (sock->base) - iounmap(sock->base); + iounmap(sock->base); yenta_free_resources(sock); pci_release_regions(dev); pci_disable_device(dev); pci_set_drvdata(dev, NULL); + kfree(sock); } @@ -979,8 +978,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket) socket->probe_status = 0; if (request_irq(socket->cb_irq, yenta_probe_handler, IRQF_SHARED, "yenta", socket)) { - dev_printk(KERN_WARNING, &socket->dev->dev, - "request_irq() in yenta_probe_cb_irq() failed!\n"); + dev_warn(&socket->dev->dev, + "request_irq() in yenta_probe_cb_irq() failed!\n"); return -1; } @@ -1019,9 +1018,8 @@ static void yenta_get_socket_capabilities(struct yenta_socket *socket, u32 isa_i else socket->socket.irq_mask = 0; - dev_printk(KERN_INFO, &socket->dev->dev, - "ISA IRQ mask 0x%04x, PCI irq %d\n", - socket->socket.irq_mask, socket->cb_irq); + dev_info(&socket->dev->dev, "ISA IRQ mask 0x%04x, PCI irq %d\n", + socket->socket.irq_mask, socket->cb_irq); } /* @@ -1111,9 +1109,9 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge) /* Show that the wanted subordinate number is not possible: */ if (cardbus_bridge->busn_res.end > upper_limit) - dev_printk(KERN_WARNING, &cardbus_bridge->dev, - "Upper limit for fixing this " - "bridge's parent bridge: #%02x\n", upper_limit); + dev_warn(&cardbus_bridge->dev, + "Upper limit for fixing this bridge's parent bridge: #%02x\n", + upper_limit); /* If we have room to increase the bridge's subordinate number, */ if (bridge_to_fix->busn_res.end < upper_limit) { @@ -1122,11 +1120,11 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge) unsigned char subordinate_to_assign = min_t(int, cardbus_bridge->busn_res.end, upper_limit); - dev_printk(KERN_INFO, &bridge_to_fix->dev, - "Raising subordinate bus# of parent " - "bus (#%02x) from #%02x to #%02x\n", - bridge_to_fix->number, - (int)bridge_to_fix->busn_res.end, subordinate_to_assign); + dev_info(&bridge_to_fix->dev, + "Raising subordinate bus# of parent bus (#%02x) from #%02x to #%02x\n", + bridge_to_fix->number, + (int)bridge_to_fix->busn_res.end, + subordinate_to_assign); /* Save the new subordinate in the bus struct of the bridge */ bridge_to_fix->busn_res.end = subordinate_to_assign; @@ -1153,8 +1151,7 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) * Bail out if so. */ if (!dev->subordinate) { - dev_printk(KERN_ERR, &dev->dev, "no bus associated! " - "(try 'pci=assign-busses')\n"); + dev_err(&dev->dev, "no bus associated! 
(try 'pci=assign-busses')\n"); return -ENODEV; } @@ -1189,7 +1186,7 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) goto disable; if (!pci_resource_start(dev, 0)) { - dev_printk(KERN_ERR, &dev->dev, "No cardbus resource!\n"); + dev_err(&dev->dev, "No cardbus resource!\n"); ret = -ENODEV; goto release; } @@ -1208,8 +1205,8 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) * report the subsystem vendor and device for help debugging * the irq stuff... */ - dev_printk(KERN_INFO, &dev->dev, "CardBus bridge found [%04x:%04x]\n", - dev->subsystem_vendor, dev->subsystem_device); + dev_info(&dev->dev, "CardBus bridge found [%04x:%04x]\n", + dev->subsystem_vendor, dev->subsystem_device); yenta_config_init(socket); @@ -1239,12 +1236,10 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) setup_timer(&socket->poll_timer, yenta_interrupt_wrapper, (unsigned long)socket); mod_timer(&socket->poll_timer, jiffies + HZ); - dev_printk(KERN_INFO, &dev->dev, - "no PCI IRQ, CardBus support disabled for this " - "socket.\n"); - dev_printk(KERN_INFO, &dev->dev, - "check your BIOS CardBus, BIOS IRQ or ACPI " - "settings.\n"); + dev_info(&dev->dev, + "no PCI IRQ, CardBus support disabled for this socket.\n"); + dev_info(&dev->dev, + "check your BIOS CardBus, BIOS IRQ or ACPI settings.\n"); } else { socket->socket.features |= SS_CAP_CARDBUS; } @@ -1252,32 +1247,41 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) /* Figure out what the dang thing can do for the PCMCIA layer... */ yenta_interrogate(socket); yenta_get_socket_capabilities(socket, isa_interrupts); - dev_printk(KERN_INFO, &dev->dev, - "Socket status: %08x\n", cb_readl(socket, CB_SOCKET_STATE)); + dev_info(&dev->dev, "Socket status: %08x\n", + cb_readl(socket, CB_SOCKET_STATE)); yenta_fixup_parent_bridge(dev->subordinate); /* Register it with the pcmcia layer.. */ ret = pcmcia_register_socket(&socket->socket); - if (ret == 0) { - /* Add the yenta register attributes */ - ret = device_create_file(&dev->dev, &dev_attr_yenta_registers); - if (ret == 0) - goto out; - - /* error path... */ - pcmcia_unregister_socket(&socket->socket); - } + if (ret) + goto free_irq; + + /* Add the yenta register attributes */ + ret = device_create_file(&dev->dev, &dev_attr_yenta_registers); + if (ret) + goto unregister_socket; + return ret; + + /* error path... 
*/ + unregister_socket: + pcmcia_unregister_socket(&socket->socket); + free_irq: + if (socket->cb_irq) + free_irq(socket->cb_irq, socket); + else + del_timer_sync(&socket->poll_timer); unmap: iounmap(socket->base); + yenta_free_resources(socket); release: pci_release_regions(dev); disable: pci_disable_device(dev); free: + pci_set_drvdata(dev, NULL); kfree(socket); - out: return ret; } diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index f9f205cb1f11..7137a077b9a6 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -71,9 +71,10 @@ config ASUS_LAPTOP depends on ACPI select LEDS_CLASS select NEW_LEDS - select BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE depends on INPUT depends on RFKILL || RFKILL = n + depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP select INPUT_POLLDEV ---help--- @@ -95,6 +96,7 @@ config DELL_LAPTOP depends on X86 depends on DCDBAS depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL || RFKILL = n depends on SERIO_I8042 select POWER_SUPPLY @@ -109,6 +111,7 @@ config DELL_WMI tristate "Dell WMI extras" depends on ACPI_WMI depends on INPUT + depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP ---help--- Say Y here if you want to support WMI-based hotkeys on Dell laptops. @@ -144,6 +147,7 @@ config FUJITSU_LAPTOP depends on ACPI depends on INPUT depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n depends on LEDS_CLASS || LEDS_CLASS=n ---help--- This is a driver for laptops built by Fujitsu: @@ -247,6 +251,7 @@ config MSI_LAPTOP tristate "MSI Laptop Extras" depends on ACPI depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL depends on INPUT && SERIO_I8042 select INPUT_SPARSEKMAP @@ -280,6 +285,7 @@ config COMPAL_LAPTOP tristate "Compal (and others) Laptop Extras" depends on ACPI depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL depends on HWMON depends on POWER_SUPPLY @@ -296,7 +302,8 @@ config COMPAL_LAPTOP config SONY_LAPTOP tristate "Sony Laptop Extras" depends on ACPI - select BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n + depends on BACKLIGHT_CLASS_DEVICE depends on INPUT depends on RFKILL ---help--- @@ -321,6 +328,7 @@ config IDEAPAD_LAPTOP depends on RFKILL && INPUT depends on SERIO_I8042 depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP help This is a driver for Lenovo IdeaPad netbooks contains drivers for @@ -331,8 +339,8 @@ config THINKPAD_ACPI depends on ACPI depends on INPUT depends on RFKILL || RFKILL = n - select BACKLIGHT_LCD_SUPPORT - select BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n + depends on BACKLIGHT_CLASS_DEVICE select HWMON select NVRAM select NEW_LEDS @@ -500,8 +508,9 @@ config EEEPC_LAPTOP depends on ACPI depends on INPUT depends on RFKILL || RFKILL = n + depends on ACPI_VIDEO || ACPI_VIDEO = n depends on HOTPLUG_PCI - select BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE select HWMON select LEDS_CLASS select NEW_LEDS @@ -587,6 +596,7 @@ config MSI_WMI depends on ACPI_WMI depends on INPUT depends on BACKLIGHT_CLASS_DEVICE + depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP help Say Y here if you want to support WMI-based hotkeys on MSI laptops. 
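The "depends on ACPI_VIDEO || ACPI_VIDEO = n" lines added throughout this Kconfig use the usual idiom for optional module dependencies: the driver can only be built-in if acpi-video is itself built-in or disabled, so a built-in driver can never call into a modular acpi-video. On the C side, the hunks that follow replace acpi_video_backlight_support() and acpi_video_dmi_promote_vendor() with the backlight-type arbitrator from <acpi/video.h>. A minimal sketch of the resulting driver pattern, assuming only the interfaces visible in these hunks; the quirk flag and registration helper are hypothetical stand-ins, not names from this patch:

    #include <linux/module.h>
    #include <acpi/video.h>

    /* Hypothetical stand-ins for a driver's DMI quirk flag and its
     * platform-backlight registration helper. */
    static bool example_broken_acpi_video;

    static int example_backlight_register(void)
    {
            return 0;
    }

    static int __init example_laptop_init(void)
    {
            /* Quirked systems force the vendor interface up front. */
            if (example_broken_acpi_video)
                    acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);

            /*
             * Register a vendor backlight only if the arbitrator selected
             * it; otherwise acpi-video (or a native GPU driver) owns
             * brightness control.
             */
            if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
                    return 0;

            return example_backlight_register();
    }
    module_init(example_laptop_init);

    MODULE_LICENSE("GPL");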
@@ -660,7 +670,7 @@ config TOSHIBA_HAPS depends on ACPI ---help--- This driver adds support for the built-in accelerometer - found on recent Toshiba laptops equiped with HID TOS620A + found on recent Toshiba laptops equipped with HID TOS620A device. This driver receives ACPI notify events 0x80 when the sensor @@ -669,7 +679,7 @@ config TOSHIBA_HAPS been stabilized. Also provides sysfs entries to get/set the desired protection - level and reseting the HDD protection interface. + level and resetting the HDD protection interface. If you have a recent Toshiba laptop with a built-in accelerometer device, say Y. @@ -824,6 +834,7 @@ config MXM_WMI config INTEL_OAKTRAIL tristate "Intel Oaktrail Platform Extras" depends on ACPI + depends on ACPI_VIDEO || ACPI_VIDEO = n depends on RFKILL && BACKLIGHT_CLASS_DEVICE && ACPI ---help--- Intel Oaktrail platform need this driver to provide interfaces to diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 3ac29a1e8f92..f6b280dbfb33 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -2246,14 +2246,10 @@ static int __init acer_wmi_init(void) set_quirks(); if (dmi_check_system(video_vendor_dmi_table)) - acpi_video_dmi_promote_vendor(); - if (acpi_video_backlight_support()) { + acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); + + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) interface->capability &= ~ACER_CAP_BRIGHTNESS; - pr_info("Brightness must be controlled by acpi video driver\n"); - } else { - pr_info("Disabling ACPI video driver\n"); - acpi_video_unregister_backlight(); - } if (wmi_has_guid(WMID_GUID3)) { if (ec_raw_mode) { diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index 6808715003f6..0dec3f59917a 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -550,8 +550,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) * backlight control and supports more levels than other options. * Disable the other backlight choices. 
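Forcing the DMI backlight type to vendor below keeps acpi-video from offering its own backlight interface.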
*/ - acpi_video_dmi_promote_vendor(); - acpi_video_unregister(); + acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); apple_bl_unregister(); gmux_data->power_state = VGA_SWITCHEROO_ON; @@ -645,7 +644,6 @@ static void gmux_remove(struct pnp_dev *pnp) apple_gmux_data = NULL; kfree(gmux_data); - acpi_video_dmi_demote_vendor(); acpi_video_register(); apple_bl_register(); } diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 46b274693872..58d29c4f2840 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -54,6 +54,7 @@ #include <linux/slab.h> #include <linux/dmi.h> #include <linux/acpi.h> +#include <acpi/video.h> #define ASUS_LAPTOP_VERSION "0.42" @@ -1884,12 +1885,11 @@ static int asus_acpi_add(struct acpi_device *device) if (result) goto fail_platform; - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { result = asus_backlight_init(asus); if (result) goto fail_backlight; - } else - pr_info("Backlight controlled by ACPI video driver\n"); + } result = asus_input_init(asus); if (result) diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 7543a56e0f45..6f8558f744a4 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1364,7 +1364,7 @@ static void asus_wmi_notify(u32 value, void *context) code = ASUS_WMI_BRN_DOWN; if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) { - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { asus_wmi_backlight_notify(asus, orig_code); goto exit; } @@ -1772,17 +1772,16 @@ static int asus_wmi_add(struct platform_device *pdev) stop this from showing up */ chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); if (chassis_type && !strcmp(chassis_type, "3")) - acpi_video_dmi_promote_vendor(); + acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); + if (asus->driver->quirks->wmi_backlight_power) - acpi_video_dmi_promote_vendor(); - if (!acpi_video_backlight_support()) { - pr_info("Disabling ACPI video driver\n"); - acpi_video_unregister(); + acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); + + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) goto fail_backlight; - } else - pr_info("Backlight controlled by ACPI video driver\n"); + } status = wmi_install_notify_handler(asus->driver->event_guid, asus_wmi_notify, asus); diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index b4e94471f3d5..f2706d27adff 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -82,7 +82,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/power_supply.h> #include <linux/fb.h> - +#include <acpi/video.h> /* ======= */ /* Defines */ @@ -959,7 +959,7 @@ static int __init compal_init(void) return -ENODEV; } - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index d688d806a8a5..01d081052b50 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -31,6 +31,7 @@ #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <acpi/video.h> #include 
"../../firmware/dcdbas.h" #define BRIGHTNESS_TOKEN 0x7d @@ -1920,13 +1921,8 @@ static int __init dell_init(void) debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, &dell_debugfs_fops); -#ifdef CONFIG_ACPI - /* In the event of an ACPI backlight being available, don't - * register the platform controller. - */ - if (acpi_video_backlight_support()) + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; -#endif get_buffer(); buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 6512a06bc053..f2d77fe696ac 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -35,6 +35,7 @@ #include <linux/acpi.h> #include <linux/string.h> #include <linux/dmi.h> +#include <acpi/video.h> MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver"); @@ -397,7 +398,7 @@ static int __init dell_wmi_init(void) } dmi_walk(find_hk_type, NULL); - acpi_video = acpi_video_backlight_support(); + acpi_video = acpi_video_get_backlight_type() != acpi_backlight_vendor; err = dell_wmi_input_setup(); if (err) diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 844c2096bde9..8cdf315f9730 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -37,6 +37,7 @@ #include <linux/pci_hotplug.h> #include <linux/leds.h> #include <linux/dmi.h> +#include <acpi/video.h> #define EEEPC_LAPTOP_VERSION "0.1" #define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver" @@ -1433,12 +1434,10 @@ static int eeepc_acpi_add(struct acpi_device *device) if (result) goto fail_platform; - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { result = eeepc_backlight_init(eeepc); if (result) goto fail_backlight; - } else { - pr_info("Backlight controlled by ACPI video driver\n"); } result = eeepc_input_init(eeepc); diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 2a9afa261c61..1c62caff93fd 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -72,6 +72,7 @@ #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) #include <linux/leds.h> #endif +#include <acpi/video.h> #define FUJITSU_DRIVER_VERSION "0.6.0" @@ -1099,7 +1100,7 @@ static int __init fujitsu_init(void) /* Register backlight stuff */ - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); @@ -1137,8 +1138,7 @@ static int __init fujitsu_init(void) } /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */ - - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN; else diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index b496db87bc05..bea022830944 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -38,6 +38,7 @@ #include <linux/i8042.h> #include <linux/dmi.h> #include <linux/device.h> +#include <acpi/video.h> #define IDEAPAD_RFKILL_DEV_NUM (3) @@ -903,7 +904,7 @@ static int ideapad_acpi_add(struct platform_device *pdev) ideapad_sync_rfk_state(priv); ideapad_sync_touchpad_state(priv); - if 
(!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { ret = ideapad_backlight_init(priv); if (ret && ret != -ENODEV) goto backlight_failed; diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c index 8037c8b46241..6aa33c4a809f 100644 --- a/drivers/platform/x86/intel_oaktrail.c +++ b/drivers/platform/x86/intel_oaktrail.c @@ -50,6 +50,7 @@ #include <linux/platform_device.h> #include <linux/dmi.h> #include <linux/rfkill.h> +#include <acpi/video.h> #define DRIVER_NAME "intel_oaktrail" #define DRIVER_VERSION "0.4ac1" @@ -343,13 +344,11 @@ static int __init oaktrail_init(void) goto err_device_add; } - if (!acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) { ret = oaktrail_backlight_init(); if (ret) goto err_backlight; - - } else - pr_info("Backlight controlled by ACPI video driver\n"); + } ret = oaktrail_rfkill_init(); if (ret) { diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 085987730aab..42317704629d 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -64,6 +64,7 @@ #include <linux/i8042.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> +#include <acpi/video.h> #define MSI_DRIVER_VERSION "0.5" @@ -1069,9 +1070,8 @@ static int __init msi_init(void) /* Register backlight stuff */ - if (!quirks->old_ec_model || acpi_video_backlight_support()) { - pr_info("Brightness ignored, must be controlled by ACPI video driver\n"); - } else { + if (quirks->old_ec_model || + acpi_video_get_backlight_type() == acpi_backlight_vendor) { struct backlight_properties props; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index 6d2bac0c463c..978e6d640572 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -29,6 +29,7 @@ #include <linux/backlight.h> #include <linux/slab.h> #include <linux/module.h> +#include <acpi/video.h> MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>"); MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver"); @@ -320,7 +321,8 @@ static int __init msi_wmi_init(void) break; } - if (wmi_has_guid(MSIWMI_BIOS_GUID) && !acpi_video_backlight_support()) { + if (wmi_has_guid(MSIWMI_BIOS_GUID) && + acpi_video_get_backlight_type() == acpi_backlight_vendor) { err = msi_wmi_backlight_setup(); if (err) { pr_err("Unable to setup backlight device\n"); diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 9e701b2256f9..8c146e2b6727 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1720,27 +1720,14 @@ static int __init samsung_init(void) samsung->handle_backlight = true; samsung->quirks = quirks; - #ifdef CONFIG_ACPI if (samsung->quirks->broken_acpi_video) - acpi_video_dmi_promote_vendor(); - - /* Don't handle backlight here if the acpi video already handle it */ - if (acpi_video_backlight_support()) { - samsung->handle_backlight = false; - } else if (samsung->quirks->broken_acpi_video) { - pr_info("Disabling ACPI video driver\n"); - acpi_video_unregister(); - } + acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); + if (samsung->quirks->use_native_backlight) + acpi_video_set_dmi_backlight_type(acpi_backlight_native); - if (samsung->quirks->use_native_backlight) { - pr_info("Using native backlight driver\n"); - /* Tell acpi-video to not handle the 
backlight */ - acpi_video_dmi_promote_vendor(); - acpi_video_unregister(); - /* And also do not handle it ourselves */ + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) samsung->handle_backlight = false; - } #endif ret = samsung_platform_init(samsung); @@ -1751,12 +1738,6 @@ static int __init samsung_init(void) if (ret) goto error_sabi; -#ifdef CONFIG_ACPI - /* Only log that if we are really on a sabi platform */ - if (acpi_video_backlight_support()) - pr_info("Backlight controlled by ACPI video driver\n"); -#endif - ret = samsung_sysfs_init(samsung); if (ret) goto error_sysfs; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e51c1e753607..aeb80d1c2b07 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -69,6 +69,7 @@ #include <linux/miscdevice.h> #endif #include <asm/uaccess.h> +#include <acpi/video.h> #define dprintk(fmt, ...) \ do { \ @@ -3198,12 +3199,8 @@ static int sony_nc_add(struct acpi_device *device) sony_nc_function_setup(device, sony_pf_device); } - /* setup input devices and helper fifo */ - if (acpi_video_backlight_support()) { - pr_info("brightness ignored, must be controlled by ACPI video driver\n"); - } else { + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) sony_nc_backlight_setup(); - } /* create sony_pf sysfs attributes related to the SNC device */ for (item = sony_nc_values; item->name; ++item) { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 28f328136f0d..33e488cf5569 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -83,6 +83,7 @@ #include <sound/control.h> #include <sound/initval.h> #include <asm/uaccess.h> +#include <acpi/video.h> /* ThinkPad CMOS commands */ #define TP_CMOS_VOLUME_DOWN 0 @@ -3487,7 +3488,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* Do not issue duplicate brightness change events to * userspace. 
tpacpi_detect_brightness_capabilities() must have * been called before this point */ - if (acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) { pr_info("This ThinkPad has standard ACPI backlight " "brightness control, supported by the ACPI " "video driver\n"); @@ -6491,7 +6492,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm) return 1; } - if (acpi_video_backlight_support()) { + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) { if (brightness_enable > 1) { pr_info("Standard ACPI backlight interface " "available, not loading native one\n"); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 9956b9902bb4..59bf27ed72d6 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -2640,14 +2640,11 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev) */ if (dev->tr_backlight_supported || dmi_check_system(toshiba_vendor_backlight_dmi)) - acpi_video_dmi_promote_vendor(); + acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); - if (acpi_video_backlight_support()) + if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0; - /* acpi-video may have loaded before we called dmi_promote_vendor() */ - acpi_video_unregister_backlight(); - memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index ff0356fb378f..05796495be0e 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -28,8 +28,8 @@ #include "../base.h" #include "pnpacpi.h" -static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering, - int *polarity, int *shareable) +static void decode_irq_flags(struct pnp_dev *dev, int flags, u8 *triggering, + u8 *polarity, u8 *shareable) { switch (flags & (IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE)) { @@ -654,7 +654,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev, struct resource *p) { struct acpi_resource_irq *irq = &resource->data.irq; - int triggering, polarity, shareable; + u8 triggering, polarity, shareable; if (!pnp_resource_enabled(p)) { irq->interrupt_count = 0; @@ -683,7 +683,7 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, struct resource *p) { struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq; - int triggering, polarity, shareable; + u8 triggering, polarity, shareable; if (!pnp_resource_enabled(p)) { extended_irq->interrupt_count = 0; @@ -873,7 +873,7 @@ int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer) /* pnpacpi_build_resource_template allocates extra mem */ int res_cnt = (buffer->length - 1) / sizeof(struct acpi_resource) - 1; struct acpi_resource *resource = buffer->pointer; - int port = 0, irq = 0, dma = 0, mem = 0; + unsigned int port = 0, irq = 0, dma = 0, mem = 0; pnp_dbg(&dev->dev, "encode %d resources\n", res_cnt); while (i < res_cnt) { diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c index 49c1720df59a..515f33882ab8 100644 --- a/drivers/pnp/system.c +++ b/drivers/pnp/system.c @@ -7,6 +7,7 @@ * Bjorn Helgaas <bjorn.helgaas@hp.com> */ +#include <linux/acpi.h> #include <linux/pnp.h> #include <linux/device.h> #include <linux/init.h> @@ -22,25 +23,41 @@ static const struct pnp_device_id pnp_dev_table[] = { {"", 0} }; +#ifdef CONFIG_ACPI +static bool __reserve_range(u64 
start, unsigned int length, bool io, char *desc) +{ + u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY; + return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc); +} +#else +static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc) +{ + struct resource *res; + + res = io ? request_region(start, length, desc) : + request_mem_region(start, length, desc); + if (res) { + res->flags &= ~IORESOURCE_BUSY; + return true; + } + return false; +} +#endif + static void reserve_range(struct pnp_dev *dev, struct resource *r, int port) { char *regionid; const char *pnpid = dev_name(&dev->dev); resource_size_t start = r->start, end = r->end; - struct resource *res; + bool reserved; regionid = kmalloc(16, GFP_KERNEL); if (!regionid) return; snprintf(regionid, 16, "pnp %s", pnpid); - if (port) - res = request_region(start, end - start + 1, regionid); - else - res = request_mem_region(start, end - start + 1, regionid); - if (res) - res->flags &= ~IORESOURCE_BUSY; - else + reserved = __reserve_range(start, end - start + 1, !!port, regionid); + if (!reserved) kfree(regionid); /* @@ -49,7 +66,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port) * have double reservations. */ dev_info(&dev->dev, "%pR %s reserved\n", r, - res ? "has been" : "could not be"); + reserved ? "has been" : "could not be"); } static void reserve_resources_of_dev(struct pnp_dev *dev) diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index 1e08195551fe..5f855f99bdfc 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -158,7 +158,6 @@ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id) HRTIMER_MODE_REL); } else { hrtimer_cancel(&data->timer_trigger); - /* omitting return value check, timer should have been valid */ } return IRQ_HANDLED; } diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index fd243231620a..482b22ddc7b2 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -187,6 +187,7 @@ struct rapl_package { }; struct rapl_defaults { + u8 floor_freq_reg_addr; int (*check_unit)(struct rapl_package *rp, int cpu); void (*set_floor_freq)(struct rapl_domain *rd, bool mode); u64 (*compute_time_window)(struct rapl_package *rp, u64 val, @@ -196,7 +197,8 @@ struct rapl_defaults { static struct rapl_defaults *rapl_defaults; /* Sideband MBI registers */ -#define IOSF_CPU_POWER_BUDGET_CTL (0x2) +#define IOSF_CPU_POWER_BUDGET_CTL_BYT (0x2) +#define IOSF_CPU_POWER_BUDGET_CTL_TNG (0xdf) #define PACKAGE_PLN_INT_SAVED BIT(0) #define MAX_PRIM_NAME (32) @@ -358,7 +360,8 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode) get_online_cpus(); rapl_write_data_raw(rd, PL1_ENABLE, mode); - rapl_defaults->set_floor_freq(rd, mode); + if (rapl_defaults->set_floor_freq) + rapl_defaults->set_floor_freq(rd, mode); put_online_cpus(); return 0; @@ -979,16 +982,22 @@ static void set_floor_freq_atom(struct rapl_domain *rd, bool enable) static u32 power_ctrl_orig_val; u32 mdata; + if (!rapl_defaults->floor_freq_reg_addr) { + pr_err("Invalid floor frequency config register\n"); + return; + } + if (!power_ctrl_orig_val) iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ, - IOSF_CPU_POWER_BUDGET_CTL, &power_ctrl_orig_val); + rapl_defaults->floor_freq_reg_addr, + &power_ctrl_orig_val); mdata = power_ctrl_orig_val; if (enable) { mdata &= ~(0x7f << 8); mdata |= 1 << 8; } iosf_mbi_write(BT_MBI_UNIT_PMC, 
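The intel_rapl hunk above (continuing below) turns the single hard-coded IOSF_CPU_POWER_BUDGET_CTL register into a per-model floor_freq_reg_addr field and lets set_floor_freq be absent entirely; set_domain_enable() and set_floor_freq_atom() now check before acting. A sketch of that optional-callback shape, with illustrative stand-ins for rapl_defaults:

#include <linux/types.h>

struct example_soc_ops {			/* stand-in for rapl_defaults */
	u8 floor_freq_reg_addr;			/* 0: no such register on this SoC */
	void (*set_floor_freq)(bool enable);	/* NULL: not supported */
};

static void example_set_enable(const struct example_soc_ops *ops, bool enable)
{
	/* Guard the optional hook instead of giving every SoC a stub. */
	if (ops->set_floor_freq)
		ops->set_floor_freq(enable);
}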
BT_MBI_PMC_WRITE, - IOSF_CPU_POWER_BUDGET_CTL, mdata); + rapl_defaults->floor_freq_reg_addr, mdata); } static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value, @@ -1029,6 +1038,7 @@ static u64 rapl_compute_time_window_atom(struct rapl_package *rp, u64 value, } static const struct rapl_defaults rapl_defaults_core = { + .floor_freq_reg_addr = 0, .check_unit = rapl_check_unit_core, .set_floor_freq = set_floor_freq_default, .compute_time_window = rapl_compute_time_window_core, @@ -1041,12 +1051,34 @@ static const struct rapl_defaults rapl_defaults_hsw_server = { .dram_domain_energy_unit = 15300, }; -static const struct rapl_defaults rapl_defaults_atom = { +static const struct rapl_defaults rapl_defaults_byt = { + .floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT, + .check_unit = rapl_check_unit_atom, + .set_floor_freq = set_floor_freq_atom, + .compute_time_window = rapl_compute_time_window_atom, +}; + +static const struct rapl_defaults rapl_defaults_tng = { + .floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_TNG, .check_unit = rapl_check_unit_atom, .set_floor_freq = set_floor_freq_atom, .compute_time_window = rapl_compute_time_window_atom, }; +static const struct rapl_defaults rapl_defaults_ann = { + .floor_freq_reg_addr = 0, + .check_unit = rapl_check_unit_atom, + .set_floor_freq = NULL, + .compute_time_window = rapl_compute_time_window_atom, +}; + +static const struct rapl_defaults rapl_defaults_cht = { + .floor_freq_reg_addr = 0, + .check_unit = rapl_check_unit_atom, + .set_floor_freq = NULL, + .compute_time_window = rapl_compute_time_window_atom, +}; + #define RAPL_CPU(_model, _ops) { \ .vendor = X86_VENDOR_INTEL, \ .family = 6, \ @@ -1057,7 +1089,7 @@ static const struct rapl_defaults rapl_defaults_atom = { static const struct x86_cpu_id rapl_ids[] __initconst = { RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */ RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */ - RAPL_CPU(0x37, rapl_defaults_atom),/* Valleyview */ + RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */ RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */ RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */ RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */ @@ -1065,10 +1097,11 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */ RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */ - RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */ - RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */ + RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */ + RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */ RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */ - RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */ + RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */ + RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */ {} }; MODULE_DEVICE_TABLE(x86cpu, rapl_ids); diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index ba34c7d89042..3a7769fe53de 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -223,13 +223,16 @@ void *pwm_get_chip_data(struct pwm_device *pwm) EXPORT_SYMBOL_GPL(pwm_get_chip_data); /** - * pwmchip_add() - register a new PWM chip + * pwmchip_add_with_polarity() - register a new PWM chip * @chip: the PWM chip to add + * @polarity: initial polarity of PWM channels * * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base - * will be used. + * will be used. The initial polarity for all channels is specified by the + * @polarity parameter. 
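The kerneldoc above introduces pwmchip_add_with_polarity(), implemented in the hunk that follows: pwmchip_add() becomes a thin wrapper passing PWM_POLARITY_NORMAL, while drivers whose hardware idles inverted can declare that at registration time (pwm-bcm-kona does exactly this further down). A hedged sketch of a hypothetical driver probe:

#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>

struct example_pwm {			/* hypothetical driver state */
	struct pwm_chip chip;
};

static int example_pwm_probe(struct platform_device *pdev)
{
	struct example_pwm *ep;

	ep = devm_kzalloc(&pdev->dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->chip.dev = &pdev->dev;
	ep->chip.npwm = 1;
	/* ep->chip.ops = &example_pwm_ops;  hypothetical ops table, omitted */

	/*
	 * The hardware idles with inverted output, so seed each channel's
	 * polarity at registration instead of claiming PWM_POLARITY_NORMAL.
	 */
	return pwmchip_add_with_polarity(&ep->chip, PWM_POLARITY_INVERSED);
}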
*/ -int pwmchip_add(struct pwm_chip *chip) +int pwmchip_add_with_polarity(struct pwm_chip *chip, + enum pwm_polarity polarity) { struct pwm_device *pwm; unsigned int i; @@ -259,6 +262,7 @@ int pwmchip_add(struct pwm_chip *chip) pwm->chip = chip; pwm->pwm = chip->base + i; pwm->hwpwm = i; + pwm->polarity = polarity; radix_tree_insert(&pwm_tree, pwm->pwm, pwm); } @@ -279,6 +283,19 @@ out: mutex_unlock(&pwm_lock); return ret; } +EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity); + +/** + * pwmchip_add() - register a new PWM chip + * @chip: the PWM chip to add + * + * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base + * will be used. The initial polarity for all channels is normal. + */ +int pwmchip_add(struct pwm_chip *chip) +{ + return pwmchip_add_with_polarity(chip, PWM_POLARITY_NORMAL); +} EXPORT_SYMBOL_GPL(pwmchip_add); /** @@ -586,6 +603,23 @@ void pwm_add_table(struct pwm_lookup *table, size_t num) } /** + * pwm_remove_table() - unregister PWM device consumers + * @table: array of consumers to unregister + * @num: number of consumers in table + */ +void pwm_remove_table(struct pwm_lookup *table, size_t num) +{ + mutex_lock(&pwm_lookup_lock); + + while (num--) { + list_del(&table->list); + table++; + } + + mutex_unlock(&pwm_lookup_lock); +} + +/** * pwm_get() - look up and request a PWM device * @dev: device for PWM consumer * @con_id: consumer name diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index d3c22de9ee47..a947c9095d9d 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -8,9 +8,11 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> @@ -21,6 +23,7 @@ #define PWM_ENA 0x04 #define PWM_DIS 0x08 #define PWM_SR 0x0C +#define PWM_ISR 0x1C /* Bit field in SR */ #define PWM_SR_ALL_CH_ON 0x0F @@ -60,6 +63,9 @@ struct atmel_pwm_chip { struct clk *clk; void __iomem *base; + unsigned int updated_pwms; + struct mutex isr_lock; /* ISR is cleared when read, ensure only one thread does that */ + void (*config)(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long dty, unsigned long prd); }; @@ -144,6 +150,10 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); atmel_pwm->config(chip, pwm, dty, prd); + mutex_lock(&atmel_pwm->isr_lock); + atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR); + atmel_pwm->updated_pwms &= ~(1 << pwm->hwpwm); + mutex_unlock(&atmel_pwm->isr_lock); clk_disable(atmel_pwm->clk); return ret; @@ -155,24 +165,25 @@ static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm, struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned int val; - if (test_bit(PWMF_ENABLED, &pwm->flags)) { - /* - * If the PWM channel is enabled, using the update register, - * it needs to set bit 10 of CMR to 0 - */ - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty); - val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); - val &= ~PWM_CMR_UPD_CDTY; - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); - } else { - /* - * If the PWM channel is disabled, write value to duty and - * period registers directly. 
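pwm_remove_table() above finally gives board code a counterpart to pwm_add_table(), so lookup entries can be withdrawn on error or module unload instead of lingering in the global list. A sketch of the paired usage; the board, provider, and consumer names are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pwm.h>

static struct pwm_lookup example_lookup[] = {
	PWM_LOOKUP("example-pwmchip", 0, "example-backlight", NULL,
		   5000000, PWM_POLARITY_NORMAL),
};

static int __init example_board_init(void)
{
	pwm_add_table(example_lookup, ARRAY_SIZE(example_lookup));
	return 0;
}

static void __exit example_board_exit(void)
{
	/* New in this series: undo the registration symmetrically. */
	pwm_remove_table(example_lookup, ARRAY_SIZE(example_lookup));
}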
- */ - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty); - atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd); - } + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CUPD, dty); + + val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); + val &= ~PWM_CMR_UPD_CDTY; + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); + + /* + * If the PWM channel is enabled, only update CDTY by using the update + * register, it needs to set bit 10 of CMR to 0 + */ + if (test_bit(PWMF_ENABLED, &pwm->flags)) + return; + /* + * If the PWM channel is disabled, write value to duty and period + * registers directly. + */ + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CDTY, dty); + atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWMV1_CPRD, prd); } static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm, @@ -242,7 +253,22 @@ static int atmel_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); + unsigned long timeout = jiffies + 2 * HZ; + + /* + * Wait for at least a complete period to have passed before disabling a + * channel to be sure that CDTY has been updated + */ + mutex_lock(&atmel_pwm->isr_lock); + atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR); + + while (!(atmel_pwm->updated_pwms & (1 << pwm->hwpwm)) && + time_before(jiffies, timeout)) { + usleep_range(10, 100); + atmel_pwm->updated_pwms |= atmel_pwm_readl(atmel_pwm, PWM_ISR); + } + mutex_unlock(&atmel_pwm->isr_lock); atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm); clk_disable(atmel_pwm->clk); @@ -357,6 +383,8 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->chip.npwm = 4; atmel_pwm->chip.can_sleep = true; atmel_pwm->config = data->config; + atmel_pwm->updated_pwms = 0; + mutex_init(&atmel_pwm->isr_lock); ret = pwmchip_add(&atmel_pwm->chip); if (ret < 0) { @@ -378,6 +406,7 @@ static int atmel_pwm_remove(struct platform_device *pdev) struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev); clk_unprepare(atmel_pwm->clk); + mutex_destroy(&atmel_pwm->isr_lock); return pwmchip_remove(&atmel_pwm->chip); } diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index 02bc048892a9..7af8fea2dc5b 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c @@ -266,18 +266,15 @@ static int kona_pwmc_probe(struct platform_device *pdev) return ret; } - /* Set smooth mode, push/pull, and normal polarity for all channels */ - for (chan = 0; chan < kp->chip.npwm; chan++) { - value |= (1 << PWM_CONTROL_SMOOTH_SHIFT(chan)); + /* Set push/pull for all channels */ + for (chan = 0; chan < kp->chip.npwm; chan++) value |= (1 << PWM_CONTROL_TYPE_SHIFT(chan)); - value |= (1 << PWM_CONTROL_POLARITY_SHIFT(chan)); - } writel(value, kp->base + PWM_CONTROL_OFFSET); clk_disable_unprepare(kp->clk); - ret = pwmchip_add(&kp->chip); + ret = pwmchip_add_with_polarity(&kp->chip, PWM_POLARITY_INVERSED); if (ret < 0) dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c index cf20d2beacdd..45042c1b2046 100644 --- a/drivers/pwm/pwm-lpss-pci.c +++ b/drivers/pwm/pwm-lpss-pci.c @@ -44,8 +44,10 @@ static void pwm_lpss_remove_pci(struct pci_dev *pdev) } static const struct pci_device_id pwm_lpss_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, { 
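The pwm-atmel changes above close a race: CDTY updates only take effect at a period boundary, so disabling a channel too early could latch the stale duty cycle. The driver now accumulates per-channel update events from the clear-on-read PWM_ISR under a mutex and, on disable, polls until the channel has seen a period edge or two seconds elapse. The pattern reduced to its essentials; read_isr stands in for atmel_pwm_readl(..., PWM_ISR):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static bool example_wait_for_period(u32 (*read_isr)(void), unsigned int hwpwm)
{
	unsigned long timeout = jiffies + 2 * HZ;
	u32 seen = 0;

	/* The ISR clears on read, so accumulate events rather than re-test. */
	while (!(seen & BIT(hwpwm)) && time_before(jiffies, timeout)) {
		usleep_range(10, 100);
		seen |= read_isr();
	}
	return seen & BIT(hwpwm);
}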
PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, + { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, { }, diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index ff201e1b9219..ada2d326dc3e 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -456,6 +456,7 @@ static const struct of_device_id samsung_pwm_matches[] = { { .compatible = "samsung,exynos4210-pwm", .data = &s5p64x0_variant }, {}, }; +MODULE_DEVICE_TABLE(of, samsung_pwm_matches); static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip) { diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 47a1b2ea76c4..d6a126c17c03 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -83,7 +83,7 @@ static u16 rio_destid_alloc(struct rio_net *net) * @destid: destID to reserve * * Tries to reserve the specified destID. - * Returns 0 if successfull. + * Returns 0 if successful. */ static int rio_destid_reserve(struct rio_net *net, u16 destid) { diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 75f4bfc2b98a..b3c6ff49103b 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -297,7 +297,6 @@ static struct scsi_host_template zfcp_scsi_host_template = { * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, /* GCD, adjusted later */ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, - .cmd_per_lun = 1, .use_clustering = 1, .shost_attrs = zfcp_sysfs_shost_attrs, .sdev_attrs = zfcp_sysfs_sdev_attrs, diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index b021bcb88537..456e1567841c 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -52,7 +52,7 @@ config SCSI_MQ_DEFAULT This option enables the new blk-mq based I/O path for SCSI devices by default. With the option the scsi_mod.use_blk_mq module/boot option defaults to Y, without it to N, but it can - still be overriden either way. + still be overridden either way. If unsure say N. @@ -503,7 +503,7 @@ config SCSI_DPT_I2O config SCSI_ADVANSYS tristate "AdvanSys SCSI support" - depends on SCSI && VIRT_TO_BUS && !ARM + depends on SCSI depends on ISA || EISA || PCI help This is a driver for all SCSI host adapters manufactured by @@ -634,6 +634,23 @@ config FCOE_FNIC <file:Documentation/scsi/scsi.txt>. The module will be called fnic. +config SCSI_SNIC + tristate "Cisco SNIC Driver" + depends on PCI && SCSI + help + This is support for the Cisco PCI-Express SCSI HBA. + + To compile this driver as a module, choose M here and read + <file:Documentation/scsi/scsi.txt>. + The module will be called snic. + +config SCSI_SNIC_DEBUG_FS + bool "Cisco SNIC Driver Debugfs Support" + depends on SCSI_SNIC && DEBUG_FS + help + This enables listing debugging information from the SNIC driver + via the debugfs file system + config SCSI_DMX3191D tristate "DMX3191D SCSI support" depends on PCI && SCSI @@ -1743,7 +1760,6 @@ config SCSI_BFA_FC config SCSI_VIRTIO tristate "virtio-scsi support" depends on VIRTIO - select BLK_DEV_INTEGRITY help This is the virtual HBA driver for virtio. If the kernel will be used in a virtual machine, say Y or M.
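Dropping the VIRT_TO_BUS (and !ARM) dependency from SCSI_ADVANSYS is only possible because the large advansys rewrite later in this series stops round-tripping pointers through virt_to_bus()/bus_to_virt() and uses the DMA API instead. The general shape of that conversion, as a sketch rather than the driver's exact code:

#include <linux/dma-mapping.h>

static int example_map_for_hba(struct device *dev, void *buf, size_t len,
			       dma_addr_t *bus_addr)
{
	/* Replaces virt_to_bus(buf): a mapping the device may DMA into. */
	*bus_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *bus_addr))
		return -ENOMEM;

	/*
	 * The CPU keeps using 'buf'; completion paths look the request up
	 * by tag (see the advansys hunks below) instead of bus_to_virt().
	 */
	return 0;
}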
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index dee160a4f163..91209e3d27e3 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_LIBFC) += libfc/ obj-$(CONFIG_LIBFCOE) += fcoe/ obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE_FNIC) += fnic/ +obj-$(CONFIG_SCSI_SNIC) += snic/ obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o @@ -161,6 +162,7 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/ obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \ scsicam.o scsi_error.o scsi_lib.o +scsi_mod-y += scsi_common.o scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c index 42c7161474f7..6e110c630d2c 100644 --- a/drivers/scsi/NCR53c406a.c +++ b/drivers/scsi/NCR53c406a.c @@ -1064,7 +1064,6 @@ static struct scsi_host_template driver_template = .can_queue = 1 /* can_queue */, .this_id = 7 /* SCSI ID of the chip */, .sg_tablesize = 32 /*SG_ALL*/ /*SG_NONE*/, - .cmd_per_lun = 1 /* commands per lun */, .unchecked_isa_dma = 1 /* unchecked_isa_dma */, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index 7e33a61c1ba4..cac6b37d7b1b 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -1078,7 +1078,6 @@ static struct scsi_host_template inia100_template = { .can_queue = 1, .this_id = 1, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 4596e9dd757c..e63cf9f22f36 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -46,7 +46,7 @@ static int aac_src_get_sync_status(struct aac_dev *dev); -irqreturn_t aac_src_intr_message(int irq, void *dev_id) +static irqreturn_t aac_src_intr_message(int irq, void *dev_id) { struct aac_msix_ctx *ctx; struct aac_dev *dev; diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index ae95e347f37d..4305178e4e01 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -1,12 +1,10 @@ -#define DRV_NAME "advansys" -#define ASC_VERSION "3.4" /* AdvanSys Driver Version */ - /* * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-2000 Advanced System Products, Inc. * Copyright (c) 2000-2001 ConnectCom Solutions, Inc. * Copyright (c) 2007 Matthew Wilcox <matthew@wil.cx> + * Copyright (c) 2014 Hannes Reinecke <hare@suse.de> * All Rights Reserved. * * This program is free software; you can redistribute it and/or modify @@ -39,6 +37,7 @@ #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> +#include <linux/dmapool.h> #include <asm/io.h> #include <asm/dma.h> @@ -49,26 +48,15 @@ #include <scsi/scsi.h> #include <scsi/scsi_host.h> +#define DRV_NAME "advansys" +#define ASC_VERSION "3.5" /* AdvanSys Driver Version */ + /* FIXME: * - * 1. Although all of the necessary command mapping places have the - * appropriate dma_map.. APIs, the driver still processes its internal - * queue using bus_to_virt() and virt_to_bus() which are illegal under - * the API. The entire queue processing structure will need to be - * altered to fix this. - * 2. Need to add memory mapping workaround. Test the memory mapping. - * If it doesn't work revert to I/O port access. 
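The .cmd_per_lun = 1 deletions here (NCR53c406a, a100u2w, and zfcp above) assume the midlayer supplies the same value when the template leaves the field zero; a companion core change in this series makes the scan code fall back to a queue depth of 1 for such hosts, so the explicit setting was dead weight. A trimmed template for a hypothetical adapter, with unrelated fields omitted:

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
	.name		= "example-hba",	/* hypothetical */
	.can_queue	= 1,
	.this_id	= 7,
	.sg_tablesize	= SG_ALL,
	/* .cmd_per_lun left at 0: the core defaults it to 1 (assumed) */
	.use_clustering	= ENABLE_CLUSTERING,
};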
Can a test be done - * safely? - * 3. Handle an interrupt not working. Keep an interrupt counter in - * the interrupt handler. In the timeout function if the interrupt - * has not occurred then print a message and run in polled mode. - * 4. Need to add support for target mode commands, cf. CAM XPT. - * 5. check DMA mapping functions for failure - * 6. Use scsi_transport_spi - * 7. advansys_info is not safe against multiple simultaneous callers - * 8. Add module_param to override ISA/VLB ioport array + * 1. Use scsi_transport_spi + * 2. advansys_info is not safe against multiple simultaneous callers + * 3. Add module_param to override ISA/VLB ioport array */ -#warning this driver is still not properly converted to the DMA API /* Enable driver /proc statistics. */ #define ADVANSYS_STATS @@ -76,31 +64,8 @@ /* Enable driver tracing. */ #undef ADVANSYS_DEBUG -/* - * Portable Data Types - * - * Any instance where a 32-bit long or pointer type is assumed - * for precision or HW defined structures, the following define - * types must be used. In Linux the char, short, and int types - * are all consistent at 8, 16, and 32 bits respectively. Pointers - * and long types are 64 bits on Alpha and UltraSPARC. - */ -#define ASC_PADDR __u32 /* Physical/Bus address data type. */ -#define ASC_VADDR __u32 /* Virtual address data type. */ -#define ASC_DCNT __u32 /* Unsigned Data count type. */ -#define ASC_SDCNT __s32 /* Signed Data count type. */ - typedef unsigned char uchar; -#ifndef TRUE -#define TRUE (1) -#endif -#ifndef FALSE -#define FALSE (0) -#endif - -#define ERR (-1) -#define UW_ERR (uint)(0xFFFF) #define isodd_word(val) ((((uint)val) & (uint)0x0001) != 0) #define PCI_VENDOR_ID_ASP 0x10cd @@ -111,15 +76,6 @@ typedef unsigned char uchar; #define PCI_DEVICE_ID_38C0800_REV1 0x2500 #define PCI_DEVICE_ID_38C1600_REV1 0x2700 -/* - * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. - * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() - * macro re-defined to be able to obtain a ASC_SCSI_Q pointer from the - * SRB structure. 
- */ -#define CC_VERY_LONG_SG_LIST 0 -#define ASC_SRB2SCSIQ(srb_ptr) (srb_ptr) - #define PortAddr unsigned int /* port address size */ #define inp(port) inb(port) #define outp(port, byte) outb((byte), (port)) @@ -307,15 +263,15 @@ typedef struct asc_scsiq_1 { uchar sg_queue_cnt; uchar target_id; uchar target_lun; - ASC_PADDR data_addr; - ASC_DCNT data_cnt; - ASC_PADDR sense_addr; + __le32 data_addr; + __le32 data_cnt; + __le32 sense_addr; uchar sense_len; uchar extra_bytes; } ASC_SCSIQ_1; typedef struct asc_scsiq_2 { - ASC_VADDR srb_ptr; + u32 srb_tag; uchar target_ix; uchar flag; uchar cdb_len; @@ -338,8 +294,8 @@ typedef struct asc_scsiq_4 { uchar y_res; ushort x_req_count; ushort x_reconnect_rtn; - ASC_PADDR x_saved_data_addr; - ASC_DCNT x_saved_data_cnt; + __le32 x_saved_data_addr; + __le32 x_saved_data_cnt; } ASC_SCSIQ_4; typedef struct asc_q_done_info { @@ -351,12 +307,12 @@ typedef struct asc_q_done_info { uchar sense_len; uchar extra_bytes; uchar res; - ASC_DCNT remain_bytes; + u32 remain_bytes; } ASC_QDONE_INFO; typedef struct asc_sg_list { - ASC_PADDR addr; - ASC_DCNT bytes; + __le32 addr; + __le32 bytes; } ASC_SG_LIST; typedef struct asc_sg_head { @@ -376,17 +332,6 @@ typedef struct asc_scsi_q { ushort next_sg_index; } ASC_SCSI_Q; -typedef struct asc_scsi_req_q { - ASC_SCSIQ_1 r1; - ASC_SCSIQ_2 r2; - uchar *cdbptr; - ASC_SG_HEAD *sg_head; - uchar *sense_ptr; - ASC_SCSIQ_3 r3; - uchar cdb[ASC_MAX_CDB_LEN]; - uchar sense[ASC_MIN_SENSE_LEN]; -} ASC_SCSI_REQ_Q; - typedef struct asc_scsi_bios_req_q { ASC_SCSIQ_1 r1; ASC_SCSIQ_2 r2; @@ -570,7 +515,7 @@ typedef struct asc_dvc_var { dma_addr_t overrun_dma; uchar scsi_reset_wait; uchar chip_no; - char is_in_int; + bool is_in_int; uchar max_total_qng; uchar cur_total_qng; uchar in_critical_cnt; @@ -586,15 +531,13 @@ typedef struct asc_dvc_var { char redo_scam; ushort res2; uchar dos_int13_table[ASC_MAX_TID + 1]; - ASC_DCNT max_dma_count; + unsigned int max_dma_count; ASC_SCSI_BIT_ID_TYPE no_scam; ASC_SCSI_BIT_ID_TYPE pci_fix_asyn_xfer; uchar min_sdtr_index; uchar max_sdtr_index; struct asc_board *drv_ptr; - int ptr_map_count; - void **ptr_map; - ASC_DCNT uc_break; + unsigned int uc_break; } ASC_DVC_VAR; typedef struct asc_dvc_inq_info { @@ -602,8 +545,8 @@ typedef struct asc_dvc_inq_info { } ASC_DVC_INQ_INFO; typedef struct asc_cap_info { - ASC_DCNT lba; - ASC_DCNT blk_size; + u32 lba; + u32 blk_size; } ASC_CAP_INFO; typedef struct asc_cap_info_array { @@ -929,31 +872,6 @@ typedef struct asc_mc_saved { #define AscReadChipDvcID(port) (uchar)inp((port)+IOP_REG_ID) #define AscWriteChipDvcID(port, data) outp((port)+IOP_REG_ID, data) -/* - * Portable Data Types - * - * Any instance where a 32-bit long or pointer type is assumed - * for precision or HW defined structures, the following define - * types must be used. In Linux the char, short, and int types - * are all consistent at 8, 16, and 32 bits respectively. Pointers - * and long types are 64 bits on Alpha and UltraSPARC. - */ -#define ADV_PADDR __u32 /* Physical address data type. */ -#define ADV_VADDR __u32 /* Virtual address data type. */ -#define ADV_DCNT __u32 /* Unsigned Data count type. */ -#define ADV_SDCNT __s32 /* Signed Data count type. */ - -/* - * These macros are used to convert a virtual address to a - * 32-bit value. This currently can be used on Linux Alpha - * which uses 64-bit virtual address but a 32-bit bus address. - * This is likely to break in the future, but doing this now - * will give us time to change the HW and FW to handle 64-bit - * addresses. 
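The deleted ASC_PADDR/ADV_PADDR/ADV_VADDR typedefs and the ADV_VADDR_TO_U32/ADV_U32_TO_VADDR macros above papered over byte order and pointer width; the rework instead declares every field the firmware reads as __le32, so sparse can flag a missed conversion. The idiom in isolation, with an illustrative structure that is not taken from the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_fw_desc {
	__le32 data_addr;	/* bus address, little-endian on the wire */
	__le32 data_cnt;	/* firmware rewrites this with the residual */
};

static void example_fill(struct example_fw_desc *d, dma_addr_t addr, u32 len)
{
	d->data_addr = cpu_to_le32(addr);	/* explicit, checkable */
	d->data_cnt  = cpu_to_le32(len);
}

static u32 example_residual(const struct example_fw_desc *d)
{
	return le32_to_cpu(d->data_cnt);
}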
- */ -#define ADV_VADDR_TO_U32 virt_to_bus -#define ADV_U32_TO_VADDR bus_to_virt - #define AdvPortAddr void __iomem * /* Virtual memory address size */ /* @@ -965,8 +883,6 @@ typedef struct asc_mc_saved { #define ADV_MEM_WRITEW(addr, word) writew(word, addr) #define ADV_MEM_WRITEDW(addr, dword) writel(dword, addr) -#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 15) - /* * Define total number of simultaneous maximum element scatter-gather * request blocks per wide adapter. ASC_DEF_MAX_HOST_QNG (253) is the @@ -1747,44 +1663,37 @@ typedef struct adveep_38C1600_config { * little-endian. */ typedef struct adv_carr_t { - ADV_VADDR carr_va; /* Carrier Virtual Address */ - ADV_PADDR carr_pa; /* Carrier Physical Address */ - ADV_VADDR areq_vpa; /* ASC_SCSI_REQ_Q Virtual or Physical Address */ + __le32 carr_va; /* Carrier Virtual Address */ + __le32 carr_pa; /* Carrier Physical Address */ + __le32 areq_vpa; /* ADV_SCSI_REQ_Q Virtual or Physical Address */ /* * next_vpa [31:4] Carrier Virtual or Physical Next Pointer * * next_vpa [3:1] Reserved Bits * next_vpa [0] Done Flag set in Response Queue. */ - ADV_VADDR next_vpa; + __le32 next_vpa; } ADV_CARR_T; /* * Mask used to eliminate low 4 bits of carrier 'next_vpa' field. */ -#define ASC_NEXT_VPA_MASK 0xFFFFFFF0 - -#define ASC_RQ_DONE 0x00000001 -#define ASC_RQ_GOOD 0x00000002 -#define ASC_CQ_STOPPER 0x00000000 +#define ADV_NEXT_VPA_MASK 0xFFFFFFF0 -#define ASC_GET_CARRP(carrp) ((carrp) & ASC_NEXT_VPA_MASK) +#define ADV_RQ_DONE 0x00000001 +#define ADV_RQ_GOOD 0x00000002 +#define ADV_CQ_STOPPER 0x00000000 -#define ADV_CARRIER_NUM_PAGE_CROSSING \ - (((ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) + (PAGE_SIZE - 1))/PAGE_SIZE) - -#define ADV_CARRIER_BUFSIZE \ - ((ADV_CARRIER_COUNT + ADV_CARRIER_NUM_PAGE_CROSSING) * sizeof(ADV_CARR_T)) +#define ADV_GET_CARRP(carrp) ((carrp) & ADV_NEXT_VPA_MASK) /* - * ASC_SCSI_REQ_Q 'a_flag' definitions - * - * The Adv Library should limit use to the lower nibble (4 bits) of - * a_flag. Drivers are free to use the upper nibble (4 bits) of a_flag. + * Each carrier is 64 bytes, and we need three additional + * carrier for icq, irq, and the termination carrier. */ -#define ADV_POLL_REQUEST 0x01 /* poll for request completion */ -#define ADV_SCSIQ_DONE 0x02 /* request done */ -#define ADV_DONT_RETRY 0x08 /* don't do retry */ +#define ADV_CARRIER_COUNT (ASC_DEF_MAX_HOST_QNG + 3) + +#define ADV_CARRIER_BUFSIZE \ + (ADV_CARRIER_COUNT * sizeof(ADV_CARR_T)) #define ADV_CHIP_ASC3550 0x01 /* Ultra-Wide IC */ #define ADV_CHIP_ASC38C0800 0x02 /* Ultra2-Wide/LVD IC */ @@ -1816,15 +1725,15 @@ typedef struct adv_dvc_cfg { struct adv_dvc_var; struct adv_scsi_req_q; -typedef struct asc_sg_block { +typedef struct adv_sg_block { uchar reserved1; uchar reserved2; uchar reserved3; uchar sg_cnt; /* Valid entries in block. */ - ADV_PADDR sg_ptr; /* Pointer to next sg block. */ + __le32 sg_ptr; /* Pointer to next sg block. */ struct { - ADV_PADDR sg_addr; /* SG element address. */ - ADV_DCNT sg_count; /* SG element count. */ + __le32 sg_addr; /* SG element address. */ + __le32 sg_count; /* SG element count. */ } sg_list[NO_OF_SG_PER_BLOCK]; } ADV_SG_BLOCK; @@ -1844,10 +1753,10 @@ typedef struct adv_scsi_req_q { uchar target_cmd; uchar target_id; /* Device target identifier. */ uchar target_lun; /* Device target logical unit number. */ - ADV_PADDR data_addr; /* Data buffer physical address. */ - ADV_DCNT data_cnt; /* Data count. Ucode sets to residual. 
*/ - ADV_PADDR sense_addr; - ADV_PADDR carr_pa; + __le32 data_addr; /* Data buffer physical address. */ + __le32 data_cnt; /* Data count. Ucode sets to residual. */ + __le32 sense_addr; + __le32 carr_pa; uchar mflag; uchar sense_len; uchar cdb_len; /* SCSI CDB length. Must <= 16 bytes. */ @@ -1857,29 +1766,26 @@ typedef struct adv_scsi_req_q { uchar host_status; /* Ucode host status. */ uchar sg_working_ix; uchar cdb[12]; /* SCSI CDB bytes 0-11. */ - ADV_PADDR sg_real_addr; /* SG list physical address. */ - ADV_PADDR scsiq_rptr; + __le32 sg_real_addr; /* SG list physical address. */ + __le32 scsiq_rptr; uchar cdb16[4]; /* SCSI CDB bytes 12-15. */ - ADV_VADDR scsiq_ptr; - ADV_VADDR carr_va; + __le32 scsiq_ptr; + __le32 carr_va; /* * End of microcode structure - 60 bytes. The rest of the structure * is used by the Adv Library and ignored by the microcode. */ - ADV_VADDR srb_ptr; + u32 srb_tag; ADV_SG_BLOCK *sg_list_ptr; /* SG list virtual address. */ - char *vdata_addr; /* Data buffer virtual address. */ - uchar a_flag; - uchar pad[2]; /* Pad out to a word boundary. */ } ADV_SCSI_REQ_Q; /* * The following two structures are used to process Wide Board requests. * * The ADV_SCSI_REQ_Q structure in adv_req_t is passed to the Adv Library - * and microcode with the ADV_SCSI_REQ_Q field 'srb_ptr' pointing to the - * adv_req_t. The adv_req_t structure 'cmndp' field in turn points to the - * Mid-Level SCSI request structure. + * and microcode with the ADV_SCSI_REQ_Q field 'srb_tag' set to the + * SCSI request tag. The adv_req_t structure 'cmndp' field in turn points + * to the Mid-Level SCSI request structure. * * Zero or more ADV_SG_BLOCK are used with each ADV_SCSI_REQ_Q. Each * ADV_SG_BLOCK structure holds 15 scatter-gather elements. Under Linux @@ -1890,17 +1796,17 @@ typedef struct adv_scsi_req_q { */ typedef struct adv_sgblk { ADV_SG_BLOCK sg_block; /* Sgblock structure. */ - uchar align[32]; /* Sgblock structure padding. */ + dma_addr_t sg_addr; /* Physical address */ struct adv_sgblk *next_sgblkp; /* Next scatter-gather structure. */ } adv_sgblk_t; typedef struct adv_req { ADV_SCSI_REQ_Q scsi_req_q; /* Adv Library request structure. */ - uchar align[32]; /* Request structure padding. */ + uchar align[24]; /* Request structure padding. */ struct scsi_cmnd *cmndp; /* Mid-Level SCSI command pointer. */ + dma_addr_t req_addr; adv_sgblk_t *sgblkp; /* Adv Library scatter-gather pointer. */ - struct adv_req *next_reqp; /* Next Request Structure. */ -} adv_req_t; +} adv_req_t __aligned(32); /* * Adapter operation variable structure. @@ -1937,12 +1843,12 @@ typedef struct adv_dvc_var { uchar chip_scsi_id; /* chip SCSI target ID */ uchar chip_type; uchar bist_err_code; - ADV_CARR_T *carrier_buf; + ADV_CARR_T *carrier; ADV_CARR_T *carr_freelist; /* Carrier free list. */ + dma_addr_t carrier_addr; ADV_CARR_T *icq_sp; /* Initiator command queue stopper pointer. */ ADV_CARR_T *irq_sp; /* Initiator response queue stopper pointer. */ ushort carr_pending_cnt; /* Count of pending carriers. */ - struct adv_req *orig_reqp; /* adv_req_t memory block. */ /* * Note: The following fields will not be used after initialization. The * driver may discard the buffer after initialization is done. @@ -2068,8 +1974,8 @@ do { \ AdvReadByteRegister((iop_base), IOPB_CHIP_TYPE_REV) /* - * Abort an SRB in the chip's RISC Memory. The 'srb_ptr' argument must - * match the ASC_SCSI_REQ_Q 'srb_ptr' field. + * Abort an SRB in the chip's RISC Memory. The 'srb_tag' argument must + * match the ADV_SCSI_REQ_Q 'srb_tag' field. 
* * If the request has not yet been sent to the device it will simply be * aborted from RISC memory. If the request is disconnected it will be @@ -2079,9 +1985,9 @@ do { \ * ADV_TRUE(1) - Queue was successfully aborted. * ADV_FALSE(0) - Queue was not found on the active queue list. */ -#define AdvAbortQueue(asc_dvc, scsiq) \ - AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \ - (ADV_DCNT) (scsiq)) +#define AdvAbortQueue(asc_dvc, srb_tag) \ + AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_ABORT, \ + (ADV_DCNT) (srb_tag)) /* * Send a Bus Device Reset Message to the specified target ID. @@ -2095,8 +2001,8 @@ do { \ * are not purged. */ #define AdvResetDevice(asc_dvc, target_id) \ - AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \ - (ADV_DCNT) (target_id)) + AdvSendIdleCmd((asc_dvc), (ushort) IDLE_CMD_DEVICE_RESET, \ + (ADV_DCNT) (target_id)) /* * SCSI Wide Type definition. @@ -2115,7 +2021,7 @@ do { \ #define ADV_TID_TO_TIDMASK(tid) (0x01 << ((tid) & ADV_MAX_TID)) /* - * ASC_SCSI_REQ_Q 'done_status' and 'host_status' return values. + * ADV_SCSI_REQ_Q 'done_status' and 'host_status' return values. */ #define QD_NO_STATUS 0x00 /* Request not completed yet. */ @@ -2153,8 +2059,6 @@ do { \ #define QHSTA_M_SGBACKUP_ERROR 0x47 /* Scatter-Gather backup error */ /* Return the address that is aligned at the next doubleword >= to 'addr'. */ -#define ADV_8BALIGN(addr) (((ulong) (addr) + 0x7) & ~0x7) -#define ADV_16BALIGN(addr) (((ulong) (addr) + 0xF) & ~0xF) #define ADV_32BALIGN(addr) (((ulong) (addr) + 0x1F) & ~0x1F) /* @@ -2315,24 +2219,24 @@ do { \ /* Per board statistics structure */ struct asc_stats { /* Driver Entrypoint Statistics */ - ADV_DCNT queuecommand; /* # calls to advansys_queuecommand() */ - ADV_DCNT reset; /* # calls to advansys_eh_bus_reset() */ - ADV_DCNT biosparam; /* # calls to advansys_biosparam() */ - ADV_DCNT interrupt; /* # advansys_interrupt() calls */ - ADV_DCNT callback; /* # calls to asc/adv_isr_callback() */ - ADV_DCNT done; /* # calls to request's scsi_done function */ - ADV_DCNT build_error; /* # asc/adv_build_req() ASC_ERROR returns. */ - ADV_DCNT adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */ - ADV_DCNT adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */ + unsigned int queuecommand; /* # calls to advansys_queuecommand() */ + unsigned int reset; /* # calls to advansys_eh_bus_reset() */ + unsigned int biosparam; /* # calls to advansys_biosparam() */ + unsigned int interrupt; /* # advansys_interrupt() calls */ + unsigned int callback; /* # calls to asc/adv_isr_callback() */ + unsigned int done; /* # calls to request's scsi_done function */ + unsigned int build_error; /* # asc/adv_build_req() ASC_ERROR returns. */ + unsigned int adv_build_noreq; /* # adv_build_req() adv_req_t alloc. fail. */ + unsigned int adv_build_nosg; /* # adv_build_req() adv_sgblk_t alloc. fail. */ /* AscExeScsiQueue()/AdvExeScsiQueue() Statistics */ - ADV_DCNT exe_noerror; /* # ASC_NOERROR returns. */ - ADV_DCNT exe_busy; /* # ASC_BUSY returns. */ - ADV_DCNT exe_error; /* # ASC_ERROR returns. */ - ADV_DCNT exe_unknown; /* # unknown returns. */ + unsigned int exe_noerror; /* # ASC_NOERROR returns. */ + unsigned int exe_busy; /* # ASC_BUSY returns. */ + unsigned int exe_error; /* # ASC_ERROR returns. */ + unsigned int exe_unknown; /* # unknown returns. 
*/ /* Data Transfer Statistics */ - ADV_DCNT xfer_cnt; /* # I/O requests received */ - ADV_DCNT xfer_elem; /* # scatter-gather elements */ - ADV_DCNT xfer_sect; /* # 512-byte blocks */ + unsigned int xfer_cnt; /* # I/O requests received */ + unsigned int xfer_elem; /* # scatter-gather elements */ + unsigned int xfer_sect; /* # 512-byte blocks */ }; #endif /* ADVANSYS_STATS */ @@ -2345,6 +2249,7 @@ struct asc_stats { */ struct asc_board { struct device *dev; + struct Scsi_Host *shost; uint flags; /* Board flags */ unsigned int irq; union { @@ -2366,7 +2271,6 @@ struct asc_board { ADVEEP_38C0800_CONFIG adv_38C0800_eep; /* 38C0800 EEPROM config. */ ADVEEP_38C1600_CONFIG adv_38C1600_eep; /* 38C1600 EEPROM config. */ } eep_config; - ulong last_reset; /* Saved last reset time */ /* /proc/scsi/advansys/[0...] */ #ifdef ADVANSYS_STATS struct asc_stats asc_stats; /* Board statistics */ @@ -2381,7 +2285,9 @@ struct asc_board { void __iomem *ioremap_addr; /* I/O Memory remap address. */ ushort ioport; /* I/O Port address. */ adv_req_t *adv_reqp; /* Request structures. */ - adv_sgblk_t *adv_sgblkp; /* Scatter-gather structures. */ + dma_addr_t adv_reqp_addr; + size_t adv_reqp_size; + struct dma_pool *adv_sgblk_pool; /* Scatter-gather structures. */ ushort bios_signature; /* BIOS Signature. */ ushort bios_version; /* BIOS Version. */ ushort bios_codeseg; /* BIOS Code Segment. */ @@ -2470,12 +2376,11 @@ static void asc_prt_adv_dvc_var(ADV_DVC_VAR *h) printk(" start_motor 0x%x, scsi_reset_wait 0x%x\n", (unsigned)h->start_motor, (unsigned)h->scsi_reset_wait); - printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%lxn\n", + printk(" max_host_qng %u, max_dvc_qng %u, carr_freelist 0x%p\n", (unsigned)h->max_host_qng, (unsigned)h->max_dvc_qng, - (ulong)h->carr_freelist); + h->carr_freelist); - printk(" icq_sp 0x%lx, irq_sp 0x%lx\n", - (ulong)h->icq_sp, (ulong)h->irq_sp); + printk(" icq_sp 0x%p, irq_sp 0x%p\n", h->icq_sp, h->irq_sp); printk(" no_scam 0x%x, tagqng_able 0x%x\n", (unsigned)h->no_scam, (unsigned)h->tagqng_able); @@ -2600,8 +2505,8 @@ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); printk - (" target_ix 0x%x, target_lun %u, srb_ptr 0x%lx, tag_code 0x%x,\n", - q->q2.target_ix, q->q1.target_lun, (ulong)q->q2.srb_ptr, + (" target_ix 0x%x, target_lun %u, srb_tag 0x%x, tag_code 0x%x,\n", + q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag, q->q2.tag_code); printk @@ -2634,8 +2539,8 @@ static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) { printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); - printk(" srb_ptr 0x%lx, target_ix %u, cdb_len %u, tag_code %u,\n", - (ulong)q->d2.srb_ptr, q->d2.target_ix, q->d2.cdb_len, + printk(" srb_tag 0x%x, target_ix %u, cdb_len %u, tag_code %u,\n", + q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len, q->d2.tag_code); printk (" done_stat 0x%x, host_stat 0x%x, scsi_stat 0x%x, scsi_msg 0x%x\n", @@ -2651,17 +2556,17 @@ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) { int i; - printk(" ASC_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", + printk(" ADV_SG_BLOCK at addr 0x%lx (sgblockno %d)\n", (ulong)b, sgblockno); - printk(" sg_cnt %u, sg_ptr 0x%lx\n", - b->sg_cnt, (ulong)le32_to_cpu(b->sg_ptr)); + printk(" sg_cnt %u, sg_ptr 0x%x\n", + b->sg_cnt, (u32)le32_to_cpu(b->sg_ptr)); BUG_ON(b->sg_cnt > NO_OF_SG_PER_BLOCK); if (b->sg_ptr != 0) BUG_ON(b->sg_cnt != NO_OF_SG_PER_BLOCK); for (i = 0; i < b->sg_cnt; i++) { - printk(" [%u]: sg_addr 0x%lx, sg_count 0x%lx\n", - i, 
(ulong)b->sg_list[i].sg_addr, - (ulong)b->sg_list[i].sg_count); + printk(" [%u]: sg_addr 0x%x, sg_count 0x%x\n", + i, (u32)le32_to_cpu(b->sg_list[i].sg_addr), + (u32)le32_to_cpu(b->sg_list[i].sg_count)); } } @@ -2673,15 +2578,16 @@ static void asc_prt_adv_sgblock(int sgblockno, ADV_SG_BLOCK *b) static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) { int sg_blk_cnt; - struct asc_sg_block *sg_ptr; + struct adv_sg_block *sg_ptr; + adv_sgblk_t *sgblkp; printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); - printk(" target_id %u, target_lun %u, srb_ptr 0x%lx, a_flag 0x%x\n", - q->target_id, q->target_lun, (ulong)q->srb_ptr, q->a_flag); + printk(" target_id %u, target_lun %u, srb_tag 0x%x\n", + q->target_id, q->target_lun, q->srb_tag); - printk(" cntl 0x%x, data_addr 0x%lx, vdata_addr 0x%lx\n", - q->cntl, (ulong)le32_to_cpu(q->data_addr), (ulong)q->vdata_addr); + printk(" cntl 0x%x, data_addr 0x%lx\n", + q->cntl, (ulong)le32_to_cpu(q->data_addr)); printk(" data_cnt %lu, sense_addr 0x%lx, sense_len %u,\n", (ulong)le32_to_cpu(q->data_cnt), @@ -2700,21 +2606,15 @@ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) /* Display the request's ADV_SG_BLOCK structures. */ if (q->sg_list_ptr != NULL) { + sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block); sg_blk_cnt = 0; - while (1) { - /* - * 'sg_ptr' is a physical address. Convert it to a virtual - * address by indexing 'sg_blk_cnt' into the virtual address - * array 'sg_list_ptr'. - * - * XXX - Assumes all SG physical blocks are virtually contiguous. - */ - sg_ptr = - &(((ADV_SG_BLOCK *)(q->sg_list_ptr))[sg_blk_cnt]); + while (sgblkp) { + sg_ptr = &sgblkp->sg_block; asc_prt_adv_sgblock(sg_blk_cnt, sg_ptr); if (sg_ptr->sg_ptr == 0) { break; } + sgblkp = sgblkp->next_sgblkp; sg_blk_cnt++; } } @@ -2722,59 +2622,6 @@ static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) #endif /* ADVANSYS_DEBUG */ /* - * The advansys chip/microcode contains a 32-bit identifier for each command - * known as the 'srb'. I don't know what it stands for. The driver used - * to encode the scsi_cmnd pointer by calling virt_to_bus and retrieve it - * with bus_to_virt. Now the driver keeps a per-host map of integers to - * pointers. It auto-expands when full, unless it can't allocate memory. - * Note that an srb of 0 is treated specially by the chip/firmware, hence - * the return of i+1 in this routine, and the corresponding subtraction in - * the inverse routine. 
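The comment being deleted here documents the old srb scheme: an auto-expanding per-host array mapping 32-bit handles to pointers, grown with GFP_ATOMIC in the I/O path. The rewrite drops advansys_ptr_to_srb()/advansys_srb_to_ptr() entirely and reuses the block-layer tag as the handle, making completion an O(1) lookup with no allocation. Roughly, as a sketch (the real lookup appears in adv_isr_callback() further down):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

static struct scsi_cmnd *example_find_cmd(struct Scsi_Host *shost, u32 srb_tag)
{
	/*
	 * The tag stored into the request at queuecommand time comes back
	 * in the completed descriptor; no handle table is needed.
	 */
	return scsi_host_find_tag(shost, srb_tag);
}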
- */ -#define BAD_SRB 0 -static u32 advansys_ptr_to_srb(struct asc_dvc_var *asc_dvc, void *ptr) -{ - int i; - void **new_ptr; - - for (i = 0; i < asc_dvc->ptr_map_count; i++) { - if (!asc_dvc->ptr_map[i]) - goto out; - } - - if (asc_dvc->ptr_map_count == 0) - asc_dvc->ptr_map_count = 1; - else - asc_dvc->ptr_map_count *= 2; - - new_ptr = krealloc(asc_dvc->ptr_map, - asc_dvc->ptr_map_count * sizeof(void *), GFP_ATOMIC); - if (!new_ptr) - return BAD_SRB; - asc_dvc->ptr_map = new_ptr; - out: - ASC_DBG(3, "Putting ptr %p into array offset %d\n", ptr, i); - asc_dvc->ptr_map[i] = ptr; - return i + 1; -} - -static void * advansys_srb_to_ptr(struct asc_dvc_var *asc_dvc, u32 srb) -{ - void *ptr; - - srb--; - if (srb >= asc_dvc->ptr_map_count) { - printk("advansys: bad SRB %u, max %u\n", srb, - asc_dvc->ptr_map_count); - return NULL; - } - ptr = asc_dvc->ptr_map[srb]; - asc_dvc->ptr_map[srb] = NULL; - ASC_DBG(3, "Returning ptr %p from array offset %d\n", ptr, srb); - return ptr; -} - -/* * advansys_info() * * Return suitable for printing on the console with the argument @@ -3350,7 +3197,7 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost) seq_printf(m, " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n", - boardp->flags, boardp->last_reset, jiffies, + boardp->flags, shost->last_reset, jiffies, boardp->asc_n_io_port); seq_printf(m, " io_port 0x%lx\n", shost->io_port); @@ -3844,7 +3691,7 @@ static int AscStartChip(PortAddr iop_base) return (1); } -static int AscStopChip(PortAddr iop_base) +static bool AscStopChip(PortAddr iop_base) { uchar cc_val; @@ -3855,22 +3702,22 @@ static int AscStopChip(PortAddr iop_base) AscSetChipIH(iop_base, INS_HALT); AscSetChipIH(iop_base, INS_RFLAG_WTM); if ((AscGetChipStatus(iop_base) & CSW_HALTED) == 0) { - return (0); + return false; } - return (1); + return true; } -static int AscIsChipHalted(PortAddr iop_base) +static bool AscIsChipHalted(PortAddr iop_base) { if ((AscGetChipStatus(iop_base) & CSW_HALTED) != 0) { if ((AscGetChipControl(iop_base) & CC_HALT) != 0) { - return (1); + return true; } } - return (0); + return false; } -static int AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc) +static bool AscResetChipAndScsiBus(ASC_DVC_VAR *asc_dvc) { PortAddr iop_base; int i = 10; @@ -3953,20 +3800,6 @@ static ushort AscReadLramWord(PortAddr iop_base, ushort addr) return (word_data); } -#if CC_VERY_LONG_SG_LIST -static ASC_DCNT AscReadLramDWord(PortAddr iop_base, ushort addr) -{ - ushort val_low, val_high; - ASC_DCNT dword_data; - - AscSetChipLramAddr(iop_base, addr); - val_low = AscGetChipLramData(iop_base); - val_high = AscGetChipLramData(iop_base); - dword_data = ((ASC_DCNT) val_high << 16) | (ASC_DCNT) val_low; - return (dword_data); -} -#endif /* CC_VERY_LONG_SG_LIST */ - static void AscMemWordSetLram(PortAddr iop_base, ushort s_addr, ushort set_wval, int words) { @@ -4068,27 +3901,24 @@ AscMemWordCopyPtrFromLram(PortAddr iop_base, } } -static ASC_DCNT AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words) +static u32 AscMemSumLramWord(PortAddr iop_base, ushort s_addr, int words) { - ASC_DCNT sum; + u32 sum = 0; int i; - sum = 0L; for (i = 0; i < words; i++, s_addr += 2) { sum += AscReadLramWord(iop_base, s_addr); } return (sum); } -static ushort AscInitLram(ASC_DVC_VAR *asc_dvc) +static void AscInitLram(ASC_DVC_VAR *asc_dvc) { uchar i; ushort s_addr; PortAddr iop_base; - ushort warn_code; iop_base = asc_dvc->iop_base; - warn_code = 0; AscMemWordSetLram(iop_base, ASC_QADR_BEG, 0, (ushort)(((int)(asc_dvc->max_total_qng 
+ 2 + 1) * 64) >> 1)); @@ -4127,14 +3957,13 @@ static ushort AscInitLram(ASC_DVC_VAR *asc_dvc) AscWriteLramByte(iop_base, (ushort)(s_addr + (ushort)ASC_SCSIQ_B_QNO), i); } - return warn_code; } -static ASC_DCNT +static u32 AscLoadMicroCode(PortAddr iop_base, ushort s_addr, const uchar *mcode_buf, ushort mcode_size) { - ASC_DCNT chksum; + u32 chksum; ushort mcode_word_size; ushort mcode_chksum; @@ -4186,13 +4015,13 @@ static void AscInitQLinkVar(ASC_DVC_VAR *asc_dvc) } } -static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) +static int AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) { int i; - ushort warn_code; + int warn_code; PortAddr iop_base; - ASC_PADDR phy_addr; - ASC_DCNT phy_size; + __le32 phy_addr; + __le32 phy_size; struct asc_board *board = asc_dvc_to_board(asc_dvc); iop_base = asc_dvc->iop_base; @@ -4231,12 +4060,12 @@ static ushort AscInitMicroCodeVar(ASC_DVC_VAR *asc_dvc) AscSetPCAddr(iop_base, ASC_MCODE_START_ADDR); if (AscGetPCAddr(iop_base) != ASC_MCODE_START_ADDR) { asc_dvc->err_code |= ASC_IERR_SET_PC_ADDR; - warn_code = UW_ERR; + warn_code = -EINVAL; goto err_mcode_start; } if (AscStartChip(iop_base) != 1) { asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; - warn_code = UW_ERR; + warn_code = -EIO; goto err_mcode_start; } @@ -4250,13 +4079,13 @@ err_dma_map: return warn_code; } -static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) +static int AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) { const struct firmware *fw; const char fwname[] = "advansys/mcode.bin"; int err; unsigned long chksum; - ushort warn_code; + int warn_code; PortAddr iop_base; iop_base = asc_dvc->iop_base; @@ -4268,15 +4097,13 @@ static ushort AscInitAsc1000Driver(ASC_DVC_VAR *asc_dvc) } asc_dvc->init_state |= ASC_INIT_STATE_BEG_LOAD_MC; if (asc_dvc->err_code != 0) - return UW_ERR; + return ASC_ERROR; if (!AscFindSignature(asc_dvc->iop_base)) { asc_dvc->err_code = ASC_IERR_BAD_SIGNATURE; return warn_code; } AscDisableInterrupt(iop_base); - warn_code |= AscInitLram(asc_dvc); - if (asc_dvc->err_code != 0) - return UW_ERR; + AscInitLram(asc_dvc); err = request_firmware(&fw, fwname, asc_dvc->drv_ptr->dev); if (err) { @@ -4336,7 +4163,7 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf, int size, int memsize, int chksum) { int i, j, end, len = 0; - ADV_DCNT sum; + u32 sum; AdvWriteWordRegister(iop_base, IOPW_RAM_ADDR, 0); @@ -4382,38 +4209,72 @@ static int AdvLoadMicrocode(AdvPortAddr iop_base, const unsigned char *buf, return 0; } -static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc) +static void AdvBuildCarrierFreelist(struct adv_dvc_var *adv_dvc) { - ADV_CARR_T *carrp; - ADV_SDCNT buf_size; - ADV_PADDR carr_paddr; + off_t carr_offset = 0, next_offset; + dma_addr_t carr_paddr; + int carr_num = ADV_CARRIER_BUFSIZE / sizeof(ADV_CARR_T), i; - carrp = (ADV_CARR_T *) ADV_16BALIGN(asc_dvc->carrier_buf); - asc_dvc->carr_freelist = NULL; - if (carrp == asc_dvc->carrier_buf) { - buf_size = ADV_CARRIER_BUFSIZE; - } else { - buf_size = ADV_CARRIER_BUFSIZE - sizeof(ADV_CARR_T); + for (i = 0; i < carr_num; i++) { + carr_offset = i * sizeof(ADV_CARR_T); + /* Get physical address of the carrier 'carrp'. 
*/ + carr_paddr = adv_dvc->carrier_addr + carr_offset; + + adv_dvc->carrier[i].carr_pa = cpu_to_le32(carr_paddr); + adv_dvc->carrier[i].carr_va = cpu_to_le32(carr_offset); + adv_dvc->carrier[i].areq_vpa = 0; + next_offset = carr_offset + sizeof(ADV_CARR_T); + if (i == carr_num) + next_offset = ~0; + adv_dvc->carrier[i].next_vpa = cpu_to_le32(next_offset); } + /* + * We cannot have a carrier with 'carr_va' of '0', as + * a reference to this carrier would be interpreted as + * list termination. + * So start at carrier 1 with the freelist. + */ + adv_dvc->carr_freelist = &adv_dvc->carrier[1]; +} - do { - /* Get physical address of the carrier 'carrp'. */ - carr_paddr = cpu_to_le32(virt_to_bus(carrp)); +static ADV_CARR_T *adv_get_carrier(struct adv_dvc_var *adv_dvc, u32 offset) +{ + int index; - buf_size -= sizeof(ADV_CARR_T); + BUG_ON(offset > ADV_CARRIER_BUFSIZE); - carrp->carr_pa = carr_paddr; - carrp->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(carrp)); + index = offset / sizeof(ADV_CARR_T); + return &adv_dvc->carrier[index]; +} - /* - * Insert the carrier at the beginning of the freelist. - */ - carrp->next_vpa = - cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist)); - asc_dvc->carr_freelist = carrp; +static ADV_CARR_T *adv_get_next_carrier(struct adv_dvc_var *adv_dvc) +{ + ADV_CARR_T *carrp = adv_dvc->carr_freelist; + u32 next_vpa = le32_to_cpu(carrp->next_vpa); + + if (next_vpa == 0 || next_vpa == ~0) { + ASC_DBG(1, "invalid vpa offset 0x%x\n", next_vpa); + return NULL; + } + + adv_dvc->carr_freelist = adv_get_carrier(adv_dvc, next_vpa); + /* + * insert stopper carrier to terminate list + */ + carrp->next_vpa = cpu_to_le32(ADV_CQ_STOPPER); + + return carrp; +} + +/* + * 'offset' is the index in the request pointer array + */ +static adv_req_t * adv_get_reqp(struct adv_dvc_var *adv_dvc, u32 offset) +{ + struct asc_board *boardp = adv_dvc->drv_ptr; - carrp++; - } while (buf_size > 0); + BUG_ON(offset > adv_dvc->max_host_qng); + return &boardp->adv_reqp[offset]; } /* @@ -4432,10 +4293,9 @@ static void AdvBuildCarrierFreelist(struct adv_dvc_var *asc_dvc) */ static int AdvSendIdleCmd(ADV_DVC_VAR *asc_dvc, - ushort idle_cmd, ADV_DCNT idle_cmd_parameter) + ushort idle_cmd, u32 idle_cmd_parameter) { - int result; - ADV_DCNT i, j; + int result, i, j; AdvPortAddr iop_base; iop_base = asc_dvc->iop_base; @@ -4902,17 +4762,11 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc) * Set-up the Host->RISC Initiator Command Queue (ICQ). */ - if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { + asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->icq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); - - /* - * The first command issued will be placed in the stopper carrier. - */ - asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. @@ -4922,21 +4776,11 @@ static int AdvInitAsc3550Driver(ADV_DVC_VAR *asc_dvc) /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ - if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { + asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->irq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); - - /* - * The first command completed by the RISC will be placed in - * the stopper. - * - * Note: Set 'next_vpa' to ASC_CQ_STOPPER. 
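All of the carrier handling above follows from one allocation change: the carriers now live in a single coherent buffer (carrier/carrier_addr), so the "virtual address" stored in carr_va and next_vpa is really just a byte offset into that array, and the ICQ/IRQ stopper setup reduces to adv_get_next_carrier(). The translation scheme in miniature, with illustrative names:

#include <linux/bug.h>
#include <linux/types.h>

struct example_carr {
	__le32 next_vpa;	/* low bits are flags, the rest is an offset */
};

static struct example_carr *example_from_offset(struct example_carr *base,
						size_t bufsize, u32 offset)
{
	BUG_ON(offset > bufsize);		/* offsets double as handles */
	return &base[offset / sizeof(*base)];	/* no bus_to_virt() needed */
}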
When the request is - * completed the RISC will set the ASC_RQ_STOPPER bit. - */ - asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. @@ -5399,17 +5243,12 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc) * Set-up the Host->RISC Initiator Command Queue (ICQ). */ - if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { + asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->icq_sp) { + ASC_DBG(0, "Failed to get ICQ carrier\n"); asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); - - /* - * The first command issued will be placed in the stopper carrier. - */ - asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. @@ -5420,21 +5259,12 @@ static int AdvInitAsc38C0800Driver(ADV_DVC_VAR *asc_dvc) /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ - if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { + asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->irq_sp) { + ASC_DBG(0, "Failed to get IRQ carrier\n"); asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); - - /* - * The first command completed by the RISC will be placed in - * the stopper. - * - * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is - * completed the RISC will set the ASC_RQ_STOPPER bit. - */ - asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. @@ -5909,17 +5739,11 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) /* * Set-up the Host->RISC Initiator Command Queue (ICQ). */ - if ((asc_dvc->icq_sp = asc_dvc->carr_freelist) == NULL) { + asc_dvc->icq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->icq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->icq_sp->next_vpa)); - - /* - * The first command issued will be placed in the stopper carrier. - */ - asc_dvc->icq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC ICQ physical address start value. Initialize the @@ -5933,21 +5757,11 @@ static int AdvInitAsc38C1600Driver(ADV_DVC_VAR *asc_dvc) /* * Set-up the RISC->Host Initiator Response Queue (IRQ). */ - if ((asc_dvc->irq_sp = asc_dvc->carr_freelist) == NULL) { + asc_dvc->irq_sp = adv_get_next_carrier(asc_dvc); + if (!asc_dvc->irq_sp) { asc_dvc->err_code |= ASC_IERR_NO_CARRIER; return ADV_ERROR; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->next_vpa)); - - /* - * The first command completed by the RISC will be placed in - * the stopper. - * - * Note: Set 'next_vpa' to ASC_CQ_STOPPER. When the request is - * completed the RISC will set the ASC_RQ_STOPPER bit. - */ - asc_dvc->irq_sp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); /* * Set RISC IRQ physical address start value. 
@@ -6134,15 +5948,16 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code) */ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) { - struct asc_board *boardp; + struct asc_board *boardp = adv_dvc_varp->drv_ptr; + u32 srb_tag; adv_req_t *reqp; adv_sgblk_t *sgblkp; struct scsi_cmnd *scp; - struct Scsi_Host *shost; - ADV_DCNT resid_cnt; + u32 resid_cnt; + dma_addr_t sense_addr; - ASC_DBG(1, "adv_dvc_varp 0x%lx, scsiqp 0x%lx\n", - (ulong)adv_dvc_varp, (ulong)scsiqp); + ASC_DBG(1, "adv_dvc_varp 0x%p, scsiqp 0x%p\n", + adv_dvc_varp, scsiqp); ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); /* @@ -6150,22 +5965,9 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) * completed. The adv_req_t structure actually contains the * completed ADV_SCSI_REQ_Q structure. */ - reqp = (adv_req_t *)ADV_U32_TO_VADDR(scsiqp->srb_ptr); - ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp); - if (reqp == NULL) { - ASC_PRINT("adv_isr_callback: reqp is NULL\n"); - return; - } + srb_tag = le32_to_cpu(scsiqp->srb_tag); + scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag); - /* - * Get the struct scsi_cmnd structure and Scsi_Host structure for the - * command that has been completed. - * - * Note: The adv_req_t request structure and adv_sgblk_t structure, - * if any, are dropped, because a board structure pointer can not be - * determined. - */ - scp = reqp->cmndp; ASC_DBG(1, "scp 0x%p\n", scp); if (scp == NULL) { ASC_PRINT @@ -6174,12 +5976,25 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) } ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); - shost = scp->device->host; - ASC_STATS(shost, callback); - ASC_DBG(1, "shost 0x%p\n", shost); + reqp = (adv_req_t *)scp->host_scribble; + ASC_DBG(1, "reqp 0x%lx\n", (ulong)reqp); + if (reqp == NULL) { + ASC_PRINT("adv_isr_callback: reqp is NULL\n"); + return; + } + /* + * Remove backreferences to avoid duplicate + * command completions. + */ + scp->host_scribble = NULL; + reqp->cmndp = NULL; + + ASC_STATS(boardp->shost, callback); + ASC_DBG(1, "shost 0x%p\n", boardp->shost); - boardp = shost_priv(shost); - BUG_ON(adv_dvc_varp != &boardp->dvc_var.adv_dvc_var); + sense_addr = le32_to_cpu(scsiqp->sense_addr); + dma_unmap_single(boardp->dev, sense_addr, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* * 'done_status' contains the command's ending status. @@ -6272,18 +6087,10 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp) /* Remove 'sgblkp' from the request list. */ reqp->sgblkp = sgblkp->next_sgblkp; - /* Add 'sgblkp' to the board free list. */ - sgblkp->next_sgblkp = boardp->adv_sgblkp; - boardp->adv_sgblkp = sgblkp; + dma_pool_free(boardp->adv_sgblk_pool, sgblkp, + sgblkp->sg_addr); } - /* - * Free the adv_req_t structure used with the command by adding - * it back to the board free list. - */ - reqp->next_reqp = boardp->adv_reqp; - boardp->adv_reqp = reqp; - ASC_DBG(1, "done\n"); } @@ -6312,8 +6119,9 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc) uchar int_stat; ushort target_bit; ADV_CARR_T *free_carrp; - ADV_VADDR irq_next_vpa; + __le32 irq_next_vpa; ADV_SCSI_REQ_Q *scsiq; + adv_req_t *reqp; iop_base = asc_dvc->iop_base; @@ -6356,25 +6164,28 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc) * Check if the IRQ stopper carrier contains a completed request. 
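Two details in the adv_isr_callback() rework above are worth noting: the sense buffer, now mapped per command, is unmapped at completion, and both back-references (scp->host_scribble and reqp->cmndp) are cleared so a spurious second interrupt cannot complete the same command twice. The unmap step in isolation, as a sketch:

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <scsi/scsi_cmnd.h>

static void example_unmap_sense(struct device *dev, __le32 sense_addr_le)
{
	/* Undo the dma_map_single() done when the command was queued. */
	dma_unmap_single(dev, le32_to_cpu(sense_addr_le),
			 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}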
*/ while (((irq_next_vpa = - le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ASC_RQ_DONE) != 0) { + le32_to_cpu(asc_dvc->irq_sp->next_vpa)) & ADV_RQ_DONE) != 0) { /* * Get a pointer to the newly completed ADV_SCSI_REQ_Q structure. * The RISC will have set 'areq_vpa' to a virtual address. * - * The firmware will have copied the ASC_SCSI_REQ_Q.scsiq_ptr + * The firmware will have copied the ADV_SCSI_REQ_Q.scsiq_ptr * field to the carrier ADV_CARR_T.areq_vpa field. The conversion - * below complements the conversion of ASC_SCSI_REQ_Q.scsiq_ptr' + * below complements the conversion of ADV_SCSI_REQ_Q.scsiq_ptr' * in AdvExeScsiQueue(). */ - scsiq = (ADV_SCSI_REQ_Q *) - ADV_U32_TO_VADDR(le32_to_cpu(asc_dvc->irq_sp->areq_vpa)); + u32 pa_offset = le32_to_cpu(asc_dvc->irq_sp->areq_vpa); + ASC_DBG(1, "irq_sp %p areq_vpa %u\n", + asc_dvc->irq_sp, pa_offset); + reqp = adv_get_reqp(asc_dvc, pa_offset); + scsiq = &reqp->scsi_req_q; /* * Request finished with good status and the queue was not * DMAed to host memory by the firmware. Set all status fields * to indicate good status. */ - if ((irq_next_vpa & ASC_RQ_GOOD) != 0) { + if ((irq_next_vpa & ADV_RQ_GOOD) != 0) { scsiq->done_status = QD_NO_ERROR; scsiq->host_status = scsiq->scsi_status = 0; scsiq->data_cnt = 0L; @@ -6386,11 +6197,10 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc) * stopper carrier. */ free_carrp = asc_dvc->irq_sp; - asc_dvc->irq_sp = (ADV_CARR_T *) - ADV_U32_TO_VADDR(ASC_GET_CARRP(irq_next_vpa)); + asc_dvc->irq_sp = adv_get_carrier(asc_dvc, + ADV_GET_CARRP(irq_next_vpa)); - free_carrp->next_vpa = - cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->carr_freelist)); + free_carrp->next_vpa = asc_dvc->carr_freelist->carr_va; asc_dvc->carr_freelist = free_carrp; asc_dvc->carr_pending_cnt--; @@ -6405,7 +6215,6 @@ static int AdvISR(ADV_DVC_VAR *asc_dvc) * Notify the driver of the completed request by passing * the ADV_SCSI_REQ_Q pointer to its callback function. 
*/ - scsiq->a_flag |= ADV_SCSIQ_DONE; adv_isr_callback(asc_dvc, scsiq); /* * Note: After the driver callback function is called, 'scsiq' @@ -6521,11 +6330,11 @@ AscCalSDTRData(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar syn_offset) return byte; } -static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) +static bool AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) { ASC_SCSI_BIT_ID_TYPE org_id; int i; - int sta = TRUE; + bool sta = true; AscSetBank(iop_base, 1); org_id = AscReadChipDvcID(iop_base); @@ -6539,10 +6348,10 @@ static int AscSetChipSynRegAtID(PortAddr iop_base, uchar id, uchar sdtr_data) AscSetBank(iop_base, 0); AscSetChipSyn(iop_base, sdtr_data); if (AscGetChipSyn(iop_base) != sdtr_data) { - sta = FALSE; + sta = false; } } else { - sta = FALSE; + sta = false; } AscSetBank(iop_base, 1); AscWriteChipDvcID(iop_base, org_id); @@ -6556,12 +6365,12 @@ static void AscSetChipSDTR(PortAddr iop_base, uchar sdtr_data, uchar tid_no) AscPutMCodeSDTRDoneAtID(iop_base, tid_no, sdtr_data); } -static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) +static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) { EXT_MSG ext_msg; EXT_MSG out_msg; ushort halt_q_addr; - int sdtr_accept; + bool sdtr_accept; ushort int_halt_code; ASC_SCSI_BIT_ID_TYPE scsi_busy; ASC_SCSI_BIT_ID_TYPE target_id; @@ -6603,14 +6412,14 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) boardp->sdtr_data[tid_no] = 0; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } else if (int_halt_code == ASC_HALT_ENABLE_ASYN_USE_SYN_FIX) { if (asc_dvc->pci_fix_asyn_xfer & target_id) { AscSetChipSDTR(iop_base, asyn_sdtr, tid_no); boardp->sdtr_data[tid_no] = asyn_sdtr; } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } else if (int_halt_code == ASC_HALT_EXTMSG_IN) { AscMemWordCopyPtrFromLram(iop_base, ASCV_MSGIN_BEG, @@ -6620,10 +6429,10 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_SDTR && ext_msg.msg_len == MS_SDTR_LEN) { - sdtr_accept = TRUE; + sdtr_accept = true; if ((ext_msg.req_ack_offset > ASC_SYN_MAX_OFFSET)) { - sdtr_accept = FALSE; + sdtr_accept = false; ext_msg.req_ack_offset = ASC_SYN_MAX_OFFSET; } if ((ext_msg.xfer_period < @@ -6631,7 +6440,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) || (ext_msg.xfer_period > asc_dvc->sdtr_period_tbl[asc_dvc-> max_sdtr_index])) { - sdtr_accept = FALSE; + sdtr_accept = false; ext_msg.xfer_period = asc_dvc->sdtr_period_tbl[asc_dvc-> min_sdtr_index]; @@ -6696,7 +6505,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } else if (ext_msg.msg_type == EXTENDED_MESSAGE && ext_msg.msg_req == EXTENDED_WDTR && ext_msg.msg_len == MS_WDTR_LEN) { @@ -6712,7 +6521,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } else { ext_msg.msg_type = MESSAGE_REJECT; @@ -6726,7 +6535,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } } else if (int_halt_code == ASC_HALT_CHK_CONDITION) { @@ -6783,7 +6592,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) AscWriteLramByte(iop_base, (ushort)ASCV_SCSIBUSY_B, scsi_busy); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } else if (int_halt_code == 
ASC_HALT_SDTR_REJECTED) { AscMemWordCopyPtrFromLram(iop_base, @@ -6805,7 +6614,7 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) (ushort)(halt_q_addr + (ushort)ASC_SCSIQ_B_CNTL), q_cntl); AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } else if (int_halt_code == ASC_HALT_SS_QUEUE_FULL) { scsi_status = AscReadLramByte(iop_base, @@ -6850,166 +6659,9 @@ static int AscIsrChipHalted(ASC_DVC_VAR *asc_dvc) } } AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); - } -#if CC_VERY_LONG_SG_LIST - else if (int_halt_code == ASC_HALT_HOST_COPY_SG_LIST_TO_RISC) { - uchar q_no; - ushort q_addr; - uchar sg_wk_q_no; - uchar first_sg_wk_q_no; - ASC_SCSI_Q *scsiq; /* Ptr to driver request. */ - ASC_SG_HEAD *sg_head; /* Ptr to driver SG request. */ - ASC_SG_LIST_Q scsi_sg_q; /* Structure written to queue. */ - ushort sg_list_dwords; - ushort sg_entry_cnt; - uchar next_qp; - int i; - - q_no = AscReadLramByte(iop_base, (ushort)ASCV_REQ_SG_LIST_QP); - if (q_no == ASC_QLINK_END) - return 0; - - q_addr = ASC_QNO_TO_QADDR(q_no); - - /* - * Convert the request's SRB pointer to a host ASC_SCSI_REQ - * structure pointer using a macro provided by the driver. - * The ASC_SCSI_REQ pointer provides a pointer to the - * host ASC_SG_HEAD structure. - */ - /* Read request's SRB pointer. */ - scsiq = (ASC_SCSI_Q *) - ASC_SRB2SCSIQ(ASC_U32_TO_VADDR(AscReadLramDWord(iop_base, - (ushort) - (q_addr + - ASC_SCSIQ_D_SRBPTR)))); - - /* - * Get request's first and working SG queue. - */ - sg_wk_q_no = AscReadLramByte(iop_base, - (ushort)(q_addr + - ASC_SCSIQ_B_SG_WK_QP)); - - first_sg_wk_q_no = AscReadLramByte(iop_base, - (ushort)(q_addr + - ASC_SCSIQ_B_FIRST_SG_WK_QP)); - - /* - * Reset request's working SG queue back to the - * first SG queue. - */ - AscWriteLramByte(iop_base, - (ushort)(q_addr + - (ushort)ASC_SCSIQ_B_SG_WK_QP), - first_sg_wk_q_no); - - sg_head = scsiq->sg_head; - - /* - * Set sg_entry_cnt to the number of SG elements - * that will be completed on this interrupt. - * - * Note: The allocated SG queues contain ASC_MAX_SG_LIST - 1 - * SG elements. The data_cnt and data_addr fields which - * add 1 to the SG element capacity are not used when - * restarting SG handling after a halt. - */ - if (scsiq->remain_sg_entry_cnt > (ASC_MAX_SG_LIST - 1)) { - sg_entry_cnt = ASC_MAX_SG_LIST - 1; - - /* - * Keep track of remaining number of SG elements that - * will need to be handled on the next interrupt. - */ - scsiq->remain_sg_entry_cnt -= (ASC_MAX_SG_LIST - 1); - } else { - sg_entry_cnt = scsiq->remain_sg_entry_cnt; - scsiq->remain_sg_entry_cnt = 0; - } - - /* - * Copy SG elements into the list of allocated SG queues. - * - * Last index completed is saved in scsiq->next_sg_index. - */ - next_qp = first_sg_wk_q_no; - q_addr = ASC_QNO_TO_QADDR(next_qp); - scsi_sg_q.sg_head_qp = q_no; - scsi_sg_q.cntl = QCSG_SG_XFER_LIST; - for (i = 0; i < sg_head->queue_cnt; i++) { - scsi_sg_q.seq_no = i + 1; - if (sg_entry_cnt > ASC_SG_LIST_PER_Q) { - sg_list_dwords = (uchar)(ASC_SG_LIST_PER_Q * 2); - sg_entry_cnt -= ASC_SG_LIST_PER_Q; - /* - * After very first SG queue RISC FW uses next - * SG queue first element then checks sg_list_cnt - * against zero and then decrements, so set - * sg_list_cnt 1 less than number of SG elements - * in each SG queue. - */ - scsi_sg_q.sg_list_cnt = ASC_SG_LIST_PER_Q - 1; - scsi_sg_q.sg_cur_list_cnt = - ASC_SG_LIST_PER_Q - 1; - } else { - /* - * This is the last SG queue in the list of - * allocated SG queues. 
If there are more - * SG elements than will fit in the allocated - * queues, then set the QCSG_SG_XFER_MORE flag. - */ - if (scsiq->remain_sg_entry_cnt != 0) { - scsi_sg_q.cntl |= QCSG_SG_XFER_MORE; - } else { - scsi_sg_q.cntl |= QCSG_SG_XFER_END; - } - /* equals sg_entry_cnt * 2 */ - sg_list_dwords = sg_entry_cnt << 1; - scsi_sg_q.sg_list_cnt = sg_entry_cnt - 1; - scsi_sg_q.sg_cur_list_cnt = sg_entry_cnt - 1; - sg_entry_cnt = 0; - } - - scsi_sg_q.q_no = next_qp; - AscMemWordCopyPtrToLram(iop_base, - q_addr + ASC_SCSIQ_SGHD_CPY_BEG, - (uchar *)&scsi_sg_q, - sizeof(ASC_SG_LIST_Q) >> 1); - - AscMemDWordCopyPtrToLram(iop_base, - q_addr + ASC_SGQ_LIST_BEG, - (uchar *)&sg_head-> - sg_list[scsiq->next_sg_index], - sg_list_dwords); - - scsiq->next_sg_index += ASC_SG_LIST_PER_Q; - - /* - * If the just completed SG queue contained the - * last SG element, then no more SG queues need - * to be written. - */ - if (scsi_sg_q.cntl & QCSG_SG_XFER_END) { - break; - } - - next_qp = AscReadLramByte(iop_base, - (ushort)(q_addr + - ASC_SCSIQ_B_FWD)); - q_addr = ASC_QNO_TO_QADDR(next_qp); - } - - /* - * Clear the halt condition so the RISC will be restarted - * after the return. - */ - AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0); - return (0); + return; } -#endif /* CC_VERY_LONG_SG_LIST */ - return (0); + return; } /* @@ -7043,7 +6695,7 @@ DvcGetQinfo(PortAddr iop_base, ushort s_addr, uchar *inbuf, int words) static uchar _AscCopyLramScsiDoneQ(PortAddr iop_base, ushort q_addr, - ASC_QDONE_INFO *scsiq, ASC_DCNT max_dma_count) + ASC_QDONE_INFO *scsiq, unsigned int max_dma_count) { ushort _val; uchar sg_queue_cnt; @@ -7070,10 +6722,10 @@ _AscCopyLramScsiDoneQ(PortAddr iop_base, /* * Read high word of remain bytes from alternate location. */ - scsiq->remain_bytes = (((ADV_DCNT)AscReadLramWord(iop_base, - (ushort)(q_addr + - (ushort) - ASC_SCSIQ_W_ALT_DC1))) + scsiq->remain_bytes = (((u32)AscReadLramWord(iop_base, + (ushort)(q_addr + + (ushort) + ASC_SCSIQ_W_ALT_DC1))) << 16); /* * Read low word of remain bytes from original location. 
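The residual count lives in LRAM as two 16-bit words; the hunk above reads the high half from the alternate location, and code just past the hunk reads the low half from the original one. Schematically, with ASC_SCSIQ_DW_REMAIN_XFER_CNT standing in as an assumed name for the low-word offset:

/* Reassemble a 32-bit residual from two 16-bit LRAM words. */
u32 remain_bytes;

remain_bytes = (u32)AscReadLramWord(iop_base,
		(ushort)(q_addr + (ushort)ASC_SCSIQ_W_ALT_DC1)) << 16;
remain_bytes |= AscReadLramWord(iop_base,
		(ushort)(q_addr + (ushort)ASC_SCSIQ_DW_REMAIN_XFER_CNT));
/* Mask to the controller's addressable range, as the function's
 * max_dma_count parameter suggests. */
remain_bytes &= max_dma_count;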
@@ -7093,25 +6745,24 @@ _AscCopyLramScsiDoneQ(PortAddr iop_base, */ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep) { - struct asc_board *boardp; + struct asc_board *boardp = asc_dvc_varp->drv_ptr; + u32 srb_tag; struct scsi_cmnd *scp; - struct Scsi_Host *shost; ASC_DBG(1, "asc_dvc_varp 0x%p, qdonep 0x%p\n", asc_dvc_varp, qdonep); ASC_DBG_PRT_ASC_QDONE_INFO(2, qdonep); - scp = advansys_srb_to_ptr(asc_dvc_varp, qdonep->d2.srb_ptr); + /* + * Decrease the srb_tag by 1 to find the SCSI command + */ + srb_tag = qdonep->d2.srb_tag - 1; + scp = scsi_host_find_tag(boardp->shost, srb_tag); if (!scp) return; ASC_DBG_PRT_CDB(2, scp->cmnd, scp->cmd_len); - shost = scp->device->host; - ASC_STATS(shost, callback); - ASC_DBG(1, "shost 0x%p\n", shost); - - boardp = shost_priv(shost); - BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var); + ASC_STATS(boardp->shost, callback); dma_unmap_single(boardp->dev, scp->SCp.dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); @@ -7220,7 +6871,7 @@ static int AscIsrQDone(ASC_DVC_VAR *asc_dvc) uchar cur_target_qng; ASC_QDONE_INFO scsiq_buf; ASC_QDONE_INFO *scsiq; - int false_overrun; + bool false_overrun; iop_base = asc_dvc->iop_base; n_q_used = 1; @@ -7294,14 +6945,17 @@ static int AscIsrQDone(ASC_DVC_VAR *asc_dvc) scsiq->d3.done_stat = QD_WITH_ERROR; goto FATAL_ERR_QDONE; } - if ((scsiq->d2.srb_ptr == 0UL) || + if ((scsiq->d2.srb_tag == 0UL) || ((scsiq->q_status & QS_ABORTED) != 0)) { return (0x11); } else if (scsiq->q_status == QS_DONE) { - false_overrun = FALSE; + /* + * This is also curious. + * false_overrun will _always_ be set to 'false' + */ + false_overrun = false; if (scsiq->extra_bytes != 0) { - scsiq->remain_bytes += - (ADV_DCNT)scsiq->extra_bytes; + scsiq->remain_bytes += scsiq->extra_bytes; } if (scsiq->d3.done_stat == QD_WITH_ERROR) { if (scsiq->d3.host_stat == @@ -7372,23 +7026,23 @@ static int AscISR(ASC_DVC_VAR *asc_dvc) uchar host_flag; iop_base = asc_dvc->iop_base; - int_pending = FALSE; + int_pending = ASC_FALSE; if (AscIsIntPending(iop_base) == 0) return int_pending; if ((asc_dvc->init_state & ASC_INIT_STATE_END_LOAD_MC) == 0) { - return ERR; + return ASC_ERROR; } if (asc_dvc->in_critical_cnt != 0) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_ON_CRITICAL); - return ERR; + return ASC_ERROR; } if (asc_dvc->is_in_int) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_ISR_RE_ENTRY); - return ERR; + return ASC_ERROR; } - asc_dvc->is_in_int = TRUE; + asc_dvc->is_in_int = true; ctrl_reg = AscGetChipControl(iop_base); saved_ctrl_reg = ctrl_reg & (~(CC_SCSI_RESET | CC_CHIP_RESET | CC_SINGLE_STEP | CC_DIAG | CC_TEST)); @@ -7396,7 +7050,7 @@ static int AscISR(ASC_DVC_VAR *asc_dvc) if (chipstat & CSW_SCSI_RESET_LATCH) { if (!(asc_dvc->bus_type & (ASC_IS_VL | ASC_IS_EISA))) { int i = 10; - int_pending = TRUE; + int_pending = ASC_TRUE; asc_dvc->sdtr_done = 0; saved_ctrl_reg &= (uchar)(~CC_HALT); while ((AscGetChipStatus(iop_base) & @@ -7418,15 +7072,11 @@ static int AscISR(ASC_DVC_VAR *asc_dvc) (uchar)(host_flag | (uchar)ASC_HOST_FLAG_IN_ISR)); if ((chipstat & CSW_INT_PENDING) || (int_pending)) { AscAckInterrupt(iop_base); - int_pending = TRUE; + int_pending = ASC_TRUE; if ((chipstat & CSW_HALTED) && (ctrl_reg & CC_SINGLE_STEP)) { - if (AscIsrChipHalted(asc_dvc) == ERR) { - goto ISR_REPORT_QDONE_FATAL_ERROR; - } else { - saved_ctrl_reg &= (uchar)(~CC_HALT); - } + AscIsrChipHalted(asc_dvc); + saved_ctrl_reg &= (uchar)(~CC_HALT); } else { - ISR_REPORT_QDONE_FATAL_ERROR: if ((asc_dvc->dvc_cntl & ASC_CNTL_INT_MULTI_Q) != 0) { while (((status = 
AscIsrQDone(asc_dvc)) & 0x01) != 0) { @@ -7440,20 +7090,20 @@ static int AscISR(ASC_DVC_VAR *asc_dvc) } while (status == 0x11); } if ((status & 0x80) != 0) - int_pending = ERR; + int_pending = ASC_ERROR; } } AscWriteLramByte(iop_base, ASCV_HOST_FLAG_B, host_flag); AscSetChipLramAddr(iop_base, saved_ram_addr); AscSetChipControl(iop_base, saved_ctrl_reg); - asc_dvc->is_in_int = FALSE; + asc_dvc->is_in_int = false; return int_pending; } /* * advansys_reset() * - * Reset the bus associated with the command 'scp'. + * Reset the host associated with the command 'scp'. * * This function runs its own thread. Interrupts must be blocked but * sleeping is allowed and no locking other than for host structures is @@ -7471,7 +7121,7 @@ static int advansys_reset(struct scsi_cmnd *scp) ASC_STATS(shost, reset); - scmd_printk(KERN_INFO, scp, "SCSI bus reset started...\n"); + scmd_printk(KERN_INFO, scp, "SCSI host reset started...\n"); if (ASC_NARROW_BOARD(boardp)) { ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; @@ -7482,20 +7132,19 @@ static int advansys_reset(struct scsi_cmnd *scp) /* Refer to ASC_IERR_* definitions for meaning of 'err_code'. */ if (asc_dvc->err_code || !asc_dvc->overrun_dma) { - scmd_printk(KERN_INFO, scp, "SCSI bus reset error: " + scmd_printk(KERN_INFO, scp, "SCSI host reset error: " "0x%x, status: 0x%x\n", asc_dvc->err_code, status); ret = FAILED; } else if (status) { - scmd_printk(KERN_INFO, scp, "SCSI bus reset warning: " + scmd_printk(KERN_INFO, scp, "SCSI host reset warning: " "0x%x\n", status); } else { - scmd_printk(KERN_INFO, scp, "SCSI bus reset " + scmd_printk(KERN_INFO, scp, "SCSI host reset " "successful\n"); } ASC_DBG(1, "after AscInitAsc1000Driver()\n"); - spin_lock_irqsave(shost->host_lock, flags); } else { /* * If the suggest reset bus flags are set, then reset the bus. @@ -7504,28 +7153,25 @@ static int advansys_reset(struct scsi_cmnd *scp) ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; /* - * Reset the target's SCSI bus. + * Reset the chip and SCSI bus. */ ASC_DBG(1, "before AdvResetChipAndSB()\n"); switch (AdvResetChipAndSB(adv_dvc)) { case ASC_TRUE: - scmd_printk(KERN_INFO, scp, "SCSI bus reset " + scmd_printk(KERN_INFO, scp, "SCSI host reset " "successful\n"); break; case ASC_FALSE: default: - scmd_printk(KERN_INFO, scp, "SCSI bus reset error\n"); + scmd_printk(KERN_INFO, scp, "SCSI host reset error\n"); ret = FAILED; break; } spin_lock_irqsave(shost->host_lock, flags); AdvISR(adv_dvc); + spin_unlock_irqrestore(shost->host_lock, flags); } - /* Save the time of the most recently completed reset. 
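With the EH path above now calling AdvISR() directly under the host lock from process context, the same lock is taken both in and out of hard-interrupt context; the interrupt-handler hunk that follows therefore moves advansys_interrupt() to the irqsave variant. The pattern in isolation:

unsigned long flags;

/* Safe from any context: local interrupts are disabled while the lock
 * is held, so a hard IRQ on this CPU cannot deadlock trying to
 * re-take it. */
spin_lock_irqsave(shost->host_lock, flags);
/* ... inspect or complete shared per-host state ... */
spin_unlock_irqrestore(shost->host_lock, flags);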
*/ - boardp->last_reset = jiffies; - spin_unlock_irqrestore(shost->host_lock, flags); - ASC_DBG(1, "ret %d\n", ret); return ret; @@ -7584,9 +7230,10 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id) struct Scsi_Host *shost = dev_id; struct asc_board *boardp = shost_priv(shost); irqreturn_t result = IRQ_NONE; + unsigned long flags; ASC_DBG(2, "boardp 0x%p\n", boardp); - spin_lock(shost->host_lock); + spin_lock_irqsave(shost->host_lock, flags); if (ASC_NARROW_BOARD(boardp)) { if (AscIsIntPending(shost->io_port)) { result = IRQ_HANDLED; @@ -7601,38 +7248,38 @@ static irqreturn_t advansys_interrupt(int irq, void *dev_id) ASC_STATS(shost, interrupt); } } - spin_unlock(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, flags); ASC_DBG(1, "end\n"); return result; } -static int AscHostReqRiscHalt(PortAddr iop_base) +static bool AscHostReqRiscHalt(PortAddr iop_base) { int count = 0; - int sta = 0; + bool sta = false; uchar saved_stop_code; if (AscIsChipHalted(iop_base)) - return (1); + return true; saved_stop_code = AscReadLramByte(iop_base, ASCV_STOP_CODE_B); AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, ASC_STOP_HOST_REQ_RISC_HALT | ASC_STOP_REQ_RISC_STOP); do { if (AscIsChipHalted(iop_base)) { - sta = 1; + sta = true; break; } mdelay(100); } while (count++ < 20); AscWriteLramByte(iop_base, ASCV_STOP_CODE_B, saved_stop_code); - return (sta); + return sta; } -static int +static bool AscSetRunChipSynRegAtID(PortAddr iop_base, uchar tid_no, uchar sdtr_data) { - int sta = FALSE; + bool sta = false; if (AscHostReqRiscHalt(iop_base)) { sta = AscSetChipSynRegAtID(iop_base, tid_no, sdtr_data); @@ -7851,13 +7498,17 @@ static int advansys_slave_configure(struct scsi_device *sdev) return 0; } -static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp) +static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp) { struct asc_board *board = shost_priv(scp->device->host); + scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer, - SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); - dma_cache_sync(board->dev, scp->sense_buffer, - SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + SCSI_SENSE_BUFFERSIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) { + ASC_DBG(1, "failed to map sense buffer\n"); + return 0; + } return cpu_to_le32(scp->SCp.dma_handle); } @@ -7866,17 +7517,16 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, { struct asc_dvc_var *asc_dvc = &boardp->dvc_var.asc_dvc_var; int use_sg; + u32 srb_tag; memset(asc_scsi_q, 0, sizeof(*asc_scsi_q)); /* - * Point the ASC_SCSI_Q to the 'struct scsi_cmnd'. + * Set the srb_tag to the command tag + 1, as + * srb_tag '0' is used internally by the chip. */ - asc_scsi_q->q2.srb_ptr = advansys_ptr_to_srb(asc_dvc, scp); - if (asc_scsi_q->q2.srb_ptr == BAD_SRB) { - scp->result = HOST_BYTE(DID_SOFT_ERROR); - return ASC_ERROR; - } + srb_tag = scp->request->tag + 1; + asc_scsi_q->q2.srb_tag = srb_tag; /* * Build the ASC_SCSI_Q request. 
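Two conventions from the narrow-board submission path above are worth isolating: the srb_tag is biased by one because tag 0 means "no request" to the microcode, and the sense-buffer mapping is now checked rather than assumed. The helpers below are hypothetical, written only to show the two rules side by side; the calls they wrap are the real APIs used in the hunks.

/* Tag 0 is reserved, so block-layer tag N travels as N + 1 ... */
static inline u32 asc_srb_tag(struct scsi_cmnd *scp)
{
	return scp->request->tag + 1;
}

/* ... and the completion side undoes the bias (see asc_isr_callback). */
static inline struct scsi_cmnd *asc_srb_tag_to_cmnd(struct Scsi_Host *shost,
						    u32 srb_tag)
{
	return srb_tag ? scsi_host_find_tag(shost, srb_tag - 1) : NULL;
}

/* Mapping failures are reported as ASC_BUSY so the mid-layer retries
 * the command instead of failing it outright. */
static int asc_map_sense(struct asc_board *board, struct scsi_cmnd *scp)
{
	scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
					     SCSI_SENSE_BUFFERSIZE,
					     DMA_FROM_DEVICE);
	return dma_mapping_error(board->dev, scp->SCp.dma_handle) ?
		ASC_BUSY : ASC_NOERROR;
}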
@@ -7887,8 +7537,10 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, asc_scsi_q->q1.target_lun = scp->device->lun; asc_scsi_q->q2.target_ix = ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun); - asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp); + asc_scsi_q->q1.sense_addr = asc_get_sense_buffer_dma(scp); asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE; + if (!asc_scsi_q->q1.sense_addr) + return ASC_BUSY; /* * If there are any outstanding requests for the current target, @@ -7910,7 +7562,10 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, /* Build ASC_SCSI_Q */ use_sg = scsi_dma_map(scp); - if (use_sg != 0) { + if (use_sg < 0) { + ASC_DBG(1, "failed to map sglist\n"); + return ASC_BUSY; + } else if (use_sg > 0) { int sgcnt; struct scatterlist *slp; struct asc_sg_head *asc_sg_head; @@ -7975,20 +7630,19 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, * ADV_ERROR(-1) - SG List creation failed */ static int -adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, - int use_sg) +adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, + ADV_SCSI_REQ_Q *scsiqp, struct scsi_cmnd *scp, int use_sg) { - adv_sgblk_t *sgblkp; - ADV_SCSI_REQ_Q *scsiqp; + adv_sgblk_t *sgblkp, *prev_sgblkp; struct scatterlist *slp; int sg_elem_cnt; ADV_SG_BLOCK *sg_block, *prev_sg_block; - ADV_PADDR sg_block_paddr; + dma_addr_t sgblk_paddr; int i; - scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q); slp = scsi_sglist(scp); sg_elem_cnt = use_sg; + prev_sgblkp = NULL; prev_sg_block = NULL; reqp->sgblkp = NULL; @@ -7998,7 +7652,9 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, * list. One 'adv_sgblk_t' structure holds NO_OF_SG_PER_BLOCK * (15) scatter-gather elements. */ - if ((sgblkp = boardp->adv_sgblkp) == NULL) { + sgblkp = dma_pool_alloc(boardp->adv_sgblk_pool, GFP_ATOMIC, + &sgblk_paddr); + if (!sgblkp) { ASC_DBG(1, "no free adv_sgblk_t\n"); ASC_STATS(scp->device->host, adv_build_nosg); @@ -8009,24 +7665,16 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, while ((sgblkp = reqp->sgblkp) != NULL) { /* Remove 'sgblkp' from the request list. */ reqp->sgblkp = sgblkp->next_sgblkp; - - /* Add 'sgblkp' to the board free list. */ - sgblkp->next_sgblkp = boardp->adv_sgblkp; - boardp->adv_sgblkp = sgblkp; + sgblkp->next_sgblkp = NULL; + dma_pool_free(boardp->adv_sgblk_pool, sgblkp, + sgblkp->sg_addr); } return ASC_BUSY; } - /* Complete 'adv_sgblk_t' board allocation. */ - boardp->adv_sgblkp = sgblkp->next_sgblkp; + sgblkp->sg_addr = sgblk_paddr; sgblkp->next_sgblkp = NULL; - - /* - * Get 8 byte aligned virtual and physical addresses - * for the allocated ADV_SG_BLOCK structure. - */ - sg_block = (ADV_SG_BLOCK *)ADV_8BALIGN(&sgblkp->sg_block); - sg_block_paddr = virt_to_bus(sg_block); + sg_block = &sgblkp->sg_block; /* * Check if this is the first 'adv_sgblk_t' for the @@ -8041,17 +7689,16 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, * address pointers. */ scsiqp->sg_list_ptr = sg_block; - scsiqp->sg_real_addr = cpu_to_le32(sg_block_paddr); + scsiqp->sg_real_addr = cpu_to_le32(sgblk_paddr); } else { /* Request's second or later scatter-gather block. */ - sgblkp->next_sgblkp = reqp->sgblkp; - reqp->sgblkp = sgblkp; + prev_sgblkp->next_sgblkp = sgblkp; /* * Point the previous ADV_SG_BLOCK structure to * the newly allocated ADV_SG_BLOCK structure. 
*/ - prev_sg_block->sg_ptr = cpu_to_le32(sg_block_paddr); + prev_sg_block->sg_ptr = cpu_to_le32(sgblk_paddr); } for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) { @@ -8062,15 +7709,19 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, ASC_STATS_ADD(scp->device->host, xfer_sect, DIV_ROUND_UP(sg_dma_len(slp), 512)); - if (--sg_elem_cnt == 0) { /* Last ADV_SG_BLOCK and scatter-gather entry. */ + if (--sg_elem_cnt == 0) { + /* + * Last ADV_SG_BLOCK and scatter-gather entry. + */ sg_block->sg_cnt = i + 1; - sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */ + sg_block->sg_ptr = 0L; /* Last ADV_SG_BLOCK in list. */ return ADV_SUCCESS; } slp++; } sg_block->sg_cnt = NO_OF_SG_PER_BLOCK; prev_sg_block = sg_block; + prev_sgblkp = sgblkp; } } @@ -8080,38 +7731,35 @@ adv_get_sglist(struct asc_board *boardp, adv_req_t *reqp, struct scsi_cmnd *scp, * If an adv_req_t can not be allocated to issue the request, * then return ASC_BUSY. If an error occurs, then return ASC_ERROR. * - * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the + * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the * microcode for DMA addresses or math operations are byte swapped * to little-endian order. */ static int adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, - ADV_SCSI_REQ_Q **adv_scsiqpp) + adv_req_t **adv_reqpp) { + u32 srb_tag = scp->request->tag; adv_req_t *reqp; ADV_SCSI_REQ_Q *scsiqp; - int i; int ret; int use_sg; + dma_addr_t sense_addr; /* * Allocate an adv_req_t structure from the board to execute * the command. */ - if (boardp->adv_reqp == NULL) { + reqp = &boardp->adv_reqp[srb_tag]; + if (reqp->cmndp && reqp->cmndp != scp ) { ASC_DBG(1, "no free adv_req_t\n"); ASC_STATS(scp->device->host, adv_build_noreq); return ASC_BUSY; - } else { - reqp = boardp->adv_reqp; - boardp->adv_reqp = reqp->next_reqp; - reqp->next_reqp = NULL; } - /* - * Get 32-byte aligned ADV_SCSI_REQ_Q and ADV_SG_BLOCK pointers. - */ - scsiqp = (ADV_SCSI_REQ_Q *)ADV_32BALIGN(&reqp->scsi_req_q); + reqp->req_addr = boardp->adv_reqp_addr + (srb_tag * sizeof(adv_req_t)); + + scsiqp = &reqp->scsi_req_q; /* * Initialize the structure. @@ -8119,14 +7767,15 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, scsiqp->cntl = scsiqp->scsi_cntl = scsiqp->done_status = 0; /* - * Set the ADV_SCSI_REQ_Q 'srb_ptr' to point to the adv_req_t structure. + * Set the srb_tag to the command tag. */ - scsiqp->srb_ptr = ADV_VADDR_TO_U32(reqp); + scsiqp->srb_tag = srb_tag; /* - * Set the adv_req_t 'cmndp' to point to the struct scsi_cmnd structure. + * Set 'host_scribble' to point to the adv_req_t structure. */ reqp->cmndp = scp; + scp->host_scribble = (void *)reqp; /* * Build the ADV_SCSI_REQ_Q request. @@ -8135,28 +7784,38 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, /* Set CDB length and copy it to the request structure. */ scsiqp->cdb_len = scp->cmd_len; /* Copy first 12 CDB bytes to cdb[]. */ - for (i = 0; i < scp->cmd_len && i < 12; i++) { - scsiqp->cdb[i] = scp->cmnd[i]; - } + memcpy(scsiqp->cdb, scp->cmnd, scp->cmd_len < 12 ? scp->cmd_len : 12); /* Copy last 4 CDB bytes, if present, to cdb16[]. 
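The per-board adv_sgblk_t free list is replaced by a dma_pool throughout this series: allocation and free appear in the hunks above, creation and destruction in advansys_wide_init_chip()/advansys_wide_free_mem() further down. The full life cycle, using only the real dma_pool API:

/* Once per board: fixed-size blocks, 32-byte aligned for the chip. */
board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev,
					sizeof(adv_sgblk_t), 32, 0);

/* Per command, from the submission path (atomic context): */
sgblkp = dma_pool_alloc(board->adv_sgblk_pool, GFP_ATOMIC, &sgblk_paddr);
if (sgblkp)
	sgblkp->sg_addr = sgblk_paddr;	/* bus address kept for the free */

/* On completion or error unwind: */
dma_pool_free(board->adv_sgblk_pool, sgblkp, sgblkp->sg_addr);

/* On board teardown: */
dma_pool_destroy(board->adv_sgblk_pool);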
*/ - for (; i < scp->cmd_len; i++) { - scsiqp->cdb16[i - 12] = scp->cmnd[i]; + if (scp->cmd_len > 12) { + int cdb16_len = scp->cmd_len - 12; + + memcpy(scsiqp->cdb16, &scp->cmnd[12], cdb16_len); } scsiqp->target_id = scp->device->id; scsiqp->target_lun = scp->device->lun; - scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0])); - scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE; + sense_addr = dma_map_single(boardp->dev, scp->sense_buffer, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(boardp->dev, sense_addr)) { + ASC_DBG(1, "failed to map sense buffer\n"); + ASC_STATS(scp->device->host, adv_build_noreq); + return ASC_BUSY; + } + scsiqp->sense_addr = cpu_to_le32(sense_addr); + scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); /* Build ADV_SCSI_REQ_Q */ use_sg = scsi_dma_map(scp); - if (use_sg == 0) { + if (use_sg < 0) { + ASC_DBG(1, "failed to map SG list\n"); + ASC_STATS(scp->device->host, adv_build_noreq); + return ASC_BUSY; + } else if (use_sg == 0) { /* Zero-length transfer */ reqp->sgblkp = NULL; scsiqp->data_cnt = 0; - scsiqp->vdata_addr = NULL; scsiqp->data_addr = 0; scsiqp->sg_list_ptr = NULL; @@ -8168,27 +7827,20 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, scp->device->host->sg_tablesize); scsi_dma_unmap(scp); scp->result = HOST_BYTE(DID_ERROR); - - /* - * Free the 'adv_req_t' structure by adding it back - * to the board free list. - */ - reqp->next_reqp = boardp->adv_reqp; - boardp->adv_reqp = reqp; + reqp->cmndp = NULL; + scp->host_scribble = NULL; return ASC_ERROR; } scsiqp->data_cnt = cpu_to_le32(scsi_bufflen(scp)); - ret = adv_get_sglist(boardp, reqp, scp, use_sg); + ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg); if (ret != ADV_SUCCESS) { - /* - * Free the adv_req_t structure by adding it back to - * the board free list. - */ - reqp->next_reqp = boardp->adv_reqp; - boardp->adv_reqp = reqp; + scsi_dma_unmap(scp); + scp->result = HOST_BYTE(DID_ERROR); + reqp->cmndp = NULL; + scp->host_scribble = NULL; return ret; } @@ -8201,7 +7853,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, ASC_DBG_PRT_ADV_SCSI_REQ_Q(2, scsiqp); ASC_DBG_PRT_CDB(1, scp->cmnd, scp->cmd_len); - *adv_scsiqpp = scsiqp; + *adv_reqpp = reqp; return ASC_NOERROR; } @@ -8358,8 +8010,8 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) int i; ASC_SG_HEAD *sg_head; ASC_SG_LIST_Q scsi_sg_q; - ASC_DCNT saved_data_addr; - ASC_DCNT saved_data_cnt; + __le32 saved_data_addr; + __le32 saved_data_cnt; PortAddr iop_base; ushort sg_list_dwords; ushort sg_index; @@ -8371,42 +8023,15 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) sg_head = scsiq->sg_head; saved_data_addr = scsiq->q1.data_addr; saved_data_cnt = scsiq->q1.data_cnt; - scsiq->q1.data_addr = (ASC_PADDR) sg_head->sg_list[0].addr; - scsiq->q1.data_cnt = (ASC_DCNT) sg_head->sg_list[0].bytes; -#if CC_VERY_LONG_SG_LIST + scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); + scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes); /* - * If sg_head->entry_cnt is greater than ASC_MAX_SG_LIST - * then not all SG elements will fit in the allocated queues. - * The rest of the SG elements will be copied when the RISC - * completes the SG elements that fit and halts. + * Set sg_entry_cnt to be the number of SG elements that + * will fit in the allocated SG queues. It is minus 1, because + * the first SG element is handled above. 
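The memcpy() conversion above preserves the old split of the CDB across two request fields. For a full 16-byte command the two copies decompose as:

/* Bytes 0..11 always go to cdb[]; bytes 12..15, when present, go to
 * cdb16[] (here shown for scp->cmd_len == 16). */
memcpy(scsiqp->cdb, scp->cmnd, 12);
memcpy(scsiqp->cdb16, &scp->cmnd[12], 4);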
*/ - if (sg_head->entry_cnt > ASC_MAX_SG_LIST) { - /* - * Set sg_entry_cnt to be the number of SG elements that - * will fit in the allocated SG queues. It is minus 1, because - * the first SG element is handled above. ASC_MAX_SG_LIST is - * already inflated by 1 to account for this. For example it - * may be 50 which is 1 + 7 queues * 7 SG elements. - */ - sg_entry_cnt = ASC_MAX_SG_LIST - 1; + sg_entry_cnt = sg_head->entry_cnt - 1; - /* - * Keep track of remaining number of SG elements that will - * need to be handled from a_isr.c. - */ - scsiq->remain_sg_entry_cnt = - sg_head->entry_cnt - ASC_MAX_SG_LIST; - } else { -#endif /* CC_VERY_LONG_SG_LIST */ - /* - * Set sg_entry_cnt to be the number of SG elements that - * will fit in the allocated SG queues. It is minus 1, because - * the first SG element is handled above. - */ - sg_entry_cnt = sg_head->entry_cnt - 1; -#if CC_VERY_LONG_SG_LIST - } -#endif /* CC_VERY_LONG_SG_LIST */ if (sg_entry_cnt != 0) { scsiq->q1.cntl |= QC_SG_HEAD; q_addr = ASC_QNO_TO_QADDR(q_no); @@ -8431,21 +8056,7 @@ AscPutReadySgListQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no) ASC_SG_LIST_PER_Q - 1; } } else { -#if CC_VERY_LONG_SG_LIST - /* - * This is the last SG queue in the list of - * allocated SG queues. If there are more - * SG elements than will fit in the allocated - * queues, then set the QCSG_SG_XFER_MORE flag. - */ - if (sg_head->entry_cnt > ASC_MAX_SG_LIST) { - scsi_sg_q.cntl |= QCSG_SG_XFER_MORE; - } else { -#endif /* CC_VERY_LONG_SG_LIST */ - scsi_sg_q.cntl |= QCSG_SG_XFER_END; -#if CC_VERY_LONG_SG_LIST - } -#endif /* CC_VERY_LONG_SG_LIST */ + scsi_sg_q.cntl |= QCSG_SG_XFER_END; sg_list_dwords = sg_entry_cnt << 1; if (i == 0) { scsi_sg_q.sg_list_cnt = sg_entry_cnt; @@ -8550,9 +8161,9 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) PortAddr iop_base; int sta; int n_q_required; - int disable_syn_offset_one_fix; + bool disable_syn_offset_one_fix; int i; - ASC_PADDR addr; + u32 addr; ushort sg_entry_cnt = 0; ushort sg_entry_cnt_minus_one = 0; uchar target_ix; @@ -8562,12 +8173,12 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) uchar scsi_cmd; uchar disable_cmd; ASC_SG_HEAD *sg_head; - ASC_DCNT data_cnt; + unsigned long data_cnt; iop_base = asc_dvc->iop_base; sg_head = scsiq->sg_head; if (asc_dvc->err_code != 0) - return (ERR); + return ASC_ERROR; scsiq->q1.q_no = 0; if ((scsiq->q2.tag_code & ASC_TAG_FLAG_EXTRA_BYTES) == 0) { scsiq->q1.extra_bytes = 0; @@ -8593,46 +8204,41 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) } if (asc_dvc->in_critical_cnt != 0) { AscSetLibErrorCode(asc_dvc, ASCQ_ERR_CRITICAL_RE_ENTRY); - return (ERR); + return ASC_ERROR; } asc_dvc->in_critical_cnt++; if ((scsiq->q1.cntl & QC_SG_HEAD) != 0) { if ((sg_entry_cnt = sg_head->entry_cnt) == 0) { asc_dvc->in_critical_cnt--; - return (ERR); + return ASC_ERROR; } -#if !CC_VERY_LONG_SG_LIST if (sg_entry_cnt > ASC_MAX_SG_LIST) { asc_dvc->in_critical_cnt--; - return (ERR); + return ASC_ERROR; } -#endif /* !CC_VERY_LONG_SG_LIST */ if (sg_entry_cnt == 1) { - scsiq->q1.data_addr = - (ADV_PADDR)sg_head->sg_list[0].addr; - scsiq->q1.data_cnt = - (ADV_DCNT)sg_head->sg_list[0].bytes; + scsiq->q1.data_addr = cpu_to_le32(sg_head->sg_list[0].addr); + scsiq->q1.data_cnt = cpu_to_le32(sg_head->sg_list[0].bytes); scsiq->q1.cntl &= ~(QC_SG_HEAD | QC_SG_SWAP_QUEUE); } sg_entry_cnt_minus_one = sg_entry_cnt - 1; } scsi_cmd = scsiq->cdbptr[0]; - disable_syn_offset_one_fix = FALSE; + disable_syn_offset_one_fix = false; if 
((asc_dvc->pci_fix_asyn_xfer & scsiq->q1.target_id) && !(asc_dvc->pci_fix_asyn_xfer_always & scsiq->q1.target_id)) { if (scsiq->q1.cntl & QC_SG_HEAD) { data_cnt = 0; for (i = 0; i < sg_entry_cnt; i++) { - data_cnt += - (ADV_DCNT)le32_to_cpu(sg_head->sg_list[i]. - bytes); + data_cnt += le32_to_cpu(sg_head->sg_list[i]. + bytes); } } else { data_cnt = le32_to_cpu(scsiq->q1.data_cnt); } if (data_cnt != 0UL) { if (data_cnt < 512UL) { - disable_syn_offset_one_fix = TRUE; + disable_syn_offset_one_fix = true; } else { for (i = 0; i < ASC_SYN_OFFSET_ONE_DISABLE_LIST; i++) { @@ -8643,7 +8249,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) } if (scsi_cmd == disable_cmd) { disable_syn_offset_one_fix = - TRUE; + true; break; } } @@ -8662,12 +8268,11 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) if (asc_dvc->bug_fix_cntl & ASC_BUG_FIX_IF_NOT_DWB) { if ((scsi_cmd == READ_6) || (scsi_cmd == READ_10)) { - addr = - (ADV_PADDR)le32_to_cpu(sg_head-> + addr = le32_to_cpu(sg_head-> sg_list [sg_entry_cnt_minus_one]. addr) + - (ADV_DCNT)le32_to_cpu(sg_head-> + le32_to_cpu(sg_head-> sg_list [sg_entry_cnt_minus_one]. bytes); @@ -8688,8 +8293,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) sg_list [sg_entry_cnt_minus_one]. bytes); - data_cnt -= - (ASC_DCNT) extra_bytes; + data_cnt -= extra_bytes; sg_head-> sg_list [sg_entry_cnt_minus_one]. @@ -8700,16 +8304,6 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) } } sg_head->entry_to_copy = sg_head->entry_cnt; -#if CC_VERY_LONG_SG_LIST - /* - * Set the sg_entry_cnt to the maximum possible. The rest of - * the SG elements will be copied when the RISC completes the - * SG elements that fit and halts. - */ - if (sg_entry_cnt > ASC_MAX_SG_LIST) { - sg_entry_cnt = ASC_MAX_SG_LIST; - } -#endif /* CC_VERY_LONG_SG_LIST */ n_q_required = AscSgListToQueue(sg_entry_cnt); if ((AscGetNumOfFreeQueue(asc_dvc, target_ix, n_q_required) >= (uint) n_q_required) @@ -8744,8 +8338,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) == 0) { scsiq->q2.tag_code |= ASC_TAG_FLAG_EXTRA_BYTES; - data_cnt -= (ASC_DCNT) - extra_bytes; + data_cnt -= extra_bytes; scsiq->q1.data_cnt = cpu_to_le32 (data_cnt); @@ -8780,7 +8373,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) * If 'done_status' is not set to QD_DO_RETRY, then 'error_retry' will be * set to SCSI_MAX_RETRY. * - * Multi-byte fields in the ASC_SCSI_REQ_Q that are used by the microcode + * Multi-byte fields in the ADV_SCSI_REQ_Q that are used by the microcode * for DMA addresses or math operations are byte swapped to little-endian * order. * @@ -8791,11 +8384,11 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq) * ADV_ERROR(-1) - Invalid ADV_SCSI_REQ_Q request structure * host IC error. */ -static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq) +static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp) { AdvPortAddr iop_base; - ADV_PADDR req_paddr; ADV_CARR_T *new_carrp; + ADV_SCSI_REQ_Q *scsiq = &reqp->scsi_req_q; /* * The ADV_SCSI_REQ_Q 'target_id' field should never exceed ADV_MAX_TID. @@ -8812,39 +8405,19 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq) * Allocate a carrier ensuring at least one carrier always * remains on the freelist and initialize fields. 
*/ - if ((new_carrp = asc_dvc->carr_freelist) == NULL) { + new_carrp = adv_get_next_carrier(asc_dvc); + if (!new_carrp) { + ASC_DBG(1, "No free carriers\n"); return ADV_BUSY; } - asc_dvc->carr_freelist = (ADV_CARR_T *) - ADV_U32_TO_VADDR(le32_to_cpu(new_carrp->next_vpa)); - asc_dvc->carr_pending_cnt++; - - /* - * Set the carrier to be a stopper by setting 'next_vpa' - * to the stopper value. The current stopper will be changed - * below to point to the new stopper. - */ - new_carrp->next_vpa = cpu_to_le32(ASC_CQ_STOPPER); - /* - * Clear the ADV_SCSI_REQ_Q done flag. - */ - scsiq->a_flag &= ~ADV_SCSIQ_DONE; - - req_paddr = virt_to_bus(scsiq); - BUG_ON(req_paddr & 31); - /* Wait for assertion before making little-endian */ - req_paddr = cpu_to_le32(req_paddr); + asc_dvc->carr_pending_cnt++; /* Save virtual and physical address of ADV_SCSI_REQ_Q and carrier. */ - scsiq->scsiq_ptr = cpu_to_le32(ADV_VADDR_TO_U32(scsiq)); - scsiq->scsiq_rptr = req_paddr; + scsiq->scsiq_ptr = cpu_to_le32(scsiq->srb_tag); + scsiq->scsiq_rptr = cpu_to_le32(reqp->req_addr); - scsiq->carr_va = cpu_to_le32(ADV_VADDR_TO_U32(asc_dvc->icq_sp)); - /* - * Every ADV_CARR_T.carr_pa is byte swapped to little-endian - * order during initialization. - */ + scsiq->carr_va = asc_dvc->icq_sp->carr_va; scsiq->carr_pa = asc_dvc->icq_sp->carr_pa; /* @@ -8852,7 +8425,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, ADV_SCSI_REQ_Q *scsiq) * the microcode. The newly allocated stopper will become the new * stopper. */ - asc_dvc->icq_sp->areq_vpa = req_paddr; + asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr; /* * Set the 'next_vpa' pointer for the old stopper to be the @@ -8907,11 +8480,10 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) ASC_DVC_VAR *asc_dvc = &boardp->dvc_var.asc_dvc_var; struct asc_scsi_q asc_scsi_q; - /* asc_build_req() can not return ASC_BUSY. 
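The ICQ handoff in AdvExeScsiQueue() above follows a producer/stopper discipline. Stripped of the bookkeeping, and with the final link step reconstructed from the comment left in the hunk (the assignment itself is elided there), the ordering is:

/* 1. The new carrier is already a stopper: adv_get_next_carrier()
 *    set its next_vpa to the stopper value before returning it. */

/* 2. Publish the request through the *current* stopper: */
asc_dvc->icq_sp->areq_vpa = scsiq->scsiq_rptr;

/* 3. Only then retire the current stopper by linking it to the new
 *    one, so the microcode never sees a half-built entry: */
asc_dvc->icq_sp->next_vpa = new_carrp->carr_va;
asc_dvc->icq_sp = new_carrp;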
*/ ret = asc_build_req(boardp, scp, &asc_scsi_q); - if (ret == ASC_ERROR) { + if (ret != ASC_NOERROR) { ASC_STATS(scp->device->host, build_error); - return ASC_ERROR; + return ret; } ret = AscExeScsiQueue(asc_dvc, &asc_scsi_q); @@ -8919,9 +8491,9 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) err_code = asc_dvc->err_code; } else { ADV_DVC_VAR *adv_dvc = &boardp->dvc_var.adv_dvc_var; - ADV_SCSI_REQ_Q *adv_scsiqp; + adv_req_t *adv_reqp; - switch (adv_build_req(boardp, scp, &adv_scsiqp)) { + switch (adv_build_req(boardp, scp, &adv_reqp)) { case ASC_NOERROR: ASC_DBG(3, "adv_build_req ASC_NOERROR\n"); break; @@ -8941,7 +8513,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) return ASC_ERROR; } - ret = AdvExeScsiQueue(adv_dvc, adv_scsiqp); + ret = AdvExeScsiQueue(adv_dvc, adv_reqp); err_code = adv_dvc->err_code; } @@ -8956,6 +8528,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) ASC_DBG(1, "ExeScsiQueue() ASC_NOERROR\n"); break; case ASC_BUSY: + ASC_DBG(1, "ExeScsiQueue() ASC_BUSY\n"); ASC_STATS(scp->device->host, exe_busy); break; case ASC_ERROR: @@ -9122,7 +8695,7 @@ static int AscStopQueueExe(PortAddr iop_base) return (0); } -static ASC_DCNT AscGetMaxDmaCount(ushort bus_type) +static unsigned int AscGetMaxDmaCount(ushort bus_type) { if (bus_type & ASC_IS_ISA) return ASC_MAX_ISA_DMA_COUNT; @@ -9183,15 +8756,13 @@ static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value) } #endif /* CONFIG_ISA */ -static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) +static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) { int i; PortAddr iop_base; - ushort warn_code; uchar chip_version; iop_base = asc_dvc->iop_base; - warn_code = 0; asc_dvc->err_code = 0; if ((asc_dvc->bus_type & (ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) { @@ -9205,7 +8776,7 @@ static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) /* asc_dvc->init_state initialized in AscInitGetConfig(). */ asc_dvc->sdtr_done = 0; asc_dvc->cur_total_qng = 0; - asc_dvc->is_in_int = 0; + asc_dvc->is_in_int = false; asc_dvc->in_critical_cnt = 0; asc_dvc->last_q_shortage = 0; asc_dvc->use_tagged_qng = 0; @@ -9267,7 +8838,6 @@ static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc) asc_dvc->scsiq_busy_tail[i] = (ASC_SCSI_Q *)0L; asc_dvc->cfg->max_tag_qng[i] = ASC_MAX_INRAM_TAG_QNG; } - return warn_code; } static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg) @@ -9385,7 +8955,7 @@ static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg) int retry; retry = 0; - while (TRUE) { + while (true) { AscSetChipEEPData(iop_base, data_reg); mdelay(1); read_back = AscGetChipEEPData(iop_base); @@ -9521,7 +9091,7 @@ static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, int n_error; retry = 0; - while (TRUE) { + while (true) { if ((n_error = AscSetEEPConfigOnce(iop_base, cfg_buf, bus_type)) == 0) { break; @@ -9533,7 +9103,7 @@ static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, return n_error; } -static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc) +static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc) { ASCEEP_CONFIG eep_config_buf; ASCEEP_CONFIG *eep_config; @@ -9548,13 +9118,13 @@ static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc) warn_code = 0; AscWriteLramWord(iop_base, ASCV_HALTCODE_W, 0x00FE); AscStopQueueExe(iop_base); - if ((AscStopChip(iop_base) == FALSE) || + if ((AscStopChip(iop_base)) || (AscGetChipScsiCtrl(iop_base) != 0)) { asc_dvc->init_state |= ASC_INIT_RESET_SCSI_DONE; AscResetChipAndScsiBus(asc_dvc); mdelay(asc_dvc->scsi_reset_wait * 1000); /* XXX: msleep? 
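The XXX immediately above asks whether the busy-wait could sleep instead. This EEPROM init path runs in process context during probe, so a sleeping delay would be legitimate, e.g.:

/* scsi_reset_wait is in seconds; msleep() yields the CPU instead of
 * spinning for several seconds the way mdelay() does. */
msleep(asc_dvc->scsi_reset_wait * 1000);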
*/ } - if (AscIsChipHalted(iop_base) == FALSE) { + if (!AscIsChipHalted(iop_base)) { asc_dvc->err_code |= ASC_IERR_START_STOP_CHIP; return (warn_code); } @@ -9709,8 +9279,8 @@ static int AscInitGetConfig(struct Scsi_Host *shost) return asc_dvc->err_code; if (AscFindSignature(asc_dvc->iop_base)) { - warn_code |= AscInitAscDvcVar(asc_dvc); - warn_code |= AscInitFromEEP(asc_dvc); + AscInitAscDvcVar(asc_dvc); + warn_code = AscInitFromEEP(asc_dvc); asc_dvc->init_state |= ASC_INIT_STATE_END_GET_CFG; if (asc_dvc->scsi_reset_wait > ASC_MAX_SCSI_RESET_WAIT) asc_dvc->scsi_reset_wait = ASC_MAX_SCSI_RESET_WAIT; @@ -9866,6 +9436,7 @@ static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) * on big-endian platforms so char fields read as words are actually being * unswapped on big-endian platforms. */ +#ifdef CONFIG_PCI static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = { ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */ 0x0000, /* cfg_msw */ @@ -10202,7 +9773,6 @@ static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = { 0 /* 63 reserved */ }; -#ifdef CONFIG_PCI /* * Wait for EEPROM command to complete */ @@ -11232,7 +10802,7 @@ static struct scsi_host_template advansys_template = { .name = DRV_NAME, .info = advansys_info, .queuecommand = advansys_queuecommand, - .eh_bus_reset_handler = advansys_reset, + .eh_host_reset_handler = advansys_reset, .bios_param = advansys_biosparam, .slave_configure = advansys_slave_configure, /* @@ -11240,7 +10810,7 @@ static struct scsi_host_template advansys_template = { * must be set. The flag will be cleared in advansys_board_found * for non-ISA adapters. */ - .unchecked_isa_dma = 1, + .unchecked_isa_dma = true, /* * All adapters controlled by this driver are capable of large * scatter-gather lists. According to the mid-level SCSI documentation @@ -11249,26 +10819,25 @@ static struct scsi_host_template advansys_template = { * by enabling clustering, I/O throughput increases as well. */ .use_clustering = ENABLE_CLUSTERING, + .use_blk_tags = 1, }; static int advansys_wide_init_chip(struct Scsi_Host *shost) { struct asc_board *board = shost_priv(shost); struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; - int req_cnt = 0; - adv_req_t *reqp = NULL; - int sg_cnt = 0; - adv_sgblk_t *sgp; + size_t sgblk_pool_size; int warn_code, err_code; /* * Allocate buffer carrier structures. The total size - * is about 4 KB, so allocate all at once. + * is about 8 KB, so allocate all at once. */ - adv_dvc->carrier_buf = kmalloc(ADV_CARRIER_BUFSIZE, GFP_KERNEL); - ASC_DBG(1, "carrier_buf 0x%p\n", adv_dvc->carrier_buf); + adv_dvc->carrier = dma_alloc_coherent(board->dev, + ADV_CARRIER_BUFSIZE, &adv_dvc->carrier_addr, GFP_KERNEL); + ASC_DBG(1, "carrier 0x%p\n", adv_dvc->carrier); - if (!adv_dvc->carrier_buf) + if (!adv_dvc->carrier) goto kmalloc_failed; /* @@ -11276,54 +10845,34 @@ static int advansys_wide_init_chip(struct Scsi_Host *shost) * board. The total size is about 16 KB, so allocate all at once. * If the allocation fails decrement and try again. 
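The carrier buffer's switch from kmalloc() to dma_alloc_coherent() above pairs with advansys_wide_free_mem() in the hunks that follow; a coherent allocation returns a virtual/bus address pair, and the free must repeat the size and both addresses:

adv_dvc->carrier = dma_alloc_coherent(board->dev, ADV_CARRIER_BUFSIZE,
				      &adv_dvc->carrier_addr, GFP_KERNEL);
if (!adv_dvc->carrier)
	goto kmalloc_failed;	/* error label kept from the old code */

/* ... later, on teardown: */
dma_free_coherent(board->dev, ADV_CARRIER_BUFSIZE,
		  adv_dvc->carrier, adv_dvc->carrier_addr);
adv_dvc->carrier = NULL;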
*/ - for (req_cnt = adv_dvc->max_host_qng; req_cnt > 0; req_cnt--) { - reqp = kmalloc(sizeof(adv_req_t) * req_cnt, GFP_KERNEL); - - ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", reqp, req_cnt, - (ulong)sizeof(adv_req_t) * req_cnt); - - if (reqp) - break; + board->adv_reqp_size = adv_dvc->max_host_qng * sizeof(adv_req_t); + if (board->adv_reqp_size & 0x1f) { + ASC_DBG(1, "unaligned reqp %lu bytes\n", sizeof(adv_req_t)); + board->adv_reqp_size = ADV_32BALIGN(board->adv_reqp_size); } + board->adv_reqp = dma_alloc_coherent(board->dev, board->adv_reqp_size, + &board->adv_reqp_addr, GFP_KERNEL); - if (!reqp) + if (!board->adv_reqp) goto kmalloc_failed; - adv_dvc->orig_reqp = reqp; + ASC_DBG(1, "reqp 0x%p, req_cnt %d, bytes %lu\n", board->adv_reqp, + adv_dvc->max_host_qng, board->adv_reqp_size); /* * Allocate up to ADV_TOT_SG_BLOCK request structures for * the Wide board. Each structure is about 136 bytes. */ - board->adv_sgblkp = NULL; - for (sg_cnt = 0; sg_cnt < ADV_TOT_SG_BLOCK; sg_cnt++) { - sgp = kmalloc(sizeof(adv_sgblk_t), GFP_KERNEL); + sgblk_pool_size = sizeof(adv_sgblk_t) * ADV_TOT_SG_BLOCK; + board->adv_sgblk_pool = dma_pool_create("adv_sgblk", board->dev, + sgblk_pool_size, 32, 0); - if (!sgp) - break; - - sgp->next_sgblkp = board->adv_sgblkp; - board->adv_sgblkp = sgp; - - } - - ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", sg_cnt, sizeof(adv_sgblk_t), - sizeof(adv_sgblk_t) * sg_cnt); + ASC_DBG(1, "sg_cnt %d * %lu = %lu bytes\n", ADV_TOT_SG_BLOCK, + sizeof(adv_sgblk_t), sgblk_pool_size); - if (!board->adv_sgblkp) + if (!board->adv_sgblk_pool) goto kmalloc_failed; - /* - * Point 'adv_reqp' to the request structures and - * link them together. - */ - req_cnt--; - reqp[req_cnt].next_reqp = NULL; - for (; req_cnt > 0; req_cnt--) { - reqp[req_cnt - 1].next_reqp = &reqp[req_cnt]; - } - board->adv_reqp = &reqp[0]; - if (adv_dvc->chip_type == ADV_CHIP_ASC3550) { ASC_DBG(2, "AdvInitAsc3550Driver()\n"); warn_code = AdvInitAsc3550Driver(adv_dvc); @@ -11353,14 +10902,20 @@ static int advansys_wide_init_chip(struct Scsi_Host *shost) static void advansys_wide_free_mem(struct asc_board *board) { struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var; - kfree(adv_dvc->carrier_buf); - adv_dvc->carrier_buf = NULL; - kfree(adv_dvc->orig_reqp); - adv_dvc->orig_reqp = board->adv_reqp = NULL; - while (board->adv_sgblkp) { - adv_sgblk_t *sgp = board->adv_sgblkp; - board->adv_sgblkp = sgp->next_sgblkp; - kfree(sgp); + + if (adv_dvc->carrier) { + dma_free_coherent(board->dev, ADV_CARRIER_BUFSIZE, + adv_dvc->carrier, adv_dvc->carrier_addr); + adv_dvc->carrier = NULL; + } + if (board->adv_reqp) { + dma_free_coherent(board->dev, board->adv_reqp_size, + board->adv_reqp, board->adv_reqp_addr); + board->adv_reqp = NULL; + } + if (board->adv_sgblk_pool) { + dma_pool_destroy(board->adv_sgblk_pool); + board->adv_sgblk_pool = NULL; } } @@ -11431,28 +10986,28 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, switch (asc_dvc_varp->bus_type) { #ifdef CONFIG_ISA case ASC_IS_ISA: - shost->unchecked_isa_dma = TRUE; + shost->unchecked_isa_dma = true; share_irq = 0; break; case ASC_IS_VL: - shost->unchecked_isa_dma = FALSE; + shost->unchecked_isa_dma = false; share_irq = 0; break; case ASC_IS_EISA: - shost->unchecked_isa_dma = FALSE; + shost->unchecked_isa_dma = false; share_irq = IRQF_SHARED; break; #endif /* CONFIG_ISA */ #ifdef CONFIG_PCI case ASC_IS_PCI: - shost->unchecked_isa_dma = FALSE; + shost->unchecked_isa_dma = false; share_irq = IRQF_SHARED; break; #endif /* CONFIG_PCI */ default: 
shost_printk(KERN_ERR, shost, "unknown adapter type: " "%d\n", asc_dvc_varp->bus_type); - shost->unchecked_isa_dma = TRUE; + shost->unchecked_isa_dma = false; share_irq = 0; break; } @@ -11471,7 +11026,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, * For Wide boards set PCI information before calling * AdvInitGetConfig(). */ - shost->unchecked_isa_dma = FALSE; + shost->unchecked_isa_dma = false; share_irq = IRQF_SHARED; ASC_DBG(2, "AdvInitGetConfig()\n"); @@ -11656,24 +11211,11 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, /* Set maximum number of queues the adapter can handle. */ shost->can_queue = adv_dvc_varp->max_host_qng; } - - /* - * Following v1.3.89, 'cmd_per_lun' is no longer needed - * and should be set to zero. - * - * But because of a bug introduced in v1.3.89 if the driver is - * compiled as a module and 'cmd_per_lun' is zero, the Mid-Level - * SCSI function 'allocate_device' will panic. To allow the driver - * to work as a module in these kernels set 'cmd_per_lun' to 1. - * - * Note: This is wrong. cmd_per_lun should be set to the depth - * you want on untagged devices always. - #ifdef MODULE - */ - shost->cmd_per_lun = 1; -/* #else - shost->cmd_per_lun = 0; -#endif */ + ret = scsi_init_shared_tag_map(shost, shost->can_queue); + if (ret) { + shost_printk(KERN_ERR, shost, "init tag map failed\n"); + goto err_free_dma; + } /* * Set the maximum number of scatter-gather elements the @@ -11844,7 +11386,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, err_unmap: if (boardp->ioremap_addr) iounmap(boardp->ioremap_addr); +#ifdef CONFIG_PCI err_shost: +#endif return ret; } @@ -11927,6 +11471,7 @@ static int advansys_isa_probe(struct device *dev, unsigned int id) board = shost_priv(shost); board->irq = advansys_isa_irq_no(iop_base); board->dev = dev; + board->shost = shost; err = advansys_board_found(shost, iop_base, ASC_IS_ISA); if (err) @@ -12009,6 +11554,7 @@ static int advansys_vlb_probe(struct device *dev, unsigned int id) board = shost_priv(shost); board->irq = advansys_vlb_irq_no(iop_base); board->dev = dev; + board->shost = shost; err = advansys_board_found(shost, iop_base, ASC_IS_VL); if (err) @@ -12116,6 +11662,7 @@ static int advansys_eisa_probe(struct device *dev) board = shost_priv(shost); board->irq = irq; board->dev = dev; + board->shost = shost; err = advansys_board_found(shost, ioport, ASC_IS_EISA); if (!err) { @@ -12232,6 +11779,7 @@ static int advansys_pci_probe(struct pci_dev *pdev, board = shost_priv(shost); board->irq = pdev->irq; board->dev = &pdev->dev; + board->shost = shost; if (pdev->device == PCI_DEVICE_ID_ASP_ABP940UW || pdev->device == PCI_DEVICE_ID_38C0800_REV1 || diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index e31c460a1335..f44d0487236e 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -2922,7 +2922,6 @@ static struct scsi_host_template aha152x_driver_template = { .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .slave_alloc = aha152x_adjust_queue, }; diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index b95d2779f467..5b8b2937a3fe 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -950,7 +950,6 @@ static struct scsi_host_template driver_template = { .can_queue = AHA1542_MAILBOXES, .this_id = 7, .sg_tablesize = 16, - .cmd_per_lun = 1, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/aha1740.c 
b/drivers/scsi/aha1740.c index 31ace4bef8fe..bad35ffc015d 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c @@ -544,7 +544,6 @@ static struct scsi_host_template aha1740_template = { .can_queue = AHA1740_ECBS, .this_id = 7, .sg_tablesize = AHA1740_SCATTER, - .cmd_per_lun = AHA1740_CMDLUN, .use_clustering = ENABLE_CLUSTERING, .eh_abort_handler = aha1740_eh_abort_handler, }; diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h index af23fd6bd795..b0c5603461ca 100644 --- a/drivers/scsi/aha1740.h +++ b/drivers/scsi/aha1740.h @@ -149,6 +149,5 @@ struct ecb { /* Enhanced Control Block 6.1 */ #define AHA1740_ECBS 32 #define AHA1740_SCATTER 16 -#define AHA1740_CMDLUN 1 #endif diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 02a2512b76a8..4b135cca42a1 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -65,7 +65,6 @@ static struct scsi_host_template aic94xx_sht = { .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .can_queue = 1, - .cmd_per_lun = 1, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c index 32d23212de48..3110736fd337 100644 --- a/drivers/scsi/arm/arxescsi.c +++ b/drivers/scsi/arm/arxescsi.c @@ -245,7 +245,6 @@ static struct scsi_host_template arxescsi_template = { .can_queue = 0, .this_id = 7, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "arxescsi", }; diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index abc66f5263ec..faa1bee07c8a 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -367,7 +367,6 @@ static struct scsi_host_template cumanascsi2_template = { .this_id = 7, .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "cumanascsi2", }; diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index 5bf3c0d134b4..a8ad6880dd91 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -486,7 +486,6 @@ static struct scsi_host_template eesox_template = { .this_id = 7, .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, .dma_boundary = IOMD_DMA_BOUNDARY, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, .proc_name = "eesox", }; diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 0836433e3a2d..05301bc752ee 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -3158,7 +3158,6 @@ static struct scsi_host_template atp870u_template = { .can_queue = qcnt /* can_queue */, .this_id = 7 /* SCSI ID */, .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/, - .cmd_per_lun = ATP870U_CMDLUN /* commands per lun */, .use_clustering = ENABLE_CLUSTERING, .max_sectors = ATP870U_MAX_SECTORS, }; diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h index 62bae64a01c1..5cf62566ad42 100644 --- a/drivers/scsi/atp870u.h +++ b/drivers/scsi/atp870u.h @@ -10,7 +10,6 @@ #define MAX_SENSE 14 #define qcnt 32 #define ATP870U_SCATTER 128 -#define ATP870U_CMDLUN 1 #define MAX_ADAPTER 8 #define MAX_SCSI_ID 16 diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 447cf7ce606e..185391a64d4b 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -452,6 +452,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba, ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && 
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT; + phba->get_boot = BE_GET_BOOT_RETRIES; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, @@ -480,6 +481,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba) case ASYNC_EVENT_NEW_ISCSI_CONN: case ASYNC_EVENT_NEW_TCP_CONN: phba->state |= BE_ADAPTER_CHECK_BOOT; + phba->get_boot = BE_GET_BOOT_RETRIES; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -488,6 +490,8 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba) compl->flags); break; default: + phba->state |= BE_ADAPTER_CHECK_BOOT; + phba->get_boot = BE_GET_BOOT_RETRIES; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index f11d325fe696..cdfbc5c19cf4 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -304,6 +304,17 @@ struct mgmt_auth_method_format { struct mgmt_chap_format chap; } __packed; +struct be_cmd_req_logout_fw_sess { + struct be_cmd_req_hdr hdr; /* dw[4] */ + uint32_t session_handle; +} __packed; + +struct be_cmd_resp_logout_fw_sess { + struct be_cmd_resp_hdr hdr; /* dw[4] */ +#define BEISCSI_MGMT_SESSION_CLOSE 0x20 + uint32_t session_status; +} __packed; + struct mgmt_conn_login_options { u8 flags; u8 header_digest; @@ -1136,6 +1147,7 @@ struct be_cmd_get_all_if_id_req { #define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6 #define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7 #define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14 +#define OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET 24 #define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36 #define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41 #define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 1f74760ce86c..7a6dbfbccec9 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -668,14 +668,20 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev) return ret; } + ret = pci_request_regions(pcidev, DRV_NAME); + if (ret) { + dev_err(&pcidev->dev, + "beiscsi_enable_pci - request region failed\n"); + goto pci_dev_disable; + } + pci_set_master(pcidev); ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); - pci_disable_device(pcidev); - return ret; + goto pci_region_release; } else { ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); @@ -684,11 +690,17 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev) ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); - pci_disable_device(pcidev); - return ret; + goto pci_region_release; } } return 0; + +pci_region_release: + pci_release_regions(pcidev); +pci_dev_disable: + pci_disable_device(pcidev); + + return ret; } static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) @@ -1356,8 +1368,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) conn->rxdata_octets += resid; unmap: - scsi_dma_unmap(io_task->scsi_cmnd); - io_task->scsi_cmnd = NULL; + if (io_task->scsi_cmnd) { + scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); } @@ -2037,11 +2051,16 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) /* Interpret 
compl as an async link evt */ beiscsi_async_link_state_process(phba, (struct be_async_event_link_state *) mcc_compl); - else + else { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX, "BM_%d : Unsupported Async Event, flags" " = 0x%08x\n", mcc_compl->flags); + if (phba->state & BE_ADAPTER_LINK_UP) { + phba->state |= BE_ADAPTER_CHECK_BOOT; + phba->get_boot = BE_GET_BOOT_RETRIES; + } + } } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); atomic_dec(&phba->ctrl.mcc_obj.q.used); @@ -3678,14 +3697,16 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; q = &phba->ctrl.mcc_obj.q; - if (q->created) + if (q->created) { beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); - be_queue_free(phba, q); + be_queue_free(phba, q); + } q = &phba->ctrl.mcc_obj.cq; - if (q->created) + if (q->created) { beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); - be_queue_free(phba, q); + be_queue_free(phba, q); + } } static void hwi_cleanup(struct beiscsi_hba *phba) @@ -3729,8 +3750,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba) for (i = 0; i < (phba->num_cpus); i++) { q = &phwi_context->be_cq[i]; - if (q->created) + if (q->created) { + be_queue_free(phba, q); beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); + } } be_mcc_queues_destroy(phba); @@ -3740,8 +3763,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba) eq_for_mcc = 0; for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { q = &phwi_context->be_eq[i].q; - if (q->created) + if (q->created) { + be_queue_free(phba, q); beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); + } } be_cmd_fw_uninit(ctrl); } @@ -4328,8 +4353,14 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BM_%d : No boot session\n"); + + if (ret == -ENXIO) + phba->get_boot = 0; + + return ret; } + phba->get_boot = 0; nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, sizeof(*session_resp), &nonemb_cmd.dma); @@ -4369,6 +4400,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) memcpy(&phba->boot_sess, &session_resp->session_info, sizeof(struct mgmt_session_info)); + + beiscsi_logout_fw_sess(phba, + phba->boot_sess.session_handle); ret = 0; boot_freemem: @@ -4580,11 +4614,13 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, spin_unlock_bh(&phba->mgmt_sgl_lock); } - if (io_task->mtask_addr) + if (io_task->mtask_addr) { pci_unmap_single(phba->pcidev, io_task->mtask_addr, io_task->mtask_data_count, PCI_DMA_TODEVICE); + io_task->mtask_addr = 0; + } } /** @@ -5264,6 +5300,7 @@ static void beiscsi_remove(struct pci_dev *pcidev) iscsi_host_free(phba->shost); pci_disable_pcie_error_reporting(pcidev); pci_set_drvdata(pcidev, NULL); + pci_release_regions(pcidev); pci_disable_device(pcidev); } @@ -5374,8 +5411,14 @@ beiscsi_hw_health_check(struct work_struct *work) be_eqd_update(phba); if (phba->state & BE_ADAPTER_CHECK_BOOT) { - phba->state &= ~BE_ADAPTER_CHECK_BOOT; - be_check_boot_session(phba); + if ((phba->get_boot > 0) && (!phba->boot_kset)) { + phba->get_boot--; + if (!(phba->get_boot % BE_GET_BOOT_TO)) + be_check_boot_session(phba); + } else { + phba->state &= ~BE_ADAPTER_CHECK_BOOT; + phba->get_boot = 0; + } } beiscsi_ue_detect(phba); @@ -5738,6 +5781,7 @@ hba_free: iscsi_host_free(phba->shost); pci_set_drvdata(pcidev, NULL); disable_pci: + pci_release_regions(pcidev); pci_disable_device(pcidev); return ret; } diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index
e70ea26bbc2b..b8c0c7819cb1 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -36,7 +36,7 @@ #include <scsi/scsi_transport_iscsi.h> #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.4.114.0" +#define BUILD_STR "10.6.0.0" #define BE_NAME "Avago Technologies OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -109,6 +109,9 @@ #define BEISCSI_CLEAN_UNLOAD 0x01 #define BEISCSI_EEH_UNLOAD 0x02 + +#define BE_GET_BOOT_RETRIES 45 +#define BE_GET_BOOT_TO 20 /** * hardware needs the async PDU buffers to be posted in multiples of 8 * So have at least 8 of them by default @@ -413,6 +416,7 @@ struct beiscsi_hba { } fw_config; unsigned int state; + int get_boot; bool fw_timeout; bool ue_detected; struct delayed_work beiscsi_hw_check_task; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index c2c4d6975fb7..ca4016f20e76 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1707,3 +1707,72 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, (params->dw[offsetof(struct amap_beiscsi_offload_params, exp_statsn) / 32] + 1)); } + +/** + * beiscsi_logout_fw_sess()- Firmware Session Logout + * @phba: Device priv structure instance + * @fw_sess_handle: FW session handle + * + * Logout from the FW established sessions. + * returns + * Success: 0 + * Failure: Non-Zero Value + * + */ +int beiscsi_logout_fw_sess(struct beiscsi_hba *phba, + uint32_t fw_sess_handle) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_req_logout_fw_sess *req; + struct be_cmd_resp_logout_fw_sess *resp; + unsigned int tag; + int rc; + + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BG_%d : In beiscsi_logout_fw_sess\n"); + + spin_lock(&ctrl->mbox_lock); + tag = alloc_mcc_tag(phba); + if (!tag) { + spin_unlock(&ctrl->mbox_lock); + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, + "BG_%d : MBX Tag Failure\n"); + return -EINVAL; + } + + wrb = wrb_from_mccq(phba); + req = embedded_payload(wrb); + wrb->tag0 |= tag; + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, + OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET, + sizeof(struct be_cmd_req_logout_fw_sess)); + + /* Set the session handle */ + req->session_handle = fw_sess_handle; + be_mcc_notify(phba); + spin_unlock(&ctrl->mbox_lock); + + rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + if (rc) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n"); + return -EBUSY; + } + + resp = embedded_payload(wrb); + if (resp->session_status != + BEISCSI_MGMT_SESSION_CLOSE) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n", + resp->session_status); + rc = -EINVAL; + } + + return rc; +} diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 9356b9a86b66..b58a7decbd94 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -338,4 +338,7 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba); int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, int num); +int beiscsi_logout_fw_sess(struct beiscsi_hba *phba, + uint32_t fw_sess_handle); + #endif diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index e53078d03309..72894378ffcf 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1173,8 +1173,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task) bnx2i_send_cmd_cleanup_req(hba, task->dd_data); spin_unlock_bh(&conn->session->back_lock); + spin_unlock_bh(&conn->session->frwd_lock); wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); + spin_lock_bh(&conn->session->frwd_lock); spin_lock_bh(&conn->session->back_lock); } bnx2i_iscsi_unmap_sg_list(task->dd_data); @@ -2093,7 +2095,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) else /* wait for option-2 conn teardown */ wait_event_interruptible(bnx2i_ep->ofld_wait, - bnx2i_ep->state != EP_STATE_DISCONN_START); + ((bnx2i_ep->state != EP_STATE_DISCONN_START) + && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD))); if (signal_pending(current)) flush_signals(current); diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 2e66f34ebb79..622bdabc8894 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -3928,6 +3928,7 @@ csio_hw_init(struct csio_hw *hw) evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL); if (!evt_entry) { + rv = -ENOMEM; csio_err(hw, "Failed to initialize eventq"); goto err_evtq_cleanup; } diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 3db4c63978c5..0e2bee937fe8 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -1,7 +1,7 @@ /* * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management * - * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. + * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or @@ -32,8 +32,8 @@ static unsigned int dbg_level; #define DRV_MODULE_NAME "cxgb3i" #define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver" -#define DRV_MODULE_VERSION "2.0.0" -#define DRV_MODULE_RELDATE "Jun. 2010" +#define DRV_MODULE_VERSION "2.0.1-ko" +#define DRV_MODULE_RELDATE "Apr. 2015" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME @@ -156,7 +156,7 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion); static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, const struct l2t_entry *e) { - unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win); + unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win); struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; skb->priority = CPL_PRIORITY_SETUP; @@ -172,7 +172,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) | - V_RCV_BUFSIZ(cxgb3i_rcv_win>>10)); + V_RCV_BUFSIZ(csk->rcv_win >> 10)); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n", @@ -369,7 +369,7 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | V_TX_CPU_IDX(csk->rss_qid)); /* sendbuffer is in units of 32KB. 
*/ - req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15)); + req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15)); cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); } } @@ -503,8 +503,8 @@ static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) csk, csk->state, csk->flags, csk->tid); csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; - if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10)) - csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10); + if (csk->rcv_win > (M_RCV_BUFSIZ << 10)) + csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10); cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); @@ -988,6 +988,8 @@ static int init_act_open(struct cxgbi_sock *csk) goto rel_resource; skb->sk = (struct sock *)csk; set_arp_failure_handler(skb, act_open_arp_failure); + csk->snd_win = cxgb3i_snd_win; + csk->rcv_win = cxgb3i_rcv_win; csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; csk->wr_una_cred = 0; @@ -1320,8 +1322,6 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev) cdev->nports = adapter->params.nports; cdev->mtus = adapter->params.mtus; cdev->nmtus = NMTUS; - cdev->snd_win = cxgb3i_snd_win; - cdev->rcv_win = cxgb3i_rcv_win; cdev->rx_credit_thres = cxgb3i_rx_credit_thres; cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h index 20593fd69d8f..b0430c9359e7 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h @@ -1,7 +1,7 @@ /* * cxgb3i.h: Chelsio S3xx iSCSI driver. * - * Copyright (c) 2008 Chelsio Communications, Inc. + * Copyright (c) 2008-2015 Chelsio Communications, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index dd00e5fe4a5e..de6feb8964c9 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1,7 +1,7 @@ /* * cxgb4i.c: Chelsio T4 iSCSI driver. * - * Copyright (c) 2010 Chelsio Communications, Inc. + * Copyright (c) 2010-2015 Chelsio Communications, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -36,11 +36,12 @@ static unsigned int dbg_level; #define DRV_MODULE_NAME "cxgb4i" #define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver" -#define DRV_MODULE_VERSION "0.9.4" +#define DRV_MODULE_VERSION "0.9.5-ko" +#define DRV_MODULE_RELDATE "Apr. 
2015" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME - " v" DRV_MODULE_VERSION "\n"; + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Chelsio Communications, Inc."); MODULE_DESCRIPTION(DRV_MODULE_DESC); @@ -50,11 +51,13 @@ MODULE_LICENSE("GPL"); module_param(dbg_level, uint, 0644); MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)"); -static int cxgb4i_rcv_win = 256 * 1024; +#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024) +static int cxgb4i_rcv_win = -1; module_param(cxgb4i_rcv_win, int, 0644); MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes"); -static int cxgb4i_snd_win = 128 * 1024; +#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024) +static int cxgb4i_snd_win = -1; module_param(cxgb4i_snd_win, int, 0644); MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes"); @@ -196,10 +199,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, TX_CHAN_V(csk->tx_chan) | SMAC_SEL_V(csk->smac_idx) | ULP_MODE_V(ULP_MODE_ISCSI) | - RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10); + RCV_BUFSIZ_V(csk->rcv_win >> 10); + opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | - (RX_FC_DISABLE_F) | RSS_QUEUE_V(csk->rss_qid); if (is_t4(lldi->adapter_type)) { @@ -228,6 +231,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, } else { struct cpl_t5_act_open_req *req = (struct cpl_t5_act_open_req *)skb->head; + u32 isn = (prandom_u32() & ~7UL) - 1; INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, @@ -241,7 +245,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, cxgb4_select_ntuple( csk->cdev->ports[csk->port_id], csk->l2t))); - opt2 |= 1 << 31; + req->rsvd = cpu_to_be32(isn); + opt2 |= T5_ISS_VALID; + opt2 |= T5_OPT_2_VALID_F; + req->opt2 = cpu_to_be32(opt2); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, @@ -279,7 +286,7 @@ static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, TX_CHAN_V(csk->tx_chan) | SMAC_SEL_V(csk->smac_idx) | ULP_MODE_V(ULP_MODE_ISCSI) | - RCV_BUFSIZ_V(cxgb4i_rcv_win >> 10); + RCV_BUFSIZ_V(csk->rcv_win >> 10); opt2 = RX_CHANNEL_V(0) | RSS_QUEUE_VALID_F | @@ -544,7 +551,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; flowc->mnemval[5].val = htonl(csk->rcv_nxt); flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; - flowc->mnemval[6].val = htonl(cxgb4i_snd_win); + flowc->mnemval[6].val = htonl(csk->snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = htonl(csk->advmss); flowc->mnemval[8].mnemonic = 0; @@ -557,7 +564,7 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, - csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win, + csk->snd_nxt, csk->rcv_nxt, csk->snd_win, csk->advmss); cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); @@ -750,8 +757,8 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't * pass through opt0. 
*/ - if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10)) - csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); + if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) + csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; if (TCPOPT_TSTAMP_G(tcp_opt)) @@ -1367,6 +1374,8 @@ static int init_act_open(struct cxgbi_sock *csk) unsigned int step; unsigned int size, size6; int t4 = is_t4(lldi->adapter_type); + unsigned int linkspeed; + unsigned int rcv_winf, snd_winf; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", @@ -1440,6 +1449,21 @@ static int init_act_open(struct cxgbi_sock *csk) csk->txq_idx = cxgb4_port_idx(ndev) * step; step = lldi->nrxq / lldi->nchan; csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; + linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed; + csk->snd_win = cxgb4i_snd_win; + csk->rcv_win = cxgb4i_rcv_win; + if (cxgb4i_rcv_win <= 0) { + csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN; + rcv_winf = linkspeed / SPEED_10000; + if (rcv_winf) + csk->rcv_win *= rcv_winf; + } + if (cxgb4i_snd_win <= 0) { + csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN; + snd_winf = linkspeed / SPEED_10000; + if (snd_winf) + csk->snd_win *= snd_winf; + } csk->wr_cred = lldi->wr_cred - DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); csk->wr_max_cred = csk->wr_cred; @@ -1758,8 +1782,6 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi) cdev->nports = lldi->nports; cdev->mtus = lldi->mtus; cdev->nmtus = NMTUS; - cdev->snd_win = cxgb4i_snd_win; - cdev->rcv_win = cxgb4i_rcv_win; cdev->rx_credit_thres = cxgb4i_rx_credit_thres; cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h index 1096026ba241..22dd8d670e4a 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h @@ -1,7 +1,7 @@ /* * cxgb4i.h: Chelsio T4 iSCSI driver. * - * Copyright (c) 2010 Chelsio Communications, Inc. + * Copyright (c) 2010-2015 Chelsio Communications, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,6 +23,8 @@ #define CXGB4I_TX_HEADER_LEN \ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr)) +#define T5_ISS_VALID (1 << 18) + struct ulptx_idata { __be32 cmd_more; __be32 len; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index eb58afcfb73b..1d42e4f88b96 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1,7 +1,7 @@ /* * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. * - * Copyright (c) 2010 Chelsio Communications, Inc. + * Copyright (c) 2010-2015 Chelsio Communications, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,8 +38,12 @@ static unsigned int dbg_level; #define DRV_MODULE_NAME "libcxgbi" #define DRV_MODULE_DESC "Chelsio iSCSI driver library" -#define DRV_MODULE_VERSION "0.9.0" -#define DRV_MODULE_RELDATE "Jun. 2010" +#define DRV_MODULE_VERSION "0.9.1-ko" +#define DRV_MODULE_RELDATE "Apr. 
2015" + +static char version[] = + DRV_MODULE_DESC " " DRV_MODULE_NAME + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Chelsio Communications, Inc."); MODULE_DESCRIPTION(DRV_MODULE_DESC); @@ -1126,11 +1130,11 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) goto out_err; } - if (csk->write_seq - csk->snd_una >= cdev->snd_win) { + if (csk->write_seq - csk->snd_una >= csk->snd_win) { log_debug(1 << CXGBI_DBG_PDU_TX, "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", csk, csk->state, csk->flags, csk->tid, csk->write_seq, - csk->snd_una, cdev->snd_win); + csk->snd_una, csk->snd_win); err = -ENOBUFS; goto out_err; } @@ -1885,7 +1889,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", csk, csk->state, csk->flags, csk->tid, csk->copied_seq, csk->rcv_wup, cdev->rx_credit_thres, - cdev->rcv_win); + csk->rcv_win); if (csk->state != CTP_ESTABLISHED) return; @@ -1896,7 +1900,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) if (unlikely(cdev->rx_credit_thres == 0)) return; - must_send = credits + 16384 >= cdev->rcv_win; + must_send = credits + 16384 >= csk->rcv_win; if (must_send || credits >= cdev->rx_credit_thres) csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); } @@ -2913,6 +2917,8 @@ static int __init libcxgbi_init_module(void) sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; + pr_info("%s", version); + pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", ISCSI_ITT_MASK, sw_tag_idx_bits, ISCSI_AGE_MASK, sw_tag_age_bits); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index aba1af720df6..b3e5bd1d5d9c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -1,7 +1,7 @@ /* * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver. * - * Copyright (c) 2010 Chelsio Communications, Inc. + * Copyright (c) 2010-2015 Chelsio Communications, Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -234,6 +234,8 @@ struct cxgbi_sock { u32 snd_nxt; u32 snd_una; u32 write_seq; + u32 snd_win; + u32 rcv_win; }; /* @@ -540,8 +542,6 @@ struct cxgbi_device { struct iscsi_transport *itp; unsigned int pfvf; - unsigned int snd_win; - unsigned int rcv_win; unsigned int rx_credit_thres; unsigned int skb_tx_rsvd; unsigned int skb_rx_extra; /* for msg coalesced mode */ diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 2806cfbec2b9..f35ed53adaac 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -3562,7 +3562,6 @@ static struct scsi_host_template driver_template = { .slave_configure = adpt_slave_configure, .can_queue = MAX_TO_IOP_MESSAGES, .this_id = 7, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index fff682976c56..eefe14d453db 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c @@ -1764,7 +1764,6 @@ struct scsi_host_template fdomain_driver_template = { .can_queue = 1, .this_id = 6, .sg_tablesize = 64, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 5980c10c734d..d6498fabe628 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/debugfs.h> +#include <linux/vmalloc.h> #include "fnic.h" static struct dentry *fnic_trace_debugfs_root; diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 65a9bde26974..4e15c4bf0795 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -21,6 +21,7 @@ #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/time.h> +#include <linux/vmalloc.h> #include "fnic_io.h" #include "fnic.h" diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8eab107b53fb..1dafeb43333b 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -43,6 +43,8 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_dbg.h> #include <linux/cciss_ioctl.h> #include <linux/string.h> #include <linux/bitmap.h> @@ -56,7 +58,7 @@ #include "hpsa.h" /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 
*/ -#define HPSA_DRIVER_VERSION "3.4.4-1" +#define HPSA_DRIVER_VERSION "3.4.10-0" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -129,6 +131,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, + {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, @@ -186,6 +189,7 @@ static struct board_type products[] = { {0x21CC103C, "Smart Array", &SA5_access}, {0x21CD103C, "Smart Array", &SA5_access}, {0x21CE103C, "Smart HBA", &SA5_access}, + {0x05809005, "SmartHBA-SA", &SA5_access}, {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, @@ -194,6 +198,10 @@ static struct board_type products[] = { {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; +#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy) +static const struct scsi_cmnd hpsa_cmd_busy; +#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle) +static const struct scsi_cmnd hpsa_cmd_idle; static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); @@ -207,6 +215,9 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, static void cmd_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_alloc(struct ctlr_info *h); +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c); +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd); static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type); @@ -222,6 +233,7 @@ static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); +static int hpsa_slave_configure(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); @@ -232,7 +244,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, int min_blocks, u32 *bucket_map); -static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); +static void hpsa_free_performant_mode(struct ctlr_info *h); +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); static inline u32 next_command(struct ctlr_info *h, u8 q); static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, @@ -252,6 +265,8 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); static void hpsa_command_resubmit_worker(struct work_struct *work); +static u32 lockup_detected(struct ctlr_info *h); +static int detect_controller_lockup(struct ctlr_info *h); static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { @@ -265,40 +280,86 @@ static inline struct 
ctlr_info *shost_to_hba(struct Scsi_Host *sh) return (struct ctlr_info *) *priv; } +static inline bool hpsa_is_cmd_idle(struct CommandList *c) +{ + return c->scsi_cmd == SCSI_CMD_IDLE; +} + +static inline bool hpsa_is_pending_event(struct CommandList *c) +{ + return c->abort_pending || c->reset_pending; +} + +/* extract sense key, asc, and ascq from sense data. -1 means invalid. */ +static void decode_sense_data(const u8 *sense_data, int sense_data_len, + u8 *sense_key, u8 *asc, u8 *ascq) +{ + struct scsi_sense_hdr sshdr; + bool rc; + + *sense_key = -1; + *asc = -1; + *ascq = -1; + + if (sense_data_len < 1) + return; + + rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr); + if (rc) { + *sense_key = sshdr.sense_key; + *asc = sshdr.asc; + *ascq = sshdr.ascq; + } +} + static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c) { - if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) + u8 sense_key, asc, ascq; + int sense_len; + + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + + decode_sense_data(c->err_info->SenseInfo, sense_len, + &sense_key, &asc, &ascq); + if (sense_key != UNIT_ATTENTION || asc == -1) return 0; - switch (c->err_info->SenseInfo[12]) { + switch (asc) { case STATE_CHANGED: - dev_warn(&h->pdev->dev, HPSA "%d: a state change " - "detected, command retried\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: a state change detected, command retried\n", + h->devname); break; case LUN_FAILED: dev_warn(&h->pdev->dev, - HPSA "%d: LUN failure detected\n", h->ctlr); + "%s: LUN failure detected\n", h->devname); break; case REPORT_LUNS_CHANGED: dev_warn(&h->pdev->dev, - HPSA "%d: report LUN data changed\n", h->ctlr); + "%s: report LUN data changed\n", h->devname); /* * Note: this REPORT_LUNS_CHANGED condition only occurs on the external * target (array) devices. 
*/ break; case POWER_OR_RESET: - dev_warn(&h->pdev->dev, HPSA "%d: a power on " - "or device reset detected\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: a power on or device reset detected\n", + h->devname); break; case UNIT_ATTENTION_CLEARED: - dev_warn(&h->pdev->dev, HPSA "%d: unit attention " - "cleared by another initiator\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: unit attention cleared by another initiator\n", + h->devname); break; default: - dev_warn(&h->pdev->dev, HPSA "%d: unknown " - "unit attention detected\n", h->ctlr); + dev_warn(&h->pdev->dev, + "%s: unknown unit attention detected\n", + h->devname); break; } return 1; @@ -314,6 +375,20 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c) return 1; } +static u32 lockup_detected(struct ctlr_info *h); +static ssize_t host_show_lockup_detected(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ld; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + ld = lockup_detected(h); + + return sprintf(buf, "ld=%d\n", ld); +} + static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -425,7 +500,7 @@ static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ - 0x324b103C, /* SmartArray P711m */ + 0x324b103C, /* Smart Array P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ @@ -467,24 +542,32 @@ static u32 soft_unresettable_controller[] = { 0x409D0E11, /* Smart Array 6400 EM */ }; -static int ctlr_is_hard_resettable(u32 board_id) +static u32 needs_abort_tags_swizzled[] = { + 0x323D103C, /* Smart Array P700m */ + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* SmartArray P711m */ +}; + +static int board_id_in_array(u32 a[], int nelems, u32 board_id) { int i; - for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) - if (unresettable_controller[i] == board_id) - return 0; - return 1; + for (i = 0; i < nelems; i++) + if (a[i] == board_id) + return 1; + return 0; } -static int ctlr_is_soft_resettable(u32 board_id) +static int ctlr_is_hard_resettable(u32 board_id) { - int i; + return !board_id_in_array(unresettable_controller, + ARRAY_SIZE(unresettable_controller), board_id); +} - for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) - if (soft_unresettable_controller[i] == board_id) - return 0; - return 1; +static int ctlr_is_soft_resettable(u32 board_id) +{ + return !board_id_in_array(soft_unresettable_controller, + ARRAY_SIZE(soft_unresettable_controller), board_id); } static int ctlr_is_resettable(u32 board_id) @@ -493,6 +576,12 @@ static int ctlr_is_resettable(u32 board_id) ctlr_is_soft_resettable(board_id); } +static int ctlr_needs_abort_tags_swizzled(u32 board_id) +{ + return board_id_in_array(needs_abort_tags_swizzled, + ARRAY_SIZE(needs_abort_tags_swizzled), board_id); +} + static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { @@ -647,12 +736,15 @@ static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL); static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); +static DEVICE_ATTR(lockup_detected, S_IRUGO, + host_show_lockup_detected, NULL); static struct device_attribute *hpsa_sdev_attrs[] = { &dev_attr_raid_level, &dev_attr_lunid, 
&dev_attr_unique_id, &dev_attr_hp_ssd_smart_path_enabled, + &dev_attr_lockup_detected, NULL, }; @@ -667,6 +759,9 @@ static struct device_attribute *hpsa_shost_attrs[] = { NULL, }; +#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \ + HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS) + static struct scsi_host_template hpsa_driver_template = { .module = THIS_MODULE, .name = HPSA, @@ -681,6 +776,7 @@ static struct scsi_host_template hpsa_driver_template = { .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, .slave_alloc = hpsa_slave_alloc, + .slave_configure = hpsa_slave_configure, .slave_destroy = hpsa_slave_destroy, #ifdef CONFIG_COMPAT .compat_ioctl = hpsa_compat_ioctl, @@ -743,30 +839,43 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) * a separate special register for submitting commands. */ -/* set_performant_mode: Modify the tag for cciss performant +/* + * set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ -static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) +#define DEFAULT_REPLY_QUEUE (-1) +static void set_performant_mode(struct ctlr_info *h, struct CommandList *c, + int reply_queue) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) { c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); - if (likely(h->msix_vector > 0)) + if (unlikely(!h->msix_vector)) + return; + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) c->Header.ReplyQueue = raw_smp_processor_id() % h->nreply_queues; + else + c->Header.ReplyQueue = reply_queue % h->nreply_queues; } } static void set_ioaccel1_performant_mode(struct ctlr_info *h, - struct CommandList *c) + struct CommandList *c, + int reply_queue) { struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; - /* Tell the controller to post the reply to the queue for this + /* + * Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. */ - cp->ReplyQueue = smp_processor_id() % h->nreply_queues; - /* Set the bits in the address sent down to include: + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->ReplyQueue = smp_processor_id() % h->nreply_queues; + else + cp->ReplyQueue = reply_queue % h->nreply_queues; + /* + * Set the bits in the address sent down to include: * - performant mode bit (bit 0) * - pull count (bits 1-3) * - command type (bits 4-6) @@ -775,20 +884,48 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h, IOACCEL1_BUSADDR_CMDTYPE; } -static void set_ioaccel2_performant_mode(struct ctlr_info *h, - struct CommandList *c) +static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) { - struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *) + &h->ioaccel2_cmd_pool[c->cmdindex]; /* Tell the controller to post the reply to the queue for this * processor. This seems to give the best I/O throughput. 
*/ - cp->reply_queue = smp_processor_id() % h->nreply_queues; + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->reply_queue = smp_processor_id() % h->nreply_queues; + else + cp->reply_queue = reply_queue % h->nreply_queues; /* Set the bits in the address sent down to include: * - performant mode bit not used in ioaccel mode 2 * - pull count (bits 0-3) * - command type isn't needed for ioaccel2 */ + c->busaddr |= h->ioaccel2_blockFetchTable[0]; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, + struct CommandList *c, + int reply_queue) +{ + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* + * Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + if (likely(reply_queue == DEFAULT_REPLY_QUEUE)) + cp->reply_queue = smp_processor_id() % h->nreply_queues; + else + cp->reply_queue = reply_queue % h->nreply_queues; + /* + * Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); } @@ -821,26 +958,38 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; } -static void enqueue_cmd_and_start_io(struct ctlr_info *h, - struct CommandList *c) +static void __enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c, int reply_queue) { dial_down_lockup_detection_during_fw_flash(h, c); atomic_inc(&h->commands_outstanding); switch (c->cmd_type) { case CMD_IOACCEL1: - set_ioaccel1_performant_mode(h, c); + set_ioaccel1_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); break; case CMD_IOACCEL2: - set_ioaccel2_performant_mode(h, c); + set_ioaccel2_performant_mode(h, c, reply_queue); + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + break; + case IOACCEL2_TMF: + set_ioaccel2_tmf_performant_mode(h, c, reply_queue); writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); break; default: - set_performant_mode(h, c); + set_performant_mode(h, c, reply_queue); h->access.submit_command(h, c); } } +static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) +{ + if (unlikely(hpsa_is_pending_event(c))) + return finish_cmd(c); + + __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE); +} + static inline int is_hba_lunid(unsigned char scsi3addr[]) { return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; @@ -881,6 +1030,23 @@ static int hpsa_find_target_lun(struct ctlr_info *h, return !found; } +static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, char *description) +{ + dev_printk(level, &h->pdev->dev, + "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n", + h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + description, + scsi_device_type(dev->devtype), + dev->vendor, + dev->model, + dev->raid_level > RAID_UNKNOWN ? + "RAID-?" : raid_label[dev->raid_level], + dev->offload_config ? '+' : '-', + dev->offload_enabled ? '+' : '-', + dev->expose_state); +} + /* Add an entry into h->dev[] array. 
*/ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, struct hpsa_scsi_dev_t *device, @@ -948,15 +1114,10 @@ lun_assigned: h->ndevices++; added[*nadded] = device; (*nadded)++; - - /* initially, (before registering with scsi layer) we don't - * know our hostno and we don't want to print anything first - * time anyway (the scsi layer's inquiries will show that info) - */ - /* if (hostno != -1) */ - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", - scsi_device_type(device->devtype), hostno, - device->bus, device->target, device->lun); + hpsa_show_dev_msg(KERN_INFO, h, device, + device->expose_state & HPSA_SCSI_ADD ? "added" : "masked"); + device->offload_to_be_enabled = device->offload_enabled; + device->offload_enabled = 0; return 0; } @@ -964,6 +1125,7 @@ lun_assigned: static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, int entry, struct hpsa_scsi_dev_t *new_entry) { + int offload_enabled; /* assumes h->devlock is held */ BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); @@ -982,16 +1144,29 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, */ h->dev[entry]->raid_map = new_entry->raid_map; h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; - wmb(); /* ensure raid map updated prior to ->offload_enabled */ } + if (new_entry->hba_ioaccel_enabled) { + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ + } + h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; h->dev[entry]->offload_config = new_entry->offload_config; h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; - h->dev[entry]->offload_enabled = new_entry->offload_enabled; h->dev[entry]->queue_depth = new_entry->queue_depth; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", - scsi_device_type(new_entry->devtype), hostno, new_entry->bus, - new_entry->target, new_entry->lun); + /* + * We can turn off ioaccel offload now, but need to delay turning + * it on until we can update h->dev[entry]->phys_disk[], but we + * can't do that until all the devices are updated. + */ + h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; + if (!new_entry->offload_enabled) + h->dev[entry]->offload_enabled = 0; + + offload_enabled = h->dev[entry]->offload_enabled; + h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; + hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); + h->dev[entry]->offload_enabled = offload_enabled; } /* Replace an entry from h->dev[] array. */ @@ -1017,9 +1192,9 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, h->dev[entry] = new_entry; added[*nadded] = new_entry; (*nadded)++; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", - scsi_device_type(new_entry->devtype), hostno, new_entry->bus, - new_entry->target, new_entry->lun); + hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); + new_entry->offload_to_be_enabled = new_entry->offload_enabled; + new_entry->offload_enabled = 0; } /* Remove an entry from h->dev[] array. 
*/ @@ -1039,9 +1214,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, for (i = entry; i < h->ndevices-1; i++) h->dev[i] = h->dev[i+1]; h->ndevices--; - dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", - scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, - sd->lun); + hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); } #define SCSI3ADDR_EQ(a, b) ( \ @@ -1283,6 +1456,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) nraid_map_entries = RAID_MAP_MAX_ENTRIES; + logical_drive->nphysical_disks = nraid_map_entries; + qdepth = 0; for (i = 0; i < nraid_map_entries; i++) { logical_drive->phys_disk[i] = NULL; @@ -1312,7 +1487,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, */ if (!logical_drive->phys_disk[i]) { logical_drive->offload_enabled = 0; - logical_drive->queue_depth = h->nr_cmds; + logical_drive->offload_to_be_enabled = 0; + logical_drive->queue_depth = 8; } } if (nraid_map_entries) @@ -1335,6 +1511,16 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, continue; if (!is_logical_dev_addr_mode(dev[i]->scsi3addr)) continue; + + /* + * If offload is currently enabled, the RAID map and + * phys_disk[] assignment *better* not be changing + * and since it isn't changing, we do not need to + * update it. + */ + if (dev[i]->offload_enabled) + continue; + hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); } } @@ -1411,9 +1597,7 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, */ if (sd[i]->volume_offline) { hpsa_show_volume_status(h, sd[i]); - dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", - h->scsi_host->host_no, - sd[i]->bus, sd[i]->target, sd[i]->lun); + hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); continue; } @@ -1433,6 +1617,14 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, /* but if it does happen, we just ignore that device */ } } + hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); + + /* Now that h->dev[]->phys_disk[] is coherent, we can enable + * any logical drives that need it enabled. + */ + for (i = 0; i < h->ndevices; i++) + h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; + spin_unlock_irqrestore(&h->devlock, flags); /* Monitor devices which are in one of several NOT READY states to be @@ -1456,20 +1648,22 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, sh = h->scsi_host; /* Notify scsi mid layer of any removed devices */ for (i = 0; i < nremoved; i++) { - struct scsi_device *sdev = - scsi_device_lookup(sh, removed[i]->bus, - removed[i]->target, removed[i]->lun); - if (sdev != NULL) { - scsi_remove_device(sdev); - scsi_device_put(sdev); - } else { - /* We don't expect to get here. - * future cmds to this device will get selection - * timeout as if the device was gone. - */ - dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " - " for removal.", hostno, removed[i]->bus, - removed[i]->target, removed[i]->lun); + if (removed[i]->expose_state & HPSA_SCSI_ADD) { + struct scsi_device *sdev = + scsi_device_lookup(sh, removed[i]->bus, + removed[i]->target, removed[i]->lun); + if (sdev != NULL) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + /* + * We don't expect to get here. + * future cmds to this device will get selection + * timeout as if the device was gone. 
+ */ + hpsa_show_dev_msg(KERN_WARNING, h, removed[i], + "didn't find device for removal."); + } } kfree(removed[i]); removed[i] = NULL; @@ -1477,16 +1671,18 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, /* Notify scsi mid layer of any added devices */ for (i = 0; i < nadded; i++) { + if (!(added[i]->expose_state & HPSA_SCSI_ADD)) + continue; if (scsi_add_device(sh, added[i]->bus, added[i]->target, added[i]->lun) == 0) continue; - dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " - "device not added.\n", hostno, added[i]->bus, - added[i]->target, added[i]->lun); + hpsa_show_dev_msg(KERN_WARNING, h, added[i], + "addition failed, device not added."); /* now we have to remove it from h->dev, * since it didn't get added to scsi mid layer */ fixup_botched_add(h, added[i]); + added[i] = NULL; } free_and_out: @@ -1512,7 +1708,6 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, return NULL; } -/* link sdev->hostdata to our per-device structure. */ static int hpsa_slave_alloc(struct scsi_device *sdev) { struct hpsa_scsi_dev_t *sd; @@ -1523,21 +1718,80 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) spin_lock_irqsave(&h->devlock, flags); sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), sdev_id(sdev), sdev->lun); - if (sd != NULL) { - sdev->hostdata = sd; - if (sd->queue_depth) - scsi_change_queue_depth(sdev, sd->queue_depth); + if (likely(sd)) { atomic_set(&sd->ioaccel_cmds_out, 0); - } + sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL; + } else + sdev->hostdata = NULL; spin_unlock_irqrestore(&h->devlock, flags); return 0; } +/* configure scsi device based on internal per-device structure */ +static int hpsa_slave_configure(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *sd; + int queue_depth; + + sd = sdev->hostdata; + sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH); + + if (sd) + queue_depth = sd->queue_depth != 0 ? + sd->queue_depth : sdev->host->can_queue; + else + queue_depth = sdev->host->can_queue; + + scsi_change_queue_depth(sdev, queue_depth); + + return 0; +} + static void hpsa_slave_destroy(struct scsi_device *sdev) { /* nothing to do. 
*/ } +static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (!h->ioaccel2_cmd_sg_list) + return; + for (i = 0; i < h->nr_cmds; i++) { + kfree(h->ioaccel2_cmd_sg_list[i]); + h->ioaccel2_cmd_sg_list[i] = NULL; + } + kfree(h->ioaccel2_cmd_sg_list); + h->ioaccel2_cmd_sg_list = NULL; +} + +static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (h->chainsize <= 0) + return 0; + + h->ioaccel2_cmd_sg_list = + kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, + GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list) + return -ENOMEM; + for (i = 0; i < h->nr_cmds; i++) { + h->ioaccel2_cmd_sg_list[i] = + kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * + h->maxsgentries, GFP_KERNEL); + if (!h->ioaccel2_cmd_sg_list[i]) + goto clean; + } + return 0; + +clean: + hpsa_free_ioaccel2_sg_chain_blocks(h); + return -ENOMEM; +} + static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1552,7 +1806,7 @@ static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) h->cmd_sg_list = NULL; } -static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) +static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) { int i; @@ -1580,6 +1834,39 @@ clean: return -ENOMEM; } +static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp, struct CommandList *c) +{ + struct ioaccel2_sg_element *chain_block; + u64 temp64; + u32 chain_size; + + chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; + chain_size = le32_to_cpu(cp->data_len); + temp64 = pci_map_single(h->pdev, chain_block, chain_size, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&h->pdev->dev, temp64)) { + /* prevent subsequent unmapping */ + cp->sg->address = 0; + return -1; + } + cp->sg->address = cpu_to_le64(temp64); + return 0; +} + +static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, + struct io_accel2_cmd *cp) +{ + struct ioaccel2_sg_element *chain_sg; + u64 temp64; + u32 chain_size; + + chain_sg = cp->sg; + temp64 = le64_to_cpu(chain_sg->address); + chain_size = le32_to_cpu(cp->data_len); + pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); +} + static int hpsa_map_sg_chain_block(struct ctlr_info *h, struct CommandList *c) { @@ -1629,6 +1916,7 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, { int data_len; int retry = 0; + u32 ioaccel2_resid = 0; switch (c2->error_data.serv_response) { case IOACCEL2_SERV_RESPONSE_COMPLETE: @@ -1636,9 +1924,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: break; case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: - dev_warn(&h->pdev->dev, - "%s: task complete with check condition.\n", - "HP SSD Smart Path"); cmd->result |= SAM_STAT_CHECK_CONDITION; if (c2->error_data.data_present != IOACCEL2_SENSE_DATA_PRESENT) { @@ -1658,58 +1943,56 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: - dev_warn(&h->pdev->dev, - "%s: task complete with BUSY status.\n", - "HP SSD Smart Path"); retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: - dev_warn(&h->pdev->dev, - "%s: task complete with reservation conflict.\n", - "HP SSD Smart Path"); retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: - /* Make scsi midlayer do unlimited retries */ - cmd->result = DID_IMM_RETRY << 16; + retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: - dev_warn(&h->pdev->dev, - "%s: task complete with aborted status.\n", - "HP SSD Smart Path"); retry = 1; break; default: - 
dev_warn(&h->pdev->dev, - "%s: task complete with unrecognized status: 0x%02x\n", - "HP SSD Smart Path", c2->error_data.status); retry = 1; break; } break; case IOACCEL2_SERV_RESPONSE_FAILURE: - /* don't expect to get here. */ - dev_warn(&h->pdev->dev, - "unexpected delivery or target failure, status = 0x%02x\n", - c2->error_data.status); - retry = 1; + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_IO_ERROR: + case IOACCEL2_STATUS_SR_IO_ABORTED: + case IOACCEL2_STATUS_SR_OVERRUN: + retry = 1; + break; + case IOACCEL2_STATUS_SR_UNDERRUN: + cmd->result = (DID_OK << 16); /* host byte */ + cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ + ioaccel2_resid = get_unaligned_le32( + &c2->error_data.resid_cnt[0]); + scsi_set_resid(cmd, ioaccel2_resid); + break; + case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: + case IOACCEL2_STATUS_SR_INVALID_DEVICE: + case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: + /* We will get an event from ctlr to trigger rescan */ + retry = 1; + break; + default: + retry = 1; + } break; case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: break; case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: break; case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: - dev_warn(&h->pdev->dev, "task management function rejected.\n"); retry = 1; break; case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: - dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); break; default: - dev_warn(&h->pdev->dev, - "%s: Unrecognized server response: 0x%02x\n", - "HP SSD Smart Path", - c2->error_data.serv_response); retry = 1; break; } @@ -1717,6 +2000,87 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, return retry; /* retry on raid path? */ } +static void hpsa_cmd_resolve_events(struct ctlr_info *h, + struct CommandList *c) +{ + bool do_wake = false; + + /* + * Prevent the following race in the abort handler: + * + * 1. LLD is requested to abort a SCSI command + * 2. The SCSI command completes + * 3. The struct CommandList associated with step 2 is made available + * 4. New I/O request to LLD to another LUN re-uses struct CommandList + * 5. Abort handler follows scsi_cmnd->host_scribble and + * finds struct CommandList and tries to abort it + * Now we have aborted the wrong command. + * + * Reset c->scsi_cmd here so that the abort or reset handler will know + * this command has completed. Then, check to see if the handler is + * waiting for this command, and, if so, wake it. + */ + c->scsi_cmd = SCSI_CMD_IDLE; + mb(); /* Declare command idle before checking for pending events. */ + if (c->abort_pending) { + do_wake = true; + c->abort_pending = false; + } + if (c->reset_pending) { + unsigned long flags; + struct hpsa_scsi_dev_t *dev; + + /* + * There appears to be a reset pending; lock the lock and + * reconfirm. If so, then decrement the count of outstanding + * commands and wake the reset command if this is the last one. + */ + spin_lock_irqsave(&h->lock, flags); + dev = c->reset_pending; /* Re-fetch under the lock.
*/ + if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) + do_wake = true; + c->reset_pending = NULL; + spin_unlock_irqrestore(&h->lock, flags); + } + + if (do_wake) + wake_up_all(&h->event_sync_wait_queue); +} + +static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, + struct CommandList *c) +{ + hpsa_cmd_resolve_events(h, c); + cmd_tagged_free(h, c); +} + +static void hpsa_cmd_free_and_done(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd) +{ + hpsa_cmd_resolve_and_free(h, c); + cmd->scsi_done(cmd); +} + +static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) +{ + INIT_WORK(&c->work, hpsa_command_resubmit_worker); + queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); +} + +static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd) +{ + cmd->result = DID_ABORT << 16; +} + +static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, + struct scsi_cmnd *cmd) +{ + hpsa_set_scsi_cmd_aborted(cmd); + dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", + c->Request.CDB, c->err_info->ScsiStatus); + hpsa_cmd_resolve_and_free(h, c); +} + static void process_ioaccel2_completion(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev) @@ -1725,13 +2089,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h, /* check for good status */ if (likely(c2->error_data.serv_response == 0 && - c2->error_data.status == 0)) { - cmd_free(h, c); - cmd->scsi_done(cmd); - return; - } + c2->error_data.status == 0)) + return hpsa_cmd_free_and_done(h, c, cmd); - /* Any RAID offload error results in retry which will use + /* + * Any RAID offload error results in retry which will use * the normal I/O path so the controller can handle whatever's * wrong. */ @@ -1741,19 +2103,42 @@ static void process_ioaccel2_completion(struct ctlr_info *h, if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) dev->offload_enabled = 0; - goto retry_cmd; + + return hpsa_retry_cmd(h, c); } if (handle_ioaccel_mode2_error(h, c, cmd, c2)) - goto retry_cmd; + return hpsa_retry_cmd(h, c); - cmd_free(h, c); - cmd->scsi_done(cmd); - return; + return hpsa_cmd_free_and_done(h, c, cmd); +} -retry_cmd: - INIT_WORK(&c->work, hpsa_command_resubmit_worker); - queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); +/* Returns 0 on success, < 0 otherwise. */ +static int hpsa_evaluate_tmf_status(struct ctlr_info *h, + struct CommandList *cp) +{ + u8 tmf_status = cp->err_info->ScsiStatus; + + switch (tmf_status) { + case CISS_TMF_COMPLETE: + /* + * CISS_TMF_COMPLETE never happens, instead, + * ei->CommandStatus == 0 for this case. 
+ */ + case CISS_TMF_SUCCESS: + return 0; + case CISS_TMF_INVALID_FRAME: + case CISS_TMF_NOT_SUPPORTED: + case CISS_TMF_FAILED: + case CISS_TMF_WRONG_LUN: + case CISS_TMF_OVERLAPPED_TAG: + break; + default: + dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", + tmf_status); + break; + } + return -tmf_status; } static void complete_scsi_command(struct CommandList *cp) @@ -1762,51 +2147,58 @@ static void complete_scsi_command(struct CommandList *cp) struct ctlr_info *h; struct ErrorInfo *ei; struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; - unsigned char sense_key; - unsigned char asc; /* additional sense code */ - unsigned char ascq; /* additional sense code qualifier */ + u8 sense_key; + u8 asc; /* additional sense code */ + u8 ascq; /* additional sense code qualifier */ unsigned long sense_data_size; ei = cp->err_info; cmd = cp->scsi_cmd; h = cp->h; dev = cmd->device->hostdata; + c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; scsi_dma_unmap(cmd); /* undo the DMA mappings */ if ((cp->cmd_type == CMD_SCSI) && (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) hpsa_unmap_sg_chain_block(h, cp); + if ((cp->cmd_type == CMD_IOACCEL2) && + (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) + hpsa_unmap_ioaccel2_sg_chain_block(h, c2); + cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) atomic_dec(&cp->phys_disk->ioaccel_cmds_out); - if (cp->cmd_type == CMD_IOACCEL2) - return process_ioaccel2_completion(h, cp, cmd, dev); - - cmd->result |= ei->ScsiStatus; + /* + * We check for lockup status here as it may be set for + * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by + * fail_all_outstanding_cmds() + */ + if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { + /* DID_NO_CONNECT will prevent a retry */ + cmd->result = DID_NO_CONNECT << 16; + return hpsa_cmd_free_and_done(h, cp, cmd); + } - scsi_set_resid(cmd, ei->ResidualCnt); - if (ei->CommandStatus == 0) { - if (cp->cmd_type == CMD_IOACCEL1) - atomic_dec(&cp->phys_disk->ioaccel_cmds_out); - cmd_free(h, cp); - cmd->scsi_done(cmd); - return; + if ((unlikely(hpsa_is_pending_event(cp)))) { + if (cp->reset_pending) + return hpsa_cmd_resolve_and_free(h, cp); + if (cp->abort_pending) + return hpsa_cmd_abort_and_free(h, cp, cmd); } - /* copy the sense data */ - if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) - sense_data_size = SCSI_SENSE_BUFFERSIZE; - else - sense_data_size = sizeof(ei->SenseInfo); - if (ei->SenseLen < sense_data_size) - sense_data_size = ei->SenseLen; + if (cp->cmd_type == CMD_IOACCEL2) + return process_ioaccel2_completion(h, cp, cmd, dev); - memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); + scsi_set_resid(cmd, ei->ResidualCnt); + if (ei->CommandStatus == 0) + return hpsa_cmd_free_and_done(h, cp, cmd); /* For I/O accelerator commands, copy over some fields to the normal * CISS header used below for error handling. 
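Note on the reworked completion path above: it establishes a fixed triage order, with a locked-up controller handled first (DID_NO_CONNECT, so the midlayer will not retry into dead hardware), pending reset/abort events next, and only then the ioaccel2 path and normal CISS status decoding. hpsa_is_pending_event() is not defined in the hunks shown here; judging from its call site and the two fields tested immediately afterwards, it presumably reduces to a check of the two event flags, roughly:

	/* Sketch inferred from the call sites, not quoted from the patch. */
	static inline bool hpsa_is_pending_event(struct CommandList *c)
	{
		return c->abort_pending || c->reset_pending;
	}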
@@ -1828,10 +2220,7 @@ static void complete_scsi_command(struct CommandList *cp) if (is_logical_dev_addr_mode(dev->scsi3addr)) { if (ei->CommandStatus == CMD_IOACCEL_DISABLED) dev->offload_enabled = 0; - INIT_WORK(&cp->work, hpsa_command_resubmit_worker); - queue_work_on(raw_smp_processor_id(), - h->resubmit_wq, &cp->work); - return; + return hpsa_retry_cmd(h, cp); } } @@ -1839,14 +2228,18 @@ static void complete_scsi_command(struct CommandList *cp) switch (ei->CommandStatus) { case CMD_TARGET_STATUS: - if (ei->ScsiStatus) { - /* Get sense key */ - sense_key = 0xf & ei->SenseInfo[2]; - /* Get additional sense code */ - asc = ei->SenseInfo[12]; - /* Get addition sense code qualifier */ - ascq = ei->SenseInfo[13]; - } + cmd->result |= ei->ScsiStatus; + /* copy the sense data */ + if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) + sense_data_size = SCSI_SENSE_BUFFERSIZE; + else + sense_data_size = sizeof(ei->SenseInfo); + if (ei->SenseLen < sense_data_size) + sense_data_size = ei->SenseLen; + memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); + if (ei->ScsiStatus) + decode_sense_data(ei->SenseInfo, sense_data_size, + &sense_key, &asc, &ascq); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { if (sense_key == ABORTED_COMMAND) { cmd->result |= DID_SOFT_ERROR << 16; @@ -1918,10 +2311,8 @@ static void complete_scsi_command(struct CommandList *cp) cp->Request.CDB); break; case CMD_ABORTED: - cmd->result = DID_ABORT << 16; - dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", - cp->Request.CDB, ei->ScsiStatus); - break; + /* Return now to avoid calling scsi_done(). */ + return hpsa_cmd_abort_and_free(h, cp, cmd); case CMD_ABORT_FAILED: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", @@ -1941,6 +2332,10 @@ static void complete_scsi_command(struct CommandList *cp) cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "Command unabortable\n"); break; + case CMD_TMF_STATUS: + if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ + cmd->result = DID_ERROR << 16; + break; case CMD_IOACCEL_DISABLED: /* This only handles the direct pass-through case since RAID * offload is handled above. Just attempt a retry. 
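Both complete_scsi_command() and hpsa_scsi_interpret_error() now funnel sense parsing through decode_sense_data(), whose body lies outside the hunks shown. A stand-in consistent with the call sites could be built on scsi_normalize_sense(), which handles both fixed- and descriptor-format sense data (the removed open-coded version only read the fixed-format bytes 2, 12 and 13); the real helper may differ in detail:

	static int decode_sense_data(const u8 *sense_data, int sense_data_len,
				     u8 *sense_key, u8 *asc, u8 *ascq)
	{
		struct scsi_sense_hdr sshdr;	/* from <scsi/scsi_eh.h> */

		*sense_key = 0;
		*asc = 0;
		*ascq = 0;
		if (!scsi_normalize_sense(sense_data, sense_data_len, &sshdr))
			return -EINVAL;		/* unrecognizable sense data */
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
		return 0;
	}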
@@ -1954,8 +2349,8 @@ static void complete_scsi_command(struct CommandList *cp) dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } - cmd_free(h, cp); - cmd->scsi_done(cmd); + + return hpsa_cmd_free_and_done(h, cp, cmd); } static void hpsa_pci_unmap(struct pci_dev *pdev, @@ -1998,14 +2393,36 @@ static int hpsa_map_one(struct pci_dev *pdev, return 0; } -static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, - struct CommandList *c) +#define NO_TIMEOUT ((unsigned long) -1) +#define DEFAULT_TIMEOUT 30000 /* milliseconds */ +static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, + struct CommandList *c, int reply_queue, unsigned long timeout_msecs) { DECLARE_COMPLETION_ONSTACK(wait); c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); + __enqueue_cmd_and_start_io(h, c, reply_queue); + if (timeout_msecs == NO_TIMEOUT) { + /* TODO: get rid of this no-timeout thing */ + wait_for_completion_io(&wait); + return IO_OK; + } + if (!wait_for_completion_io_timeout(&wait, + msecs_to_jiffies(timeout_msecs))) { + dev_warn(&h->pdev->dev, "Command timed out.\n"); + return -ETIMEDOUT; + } + return IO_OK; +} + +static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, + int reply_queue, unsigned long timeout_msecs) +{ + if (unlikely(lockup_detected(h))) { + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; + return IO_OK; + } + return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); } static u32 lockup_detected(struct ctlr_info *h) @@ -2020,25 +2437,19 @@ static u32 lockup_detected(struct ctlr_info *h) return rc; } -static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, - struct CommandList *c) -{ - /* If controller lockup detected, fake a hardware error. 
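The substantive change in this hunk is that internally issued commands now have a bounded wait: hpsa_scsi_do_simple_cmd_core() uses wait_for_completion_io_timeout() and surfaces -ETIMEDOUT, while hpsa_scsi_do_simple_cmd() short-circuits a locked-up controller with a synthetic CMD_CTLR_LOCKUP status rather than touching the hardware. Callers pick up an obligation they did not have before: on a non-zero return the controller never filled in err_info, so it must not be decoded. A hypothetical caller under the new contract (DEFAULT_TIMEOUT is the 30-second value defined above):

	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, DEFAULT_TIMEOUT);
	if (rc) {		/* -ETIMEDOUT: do not interpret c->err_info */
		cmd_free(h, c);
		return rc;
	}
	/* only now is c->err_info->CommandStatus meaningful */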
*/ - if (unlikely(lockup_detected(h))) - c->err_info->CommandStatus = CMD_HARDWARE_ERR; - else - hpsa_scsi_do_simple_cmd_core(h, c); -} - #define MAX_DRIVER_CMD_RETRIES 25 -static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, - struct CommandList *c, int data_direction) +static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, + struct CommandList *c, int data_direction, unsigned long timeout_msecs) { int backoff_time = 10, retry_count = 0; + int rc; do { memset(c->err_info, 0, sizeof(*c->err_info)); - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, + timeout_msecs); + if (rc) + break; retry_count++; if (retry_count > 3) { msleep(backoff_time); @@ -2049,6 +2460,9 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, check_for_busy(h, c)) && retry_count <= MAX_DRIVER_CMD_RETRIES); hpsa_pci_unmap(h->pdev, c, 1, data_direction); + if (retry_count > MAX_DRIVER_CMD_RETRIES) + rc = -EIO; + return rc; } static void hpsa_print_cmd(struct ctlr_info *h, char *txt, @@ -2072,16 +2486,23 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h, { const struct ErrorInfo *ei = cp->err_info; struct device *d = &cp->h->pdev->dev; - const u8 *sd = ei->SenseInfo; + u8 sense_key, asc, ascq; + int sense_len; switch (ei->CommandStatus) { case CMD_TARGET_STATUS: + if (ei->SenseLen > sizeof(ei->SenseInfo)) + sense_len = sizeof(ei->SenseInfo); + else + sense_len = ei->SenseLen; + decode_sense_data(ei->SenseInfo, sense_len, + &sense_key, &asc, &ascq); hpsa_print_cmd(h, "SCSI status", cp); if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) - dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", - sd[2] & 0x0f, sd[12], sd[13]); + dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", + sense_key, asc, ascq); else - dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); + dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); if (ei->ScsiStatus == 0) dev_warn(d, "SCSI status is abnormally zero. " "(probably indicates selection timeout " @@ -2125,6 +2546,9 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h, case CMD_UNABORTABLE: hpsa_print_cmd(h, "unabortable", cp); break; + case CMD_CTLR_LOCKUP: + hpsa_print_cmd(h, "controller lockup detected", cp); + break; default: hpsa_print_cmd(h, "unknown status", cp); dev_warn(d, "Unknown command status %x\n", @@ -2142,17 +2566,15 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, c = cmd_alloc(h); - if (c == NULL) { - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } - if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -2172,17 +2594,15 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, struct ErrorInfo *ei; c = cmd_alloc(h); - if (c == NULL) { /* trouble... 
*/ - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } - if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, page, scsi3addr, TYPE_CMD)) { rc = -1; goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -2191,10 +2611,10 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, out: cmd_free(h, c); return rc; - } +} static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, - u8 reset_type) + u8 reset_type, int reply_queue) { int rc = IO_OK; struct CommandList *c; @@ -2202,16 +2622,16 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } /* fill_cmd can't fail here, no data buffer to map. */ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to send reset command\n"); + goto out; + } /* no unmap needed here because no data xfer. */ ei = c->err_info; @@ -2219,10 +2639,129 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, hpsa_scsi_interpret_error(h, c); rc = -1; } +out: cmd_free(h, c); return rc; } +static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, + struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr) +{ + int i; + bool match = false; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + + if (hpsa_is_cmd_idle(c)) + return false; + + switch (c->cmd_type) { + case CMD_SCSI: + case CMD_IOCTL_PEND: + match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, + sizeof(c->Header.LUN.LunAddrBytes)); + break; + + case CMD_IOACCEL1: + case CMD_IOACCEL2: + if (c->phys_disk == dev) { + /* HBA mode match */ + match = true; + } else { + /* Possible RAID mode -- check each phys dev. */ + /* FIXME: Do we need to take out a lock here? If + * so, we could just call hpsa_get_pdisk_of_ioaccel2() + * instead. */ + for (i = 0; i < dev->nphysical_disks && !match; i++) { + /* FIXME: an alternate test might be + * + * match = dev->phys_disk[i]->ioaccel_handle + * == c2->scsi_nexus; */ + match = dev->phys_disk[i] == c->phys_disk; + } + } + break; + + case IOACCEL2_TMF: + for (i = 0; i < dev->nphysical_disks && !match; i++) { + match = dev->phys_disk[i]->ioaccel_handle == + le32_to_cpu(ac->it_nexus); + } + break; + + case 0: /* The command is in the middle of being initialized. 
*/ + match = false; + break; + + default: + dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", + c->cmd_type); + BUG(); + } + + return match; +} + +static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, + unsigned char *scsi3addr, u8 reset_type, int reply_queue) +{ + int i; + int rc = 0; + + /* We can really only handle one reset at a time */ + if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { + dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); + return -EINTR; + } + + BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + int refcount = atomic_inc_return(&c->refcount); + + if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { + unsigned long flags; + + /* + * Mark the target command as having a reset pending, + * then lock a lock so that the command cannot complete + * while we're considering it. If the command is not + * idle then count it; otherwise revoke the event. + */ + c->reset_pending = dev; + spin_lock_irqsave(&h->lock, flags); /* Implied MB */ + if (!hpsa_is_cmd_idle(c)) + atomic_inc(&dev->reset_cmds_out); + else + c->reset_pending = NULL; + spin_unlock_irqrestore(&h->lock, flags); + } + + cmd_free(h, c); + } + + rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); + if (!rc) + wait_event(h->event_sync_wait_queue, + atomic_read(&dev->reset_cmds_out) == 0 || + lockup_detected(h)); + + if (unlikely(lockup_detected(h))) { + dev_warn(&h->pdev->dev, + "Controller lockup detected during reset wait\n"); + mutex_unlock(&h->reset_mutex); + rc = -ENODEV; + } + + if (unlikely(rc)) + atomic_set(&dev->reset_cmds_out, 0); + + mutex_unlock(&h->reset_mutex); + return rc; +} + static void hpsa_get_raid_level(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *raid_level) { @@ -2328,23 +2867,23 @@ static int hpsa_get_raid_map(struct ctlr_info *h, struct ErrorInfo *ei; c = cmd_alloc(h); - if (c == NULL) { - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } + if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, sizeof(this_device->raid_map), 0, scsi3addr, TYPE_CMD)) { - dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); + dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); cmd_free(h, c); - return -ENOMEM; + return -1; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); - cmd_free(h, c); - return -1; + rc = -1; + goto out; } cmd_free(h, c); @@ -2356,6 +2895,9 @@ static int hpsa_get_raid_map(struct ctlr_info *h, } hpsa_debug_map_buff(h, rc, &this_device->raid_map); return rc; +out: + cmd_free(h, c); + return rc; } static int hpsa_bmic_id_physical_device(struct ctlr_info *h, @@ -2375,7 +2917,8 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h, c->Request.CDB[2] = bmic_device_index & 0xff; c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, + NO_TIMEOUT); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(h, c); @@ -2438,6 +2981,7 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = 0; this_device->offload_enabled = 
0; + this_device->offload_to_be_enabled = 0; buf = kzalloc(64, GFP_KERNEL); if (!buf) @@ -2461,6 +3005,7 @@ if (hpsa_get_raid_map(h, scsi3addr, this_device)) this_device->offload_enabled = 0; } + this_device->offload_to_be_enabled = this_device->offload_enabled; out: kfree(buf); return; @@ -2495,10 +3040,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, struct ErrorInfo *ei; c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -1; - } + /* address the controller */ memset(scsi3addr, 0, sizeof(scsi3addr)); if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, @@ -2508,7 +3050,10 @@ } if (extended_response) c->Request.CDB[1] = extended_response; - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_FROMDEVICE, NO_TIMEOUT); + if (rc) + goto out; ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { @@ -2600,8 +3145,10 @@ static int hpsa_volume_offline(struct ctlr_info *h, unsigned char scsi3addr[]) { struct CommandList *c; - unsigned char *sense, sense_key, asc, ascq; - int ldstat = 0; + unsigned char *sense; + u8 sense_key, asc, ascq; + int sense_len; + int rc, ldstat = 0; u16 cmd_status; u8 scsi_status; #define ASC_LUN_NOT_READY 0x04 @@ -2609,14 +3156,19 @@ #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 c = cmd_alloc(h); - if (!c) - return 0; + (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); - hpsa_scsi_do_simple_cmd_core(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + if (rc) { + cmd_free(h, c); + return 0; + } sense = c->err_info->SenseInfo; - sense_key = sense[2]; - asc = sense[12]; - ascq = sense[13]; + if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) + sense_len = sizeof(c->err_info->SenseInfo); + else + sense_len = c->err_info->SenseLen; + decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); cmd_status = c->err_info->CommandStatus; scsi_status = c->err_info->ScsiStatus; cmd_free(h, c); @@ -2656,6 +3208,52 @@ static int hpsa_volume_offline(struct ctlr_info *h, return 0; } +/* + * Find out if a logical device supports aborts by simply trying one. + * Smart Array may claim not to support aborts on logical drives, but + * if an MSA2000 is connected, the drives on that will be presented + * by the Smart Array as logical drives, and aborts may be sent to + * those devices successfully. So the simplest way to find out + * is to try an abort and see how the device responds. + */ +static int hpsa_device_supports_aborts(struct ctlr_info *h, + unsigned char *scsi3addr) +{ + struct CommandList *c; + struct ErrorInfo *ei; + int rc = 0; + + u64 tag = (u64) -1; /* bogus tag */ + + /* Assume that physical devices support aborts */ + if (!is_logical_dev_addr_mode(scsi3addr)) + return 1; + + c = cmd_alloc(h); + + (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); + (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); + /* no unmap needed here because no data xfer. 
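Note that the probe aborts a deliberately bogus tag, (u64)-1; what is informative is not whether the abort succeeds but how the device classifies the attempt. In the switch that follows, CMD_INVALID means the abort message itself was not understood (no abort support), CMD_UNABORTABLE or CMD_ABORT_FAILED means the abort was accepted and processed (abort support present), and CMD_TMF_STATUS defers to hpsa_evaluate_tmf_status(). Discovery code later in the patch consumes the result roughly as follows (mirroring hpsa_update_device_supports_aborts() further below):

	dev->supports_aborts = hpsa_device_supports_aborts(h, scsi3addr);
	if (dev->supports_aborts < 0)
		dev->supports_aborts = 0;	/* probe failed; assume none */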
*/ + ei = c->err_info; + switch (ei->CommandStatus) { + case CMD_INVALID: + rc = 0; + break; + case CMD_UNABORTABLE: + case CMD_ABORT_FAILED: + rc = 1; + break; + case CMD_TMF_STATUS: + rc = hpsa_evaluate_tmf_status(h, c); + break; + default: + rc = 0; + break; + } + cmd_free(h, c); + return rc; +} + static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) @@ -2708,6 +3306,8 @@ static int hpsa_update_device_info(struct ctlr_info *h, this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; this_device->offload_enabled = 0; + this_device->offload_to_be_enabled = 0; + this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; } @@ -2721,7 +3321,6 @@ static int hpsa_update_device_info(struct ctlr_info *h, strncmp(obdr_sig, OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0); } - kfree(inq_buff); return 0; @@ -2730,6 +3329,31 @@ bail_out: return 1; } +static void hpsa_update_device_supports_aborts(struct ctlr_info *h, + struct hpsa_scsi_dev_t *dev, u8 *scsi3addr) +{ + unsigned long flags; + int rc, entry; + /* + * See if this device supports aborts. If we already know + * the device, we already know if it supports aborts, otherwise + * we have to find out if it supports aborts by trying one. + */ + spin_lock_irqsave(&h->devlock, flags); + rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); + if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && + entry >= 0 && entry < h->ndevices) { + dev->supports_aborts = h->dev[entry]->supports_aborts; + spin_unlock_irqrestore(&h->devlock, flags); + } else { + spin_unlock_irqrestore(&h->devlock, flags); + dev->supports_aborts = + hpsa_device_supports_aborts(h, scsi3addr); + if (dev->supports_aborts < 0) + dev->supports_aborts = 0; + } +} + static unsigned char *ext_target_model[] = { "MSA2012", "MSA2024", @@ -2835,6 +3459,7 @@ static int add_ext_target_dev(struct ctlr_info *h, (*n_ext_target_devs)++; hpsa_set_bus_target_lun(this_device, tmpdevice->bus, tmpdevice->target, 0); + hpsa_update_device_supports_aborts(h, this_device, scsi3addr); set_bit(tmpdevice->target, lunzerobits); return 1; } @@ -2850,88 +3475,23 @@ static int add_ext_target_dev(struct ctlr_info *h, static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) { - struct ReportExtendedLUNdata *physicals = NULL; - int responsesize = 24; /* size of physical extended response */ - int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; - u32 nphysicals = 0; /* number of reported physical devs */ - int found = 0; /* found match (1) or not (0) */ - u32 find; /* handle we need to match */ + struct io_accel2_cmd *c2 = + &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; + unsigned long flags; int i; - struct scsi_cmnd *scmd; /* scsi command within request being aborted */ - struct hpsa_scsi_dev_t *d; /* device of request being aborted */ - struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ - __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ - __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ - - if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) - return 0; /* no match */ - - /* point to the ioaccel2 device handle */ - c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; - if (c2a == NULL) - return 0; /* no match */ - - scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; - if (scmd == NULL) - return 0; /* no 
match */ - - d = scmd->device->hostdata; - if (d == NULL) - return 0; /* no match */ - - it_nexus = cpu_to_le32(d->ioaccel_handle); - scsi_nexus = c2a->scsi_nexus; - find = le32_to_cpu(c2a->scsi_nexus); - - if (h->raid_offload_debug > 0) - dev_info(&h->pdev->dev, - "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", - __func__, scsi_nexus, - d->device_id[0], d->device_id[1], d->device_id[2], - d->device_id[3], d->device_id[4], d->device_id[5], - d->device_id[6], d->device_id[7], d->device_id[8], - d->device_id[9], d->device_id[10], d->device_id[11], - d->device_id[12], d->device_id[13], d->device_id[14], - d->device_id[15]); - - /* Get the list of physical devices */ - physicals = kzalloc(reportsize, GFP_KERNEL); - if (physicals == NULL) - return 0; - if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) { - dev_err(&h->pdev->dev, - "Can't lookup %s device handle: report physical LUNs failed.\n", - "HP SSD Smart Path"); - kfree(physicals); - return 0; - } - nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / - responsesize; - - /* find ioaccel2 handle in list of physicals: */ - for (i = 0; i < nphysicals; i++) { - struct ext_report_lun_entry *entry = &physicals->LUN[i]; - - /* handle is in bytes 28-31 of each lun */ - if (entry->ioaccel_handle != find) - continue; /* didn't match */ - found = 1; - memcpy(scsi3addr, entry->lunid, 8); - if (h->raid_offload_debug > 0) - dev_info(&h->pdev->dev, - "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", - __func__, find, - entry->ioaccel_handle, scsi3addr); - break; /* found it */ - } - - kfree(physicals); - if (found) - return 1; - else - return 0; + spin_lock_irqsave(&h->devlock, flags); + for (i = 0; i < h->ndevices; i++) + if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { + memcpy(scsi3addr, h->dev[i]->scsi3addr, + sizeof(h->dev[i]->scsi3addr)); + spin_unlock_irqrestore(&h->devlock, flags); + return 1; + } + spin_unlock_irqrestore(&h->devlock, flags); + return 0; } + /* * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, * logdev. The number of luns in physdev and logdev are returned in @@ -3036,6 +3596,8 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, (struct ext_report_lun_entry *) lunaddrbytes; dev->ioaccel_handle = rle->ioaccel_handle; + if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle) + dev->hba_ioaccel_enabled = 1; memset(id_phys, 0, sizeof(*id_phys)); rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, @@ -3050,6 +3612,7 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, else dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ atomic_set(&dev->ioaccel_cmds_out, 0); + atomic_set(&dev->reset_cmds_out, 0); } static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) @@ -3142,16 +3705,19 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) /* Figure out where the LUN ID info is coming from */ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, i, nphysicals, nlogicals, physdev_list, logdev_list); - /* skip masked physical devices. 
*/ - if (lunaddrbytes[3] & 0xC0 && - i < nphysicals + (raid_ctlr_position == 0)) - continue; + + /* skip masked non-disk devices */ + if (MASKED_DEVICE(lunaddrbytes)) + if (i < nphysicals + (raid_ctlr_position == 0) && + NON_DISK_PHYS_DEV(lunaddrbytes)) + continue; /* Get device type, vendor, model, device id */ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, &is_OBDR)) continue; /* skip it if we can't talk to it. */ figure_bus_target_lun(h, lunaddrbytes, tmpdevice); + hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); this_device = currentsd[ncurrent]; /* @@ -3170,6 +3736,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) *this_device = *tmpdevice; + /* do not expose masked devices */ + if (MASKED_DEVICE(lunaddrbytes) && + i < nphysicals + (raid_ctlr_position == 0)) { + if (h->hba_mode_enabled) + dev_warn(&h->pdev->dev, + "Masked physical device detected\n"); + this_device->expose_state = HPSA_DO_NOT_EXPOSE; + } else { + this_device->expose_state = + HPSA_SG_ATTACH | HPSA_ULD_ATTACH; + } + switch (this_device->devtype) { case TYPE_ROM: /* We don't *really* support actual CD-ROM devices, @@ -3183,34 +3761,31 @@ ncurrent++; break; case TYPE_DISK: - if (h->hba_mode_enabled) { - /* never use raid mapper in HBA mode */ - this_device->offload_enabled = 0; - ncurrent++; - break; - } else if (h->acciopath_status) { - if (i >= nphysicals) { - ncurrent++; - break; - } - } else { - if (i < nphysicals) - break; + if (i >= nphysicals) { ncurrent++; break; } - if (h->transMethod & CFGTBL_Trans_io_accel1 || - h->transMethod & CFGTBL_Trans_io_accel2) { - hpsa_get_ioaccel_drive_info(h, this_device, - lunaddrbytes, id_phys); - atomic_set(&this_device->ioaccel_cmds_out, 0); - ncurrent++; - } + + if (h->hba_mode_enabled) + /* never use raid mapper in HBA mode */ + this_device->offload_enabled = 0; + else if (!(h->transMethod & CFGTBL_Trans_io_accel1 || + h->transMethod & CFGTBL_Trans_io_accel2)) + break; + + hpsa_get_ioaccel_drive_info(h, this_device, + lunaddrbytes, id_phys); + atomic_set(&this_device->ioaccel_cmds_out, 0); + ncurrent++; break; case TYPE_TAPE: case TYPE_MEDIUM_CHANGER: ncurrent++; break; + case TYPE_ENCLOSURE: + if (h->hba_mode_enabled) + ncurrent++; + break; case TYPE_RAID: /* Only present the Smartarray HBA as a RAID controller. * If it's a RAID controller other than the HBA itself @@ -3227,7 +3802,6 @@ if (ncurrent >= HPSA_MAX_DEVICES) break; } - hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent); adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); out: kfree(tmpdevice); @@ -3260,7 +3834,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, struct scsi_cmnd *cmd) { struct scatterlist *sg; - int use_sg, i, sg_index, chained; + int use_sg, i, sg_limit, chained, last_sg; struct SGDescriptor *curr_sg; BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); @@ -3272,22 +3846,39 @@ if (!use_sg) goto sglist_finished; + /* + * If the number of entries is greater than the max for a single list, + * then we have a chained list; we will set up all but one entry in the + * first list (the last entry is saved for link information); + * otherwise, we don't have a chained list and we'll set up each of + * the entries in the one list. 
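A worked example of the split described in this comment (the numbers are illustrative, not from the patch): with h->max_cmd_sg_entries == 32 and use_sg == 40, the code that follows computes

	chained  = 1;		/* 40 > 32, so a chain block is needed       */
	sg_limit = 31;		/* 32 - 1: the embedded list, minus the slot */
				/* reserved for the chain-link descriptor    */

so descriptors 0..30 land in the embedded list; the second pass then recomputes sg_limit = 40 - 31 = 9 and writes the remaining nine descriptors into the separate chain block, after which the final descriptor is flagged HPSA_SG_LAST.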
+ */ curr_sg = cp->SG; - chained = 0; - sg_index = 0; - scsi_for_each_sg(cmd, sg, use_sg, i) { - if (i == h->max_cmd_sg_entries - 1 && - use_sg > h->max_cmd_sg_entries) { - chained = 1; - curr_sg = h->cmd_sg_list[cp->cmdindex]; - sg_index = 0; - } + chained = use_sg > h->max_cmd_sg_entries; + sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg; + last_sg = scsi_sg_count(cmd) - 1; + scsi_for_each_sg(cmd, sg, sg_limit, i) { hpsa_set_sg_descriptor(curr_sg, sg); curr_sg++; } + if (chained) { + /* + * Continue with the chained list. Set curr_sg to the chained + * list. Modify the limit to the total count less the entries + * we've already set up. Resume the scan at the list entry + * where the previous loop left off. + */ + curr_sg = h->cmd_sg_list[cp->cmdindex]; + sg_limit = use_sg - sg_limit; + for_each_sg(sg, sg, sg_limit, i) { + hpsa_set_sg_descriptor(curr_sg, sg); + curr_sg++; + } + } + /* Back the pointer up to the last entry and mark it as "last". */ - (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); + (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST); if (use_sg + chained > h->maxSG) h->maxSG = use_sg + chained; @@ -3530,10 +4121,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, u32 len; u32 total_len = 0; - if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { - atomic_dec(&phys_disk->ioaccel_cmds_out); - return IO_ACCEL_INELIGIBLE; - } + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); if (fixup_ioaccel_cdb(cdb, &cdb_len)) { atomic_dec(&phys_disk->ioaccel_cmds_out); @@ -3556,8 +4144,19 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, } if (use_sg) { - BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); curr_sg = cp->sg; + if (use_sg > h->ioaccel_maxsg) { + addr64 = le64_to_cpu( + h->ioaccel2_cmd_sg_list[c->cmdindex]->address); + curr_sg->address = cpu_to_le64(addr64); + curr_sg->length = 0; + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = 0x80; + + curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex]; + } scsi_for_each_sg(cmd, sg, use_sg, i) { addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); @@ -3602,14 +4201,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); memcpy(cp->cdb, cdb, sizeof(cp->cdb)); - /* fill in sg elements */ - cp->sg_count = (u8) use_sg; - cp->data_len = cpu_to_le32(total_len); cp->err_ptr = cpu_to_le64(c->busaddr + offsetof(struct io_accel2_cmd, error_data)); cp->err_len = cpu_to_le32(sizeof(cp->error_data)); + /* fill in sg elements */ + if (use_sg > h->ioaccel_maxsg) { + cp->sg_count = 1; + if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) { + atomic_dec(&phys_disk->ioaccel_cmds_out); + scsi_dma_unmap(cmd); + return -1; + } + } else + cp->sg_count = (u8) use_sg; + enqueue_cmd_and_start_io(h, c); return 0; } @@ -3992,7 +4599,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, dev->phys_disk[map_index]); } -/* Submit commands down the "normal" RAID stack path */ +/* + * Submit commands down the "normal" RAID stack path + * All callers to hpsa_ciss_submit must check lockup_detected + * beforehand, before (opt.) and after calling cmd_alloc + */ static int hpsa_ciss_submit(struct ctlr_info *h, struct CommandList *c, struct scsi_cmnd *cmd, unsigned char scsi3addr[]) @@ -4007,7 +4618,6 @@ static int hpsa_ciss_submit(struct ctlr_info *h, /* Fill in the request block... 
*/ c->Request.Timeout = 0; - memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); c->Request.CDBLen = cmd->cmd_len; memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); @@ -4050,7 +4660,7 @@ static int hpsa_ciss_submit(struct ctlr_info *h, } if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ - cmd_free(h, c); + hpsa_cmd_resolve_and_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } enqueue_cmd_and_start_io(h, c); @@ -4058,25 +4668,125 @@ static int hpsa_ciss_submit(struct ctlr_info *h, return 0; } +static void hpsa_cmd_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle, err_dma_handle; + + /* Zero out all of commandlist except the last field, refcount */ + memset(c, 0, offsetof(struct CommandList, refcount)); + c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT)); + cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + c->err_info = h->errinfo_pool + index; + memset(c->err_info, 0, sizeof(*c->err_info)); + err_dma_handle = h->errinfo_pool_dhandle + + index * sizeof(*c->err_info); + c->cmdindex = index; + c->busaddr = (u32) cmd_dma_handle; + c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); + c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); + c->h = h; + c->scsi_cmd = SCSI_CMD_IDLE; +} + +static void hpsa_preinitialize_commands(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nr_cmds; i++) { + struct CommandList *c = h->cmd_pool + i; + + hpsa_cmd_init(h, i, c); + atomic_set(&c->refcount, 0); + } +} + +static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index, + struct CommandList *c) +{ + dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c); + + BUG_ON(c->cmdindex != index); + + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); + memset(c->err_info, 0, sizeof(*c->err_info)); + c->busaddr = (u32) cmd_dma_handle; +} + +static int hpsa_ioaccel_submit(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + unsigned char *scsi3addr) +{ + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + int rc = IO_ACCEL_INELIGIBLE; + + cmd->host_scribble = (unsigned char *) c; + + if (dev->offload_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + rc = hpsa_scsi_ioaccel_raid_map(h, c); + if (rc < 0) /* scsi_dma_map failed. */ + rc = SCSI_MLQUEUE_HOST_BUSY; + } else if (dev->hba_ioaccel_enabled) { + hpsa_cmd_init(h, c->cmdindex, c); + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + rc = hpsa_scsi_ioaccel_direct_map(h, c); + if (rc < 0) /* scsi_dma_map failed. 
*/ + rc = SCSI_MLQUEUE_HOST_BUSY; + } + return rc; +} + static void hpsa_command_resubmit_worker(struct work_struct *work) { struct scsi_cmnd *cmd; struct hpsa_scsi_dev_t *dev; - struct CommandList *c = - container_of(work, struct CommandList, work); + struct CommandList *c = container_of(work, struct CommandList, work); cmd = c->scsi_cmd; dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; - cmd->scsi_done(cmd); - return; + return hpsa_cmd_free_and_done(c->h, c, cmd); + } + if (c->reset_pending) + return hpsa_cmd_resolve_and_free(c->h, c); + if (c->abort_pending) + return hpsa_cmd_abort_and_free(c->h, c, cmd); + if (c->cmd_type == CMD_IOACCEL2) { + struct ctlr_info *h = c->h; + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + int rc; + + if (c2->error_data.serv_response == + IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { + rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); + if (rc == 0) + return; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + /* + * If we get here, it means dma mapping failed. + * Try again via scsi mid layer, which will + * then get SCSI_MLQUEUE_HOST_BUSY. + */ + cmd->result = DID_IMM_RETRY << 16; + return hpsa_cmd_free_and_done(h, c, cmd); + } + /* else, fall thru and resubmit down CISS path */ + } } + hpsa_cmd_partial_init(c->h, c->cmdindex, c); if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { /* * If we get here, it means dma mapping failed. Try * again via scsi mid layer, which will then get * SCSI_MLQUEUE_HOST_BUSY. + * + * hpsa_ciss_submit will have already freed c + * if it encountered a dma mapping failure. */ cmd->result = DID_IMM_RETRY << 16; cmd->scsi_done(cmd); @@ -4094,30 +4804,24 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) /* Get the ptr to our adapter structure out of cmd->host. */ h = sdev_to_hba(cmd->device); + + BUG_ON(cmd->request->tag < 0); + dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; } + memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); if (unlikely(lockup_detected(h))) { - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - return 0; - } - c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return SCSI_MLQUEUE_HOST_BUSY; - } - if (unlikely(lockup_detected(h))) { - cmd->result = DID_ERROR << 16; - cmd_free(h, c); + cmd->result = DID_NO_CONNECT << 16; cmd->scsi_done(cmd); return 0; } + c = cmd_tagged_alloc(h, cmd); /* * Call alternate submit routine for I/O accelerated commands. @@ -4126,27 +4830,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) if (likely(cmd->retries == 0 && cmd->request->cmd_type == REQ_TYPE_FS && h->acciopath_status)) { - - cmd->host_scribble = (unsigned char *) c; - c->cmd_type = CMD_SCSI; - c->scsi_cmd = cmd; - - if (dev->offload_enabled) { - rc = hpsa_scsi_ioaccel_raid_map(h, c); - if (rc == 0) - return 0; /* Sent on ioaccel path */ - if (rc < 0) { /* scsi_dma_map failed. */ - cmd_free(h, c); - return SCSI_MLQUEUE_HOST_BUSY; - } - } else if (dev->ioaccel_handle) { - rc = hpsa_scsi_ioaccel_direct_map(h, c); - if (rc == 0) - return 0; /* Sent on direct map path */ - if (rc < 0) { /* scsi_dma_map failed. 
*/ - cmd_free(h, c); - return SCSI_MLQUEUE_HOST_BUSY; - } + rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr); + if (rc == 0) + return 0; + if (rc == SCSI_MLQUEUE_HOST_BUSY) { + hpsa_cmd_resolve_and_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; } } return hpsa_ciss_submit(h, c, cmd, scsi3addr); @@ -4228,22 +4917,16 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, return finished; } -static void hpsa_unregister_scsi(struct ctlr_info *h) -{ - /* we are being forcibly unloaded, and may not refuse. */ - scsi_remove_host(h->scsi_host); - scsi_host_put(h->scsi_host); - h->scsi_host = NULL; -} - -static int hpsa_register_scsi(struct ctlr_info *h) +static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; int error; sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); - if (sh == NULL) - goto fail; + if (sh == NULL) { + dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); + return -ENOMEM; + } sh->io_port = 0; sh->n_io_port = 0; @@ -4252,80 +4935,156 @@ static int hpsa_register_scsi(struct ctlr_info *h) sh->max_cmd_len = MAX_COMMAND_SIZE; sh->max_lun = HPSA_MAX_LUN; sh->max_id = HPSA_MAX_LUN; - sh->can_queue = h->nr_cmds - - HPSA_CMDS_RESERVED_FOR_ABORTS - - HPSA_CMDS_RESERVED_FOR_DRIVER - - HPSA_MAX_CONCURRENT_PASSTHRUS; + sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; sh->cmd_per_lun = sh->can_queue; sh->sg_tablesize = h->maxsgentries; - h->scsi_host = sh; sh->hostdata[0] = (unsigned long) h; sh->irq = h->intr[h->intr_mode]; sh->unique_id = sh->irq; - error = scsi_add_host(sh, &h->pdev->dev); - if (error) - goto fail_host_put; - scsi_scan_host(sh); + error = scsi_init_shared_tag_map(sh, sh->can_queue); + if (error) { + dev_err(&h->pdev->dev, + "%s: scsi_init_shared_tag_map failed for controller %d\n", + __func__, h->ctlr); + scsi_host_put(sh); + return error; + } + h->scsi_host = sh; return 0; +} - fail_host_put: - dev_err(&h->pdev->dev, "%s: scsi_add_host" - " failed for controller %d\n", __func__, h->ctlr); - scsi_host_put(sh); - return error; - fail: - dev_err(&h->pdev->dev, "%s: scsi_host_alloc" - " failed for controller %d\n", __func__, h->ctlr); - return -ENOMEM; +static int hpsa_scsi_add_host(struct ctlr_info *h) +{ + int rv; + + rv = scsi_add_host(h->scsi_host, &h->pdev->dev); + if (rv) { + dev_err(&h->pdev->dev, "scsi_add_host failed\n"); + return rv; + } + scsi_scan_host(h->scsi_host); + return 0; } -static int wait_for_device_to_become_ready(struct ctlr_info *h, - unsigned char lunaddr[]) +/* + * The block layer has already gone to the trouble of picking out a unique, + * small-integer tag for this request. We use an offset from that value as + * an index to select our command block. (The offset allows us to reserve the + * low-numbered entries for our own uses.) + */ +static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) +{ + int idx = scmd->request->tag; + + if (idx < 0) + return idx; + + /* Offset to leave space for internal cmds. */ + return idx += HPSA_NRESERVED_CMDS; +} + +/* + * Send a TEST_UNIT_READY command to the specified LUN using the specified + * reply queue; returns zero if the unit is ready, and non-zero otherwise. + */ +static int hpsa_send_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, unsigned char lunaddr[], + int reply_queue) +{ + int rc; + + /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ + (void) fill_cmd(c, TEST_UNIT_READY, h, + NULL, 0, 0, lunaddr, TYPE_CMD); + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + if (rc) + return rc; + /* no unmap needed here because no data xfer. 
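hpsa_get_cmd_index() above is the pivot of the new tagged-allocation scheme: scsi_init_shared_tag_map() was sized to can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS, so block-layer tags run from 0 to can_queue - 1 and the offset maps them one-to-one onto the upper pool entries, keeping the low entries private to cmd_alloc() for driver-internal commands (aborts, resets, ioctls). An illustration with assumed values, not values from the patch:

	/* Assuming nr_cmds = 1024 and HPSA_NRESERVED_CMDS = 16:	*/
	/*   block tag    0  ->  pool entry   16			*/
	/*   block tag 1007  ->  pool entry 1023			*/
	/*   pool entries 0..15 stay reserved for internal commands	*/
	idx = scmd->request->tag + HPSA_NRESERVED_CMDS;

This is also why cmd_tagged_alloc(), further below, treats an index outside [HPSA_NRESERVED_CMDS, nr_cmds) as a bug.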
*/ + + /* Check if the unit is already ready. */ + if (c->err_info->CommandStatus == CMD_SUCCESS) + return 0; + + /* + * The first command sent after reset will receive "unit attention" to + * indicate that the LUN has been reset...this is actually what we're + * looking for (but, success is good too). + */ + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && + c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && + (c->err_info->SenseInfo[2] == NO_SENSE || + c->err_info->SenseInfo[2] == UNIT_ATTENTION)) + return 0; + + return 1; +} + +/* + * Wait for a TEST_UNIT_READY command to complete, retrying as necessary; + * returns zero when the unit is ready, and non-zero when giving up. + */ +static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h, + struct CommandList *c, + unsigned char lunaddr[], int reply_queue) { int rc; int count = 0; int waittime = 1; /* seconds */ - struct CommandList *c; - - c = cmd_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "out of memory in " - "wait_for_device_to_become_ready.\n"); - return IO_ERROR; - } /* Send test unit ready until device ready, or give up. */ - while (count < HPSA_TUR_RETRY_LIMIT) { + for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) { - /* Wait for a bit. do this first, because if we send + /* + * Wait for a bit. Do this first, because if we send * the TUR right away, the reset will just abort it. */ msleep(1000 * waittime); - count++; - rc = 0; /* Device ready. */ + + rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue); + if (!rc) + break; /* Increase wait time with each try, up to a point. */ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) - waittime = waittime * 2; + waittime *= 2; - /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */ - (void) fill_cmd(c, TEST_UNIT_READY, h, - NULL, 0, 0, lunaddr, TYPE_CMD); - hpsa_scsi_do_simple_cmd_core(h, c); - /* no unmap needed here because no data xfer. */ + dev_warn(&h->pdev->dev, + "waiting %d secs for device to become ready.\n", + waittime); + } - if (c->err_info->CommandStatus == CMD_SUCCESS) - break; + return rc; +} - if (c->err_info->CommandStatus == CMD_TARGET_STATUS && - c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && - (c->err_info->SenseInfo[2] == NO_SENSE || - c->err_info->SenseInfo[2] == UNIT_ATTENTION)) - break; +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[], + int reply_queue) +{ + int first_queue; + int last_queue; + int rq; + int rc = 0; + struct CommandList *c; - dev_warn(&h->pdev->dev, "waiting %d secs " - "for device to become ready.\n", waittime); - rc = 1; /* device not ready. */ + c = cmd_alloc(h); + + /* + * If no specific reply queue was requested, then send the TUR + * repeatedly, requesting a reply on each reply queue; otherwise execute + * the loop exactly once using only the specified queue. 
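wait_for_device_to_become_ready() (continued just below) now doubles as a post-reset health check: with DEFAULT_REPLY_QUEUE it issues the TUR once per reply queue, which looks intended to verify that every reply queue can still deliver completions after a reset, while an explicit queue number restricts the poll to that single queue. Hypothetical calls, assuming DEFAULT_REPLY_QUEUE is the "no preference" sentinel used throughout the patch:

	rc = wait_for_device_to_become_ready(h, lunaddr, DEFAULT_REPLY_QUEUE);
				/* sweeps queues 0 .. h->nreply_queues - 1 */
	rc = wait_for_device_to_become_ready(h, lunaddr, 2);
				/* polls reply queue 2 only */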
+ */ + if (reply_queue == DEFAULT_REPLY_QUEUE) { + first_queue = 0; + last_queue = h->nreply_queues - 1; + } else { + first_queue = reply_queue; + last_queue = reply_queue; + } + + for (rq = first_queue; rq <= last_queue; rq++) { + rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq); + if (rc) + break; } if (rc) @@ -4345,6 +5104,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; + char msg[48]; /* find the controller to which the command to be aborted was sent */ h = sdev_to_hba(scsicmd->device); @@ -4356,19 +5116,38 @@ dev = scsicmd->device->hostdata; if (!dev) { - dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " - "device lookup failed.\n"); + dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); return FAILED; } - dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun); - /* send a reset to the SCSI LUN which the command was sent to */ - rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); - if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) + + /* if controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + sprintf(msg, "cmd %d RESET FAILED, lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return FAILED; + } + + /* this reset request might be the result of a lockup; check */ + if (detect_controller_lockup(h)) { + sprintf(msg, "cmd %d RESET FAILED, new lockup detected", + hpsa_get_cmd_index(scsicmd)); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return FAILED; + } + + /* Do not attempt on controller */ + if (is_hba_lunid(dev->scsi3addr)) return SUCCESS; - dev_warn(&h->pdev->dev, "resetting device failed.\n"); - return FAILED; + hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting"); + + /* send a reset to the SCSI LUN which the command was sent to */ + rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN, + DEFAULT_REPLY_QUEUE); + sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed"); + hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); + return rc == 0 ? SUCCESS : FAILED; } static void swizzle_abort_tag(u8 *tag) @@ -4412,7 +5191,7 @@ static void hpsa_get_tag(struct ctlr_info *h, } static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, - struct CommandList *abort, int swizzle) + struct CommandList *abort, int reply_queue) { int rc = IO_OK; struct CommandList *c; @@ -4420,19 +5199,15 @@ __le32 tagupper, taglower; c = cmd_alloc(h); - if (c == NULL) { /* trouble... */ - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - return -ENOMEM; - } /* fill_cmd can't fail here, no buffer to map */ (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, 0, 0, scsi3addr, TYPE_MSG); - if (swizzle) + if (h->needs_abort_tags_swizzled) swizzle_abort_tag(&c->Request.CDB[4]); - hpsa_scsi_do_simple_cmd_core(h, c); + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); hpsa_get_tag(h, abort, &taglower, &tagupper); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", __func__, tagupper, taglower); /* no unmap needed here because no data xfer. 
*/ @@ -4440,6 +5215,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, switch (ei->CommandStatus) { case CMD_SUCCESS: break; + case CMD_TMF_STATUS: + rc = hpsa_evaluate_tmf_status(h, c); + break; case CMD_UNABORTABLE: /* Very common, don't make noise. */ rc = -1; break; @@ -4456,6 +5234,48 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, return rc; } +static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, + struct CommandList *command_to_abort, int reply_queue) +{ + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; + struct io_accel2_cmd *c2a = + &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; + struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; + struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; + + /* + * We're overlaying struct hpsa_tmf_struct on top of something which + * was allocated as a struct io_accel2_cmd, so we better be sure it + * actually fits, and doesn't overrun the error info space. + */ + BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > + sizeof(struct io_accel2_cmd)); + BUG_ON(offsetof(struct io_accel2_cmd, error_data) < + offsetof(struct hpsa_tmf_struct, error_len) + + sizeof(ac->error_len)); + + c->cmd_type = IOACCEL2_TMF; + c->scsi_cmd = SCSI_CMD_BUSY; + + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + + (c->cmdindex * sizeof(struct io_accel2_cmd)); + BUG_ON(c->busaddr & 0x0000007F); + + memset(ac, 0, sizeof(*c2)); /* yes this is correct */ + ac->iu_type = IOACCEL2_IU_TMF_TYPE; + ac->reply_queue = reply_queue; + ac->tmf = IOACCEL2_TMF_ABORT; + ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); + memset(ac->lun_id, 0, sizeof(ac->lun_id)); + ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); + ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); + ac->error_ptr = cpu_to_le64(c->busaddr + + offsetof(struct io_accel2_cmd, error_data)); + ac->error_len = cpu_to_le32(sizeof(c2->error_data)); +} + /* ioaccel2 path firmware cannot handle abort task requests. * Change abort requests to physical target reset, and send to the * address of the physical disk used for the ioaccel 2 command. 
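setup_ioaccel2_abort_cmd() above overlays a struct hpsa_tmf_struct on memory that was allocated as a struct io_accel2_cmd, and guards the trick up front: the BUILD_BUG_ON() proves the overlay fits, and the BUG_ON() with offsetof() proves the TMF frame ends before the error_data region the controller will later DMA status into. The compile-time half of the idiom generalizes; a sketch of the same guard for an arbitrary overlay pair (generic, not from the patch):

	/* Fail the build if an overlay type cannot fit in the buffer type it
	 * is placed over; a layout regression then breaks compilation instead
	 * of corrupting DMA buffers at run time. */
	#define CHECK_OVERLAY_FITS(ovl_type, buf_type) \
		BUILD_BUG_ON(sizeof(ovl_type) > sizeof(buf_type))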
@@ -4464,7 +5284,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, */ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort) + unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) { int rc = IO_OK; struct scsi_cmnd *scmd; /* scsi command within request being aborted */ @@ -4483,8 +5303,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, if (h->raid_offload_debug > 0) dev_info(&h->pdev->dev, - "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + "Reset as abort", scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); @@ -4506,7 +5327,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", psa[0], psa[1], psa[2], psa[3], psa[4], psa[5], psa[6], psa[7]); - rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); + rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); if (rc != 0) { dev_warn(&h->pdev->dev, "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", @@ -4516,7 +5337,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, } /* wait for device to recover */ - if (wait_for_device_to_become_ready(h, psa) != 0) { + if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { dev_warn(&h->pdev->dev, "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", psa[0], psa[1], psa[2], psa[3], @@ -4533,25 +5354,94 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, return rc; /* success */ } -/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to - * tell which kind we're dealing with, so we send the abort both ways. There - * shouldn't be any collisions between swizzled and unswizzled tags due to the - * way we construct our tags but we check anyway in case the assumptions which - * make this true someday become false. - */ +static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, + struct CommandList *abort, int reply_queue) +{ + int rc = IO_OK; + struct CommandList *c; + __le32 taglower, tagupper; + struct hpsa_scsi_dev_t *dev; + struct io_accel2_cmd *c2; + + dev = abort->scsi_cmd->device->hostdata; + if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) + return -1; + + c = cmd_alloc(h); + setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); + c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); + hpsa_get_tag(h, abort, &taglower, &tagupper); + dev_dbg(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", + __func__, tagupper, taglower); + /* no unmap needed here because no data xfer. 
*/ + + dev_dbg(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", + __func__, tagupper, taglower, c2->error_data.serv_response); + switch (c2->error_data.serv_response) { + case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: + case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: + rc = 0; + break; + case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: + case IOACCEL2_SERV_RESPONSE_FAILURE: + case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: + rc = -1; + break; + default: + dev_warn(&h->pdev->dev, + "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", + __func__, tagupper, taglower, + c2->error_data.serv_response); + rc = -1; + } + cmd_free(h, c); + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, + tagupper, taglower); + return rc; +} + static int hpsa_send_abort_both_ways(struct ctlr_info *h, - unsigned char *scsi3addr, struct CommandList *abort) + unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) { - /* ioccelerator mode 2 commands should be aborted via the + /* + * ioaccelerator mode 2 commands should be aborted via the * accelerated path, since RAID path is unaware of these commands, - * but underlying firmware can't handle abort TMF. - * Change abort to physical device reset. + * but not all underlying firmware can handle abort TMF. + * Change abort to physical device reset when abort TMF is unsupported. */ - if (abort->cmd_type == CMD_IOACCEL2) - return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); + if (abort->cmd_type == CMD_IOACCEL2) { + if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) + return hpsa_send_abort_ioaccel2(h, abort, + reply_queue); + else + return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, + abort, reply_queue); + } + return hpsa_send_abort(h, scsi3addr, abort, reply_queue); +} - return hpsa_send_abort(h, scsi3addr, abort, 0) && - hpsa_send_abort(h, scsi3addr, abort, 1); +/* Find out which reply queue a command was meant to return on */ +static int hpsa_extract_reply_queue(struct ctlr_info *h, + struct CommandList *c) +{ + if (c->cmd_type == CMD_IOACCEL2) + return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; + return c->Header.ReplyQueue; +} + +/* + * Limit concurrency of abort commands to prevent + * over-subscription of commands + */ +static inline int wait_for_available_abort_cmd(struct ctlr_info *h) +{ +#define ABORT_CMD_WAIT_MSECS 5000 + return !wait_event_timeout(h->abort_cmd_wait_queue, + atomic_dec_if_positive(&h->abort_cmds_available) >= 0, + msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); } /* Send an abort for the specified command. @@ -4561,7 +5451,7 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h, static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) { - int i, rc; + int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; struct CommandList *abort; /* pointer to command to be aborted */ @@ -4569,27 +5459,19 @@ char msg[256]; /* For debug messaging. 
*/ int ml = 0; __le32 tagupper, taglower; - int refcount; + int refcount, reply_queue; - /* Find the controller of the command to be aborted */ - h = sdev_to_hba(sc->device); - if (WARN(h == NULL, - "ABORT REQUEST FAILED, Controller lookup failed.\n")) + if (sc == NULL) return FAILED; - if (lockup_detected(h)) + if (sc->device == NULL) return FAILED; - /* Check that controller supports some kind of task abort */ - if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && - !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + /* Find the controller of the command to be aborted */ + h = sdev_to_hba(sc->device); + if (h == NULL) return FAILED; - memset(msg, 0, sizeof(msg)); - ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ", - h->scsi_host->host_no, sc->device->channel, - sc->device->id, sc->device->lun); - /* Find the device of the command to be aborted */ dev = sc->device->hostdata; if (!dev) { @@ -4598,6 +5480,31 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) return FAILED; } + /* If controller locked up, we can guarantee command won't complete */ + if (lockup_detected(h)) { + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "ABORT FAILED, lockup detected"); + return FAILED; + } + + /* This is a good time to check if controller lockup has occurred */ + if (detect_controller_lockup(h)) { + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "ABORT FAILED, new lockup detected"); + return FAILED; + } + + /* Check that controller supports some kind of task abort */ + if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && + !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + return FAILED; + + memset(msg, 0, sizeof(msg)); + ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", + h->scsi_host->host_no, sc->device->channel, + sc->device->id, sc->device->lun, + "Aborting command", sc); + /* Get SCSI command to be aborted */ abort = (struct CommandList *) sc->host_scribble; if (abort == NULL) { @@ -4609,50 +5516,115 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) cmd_free(h, abort); return SUCCESS; } + + /* Don't bother trying the abort if we know it won't work. */ + if (abort->cmd_type != CMD_IOACCEL2 && + abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { + cmd_free(h, abort); + return FAILED; + } + + /* + * Check that we're aborting the right command. + * It's possible the CommandList already completed and got re-used. + */ + if (abort->scsi_cmd != sc) { + cmd_free(h, abort); + return SUCCESS; + } + + abort->abort_pending = true; hpsa_get_tag(h, abort, &taglower, &tagupper); + reply_queue = hpsa_extract_reply_queue(h, abort); ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); as = abort->scsi_cmd; if (as != NULL) - ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", - as->cmnd[0], as->serial_number); - dev_dbg(&h->pdev->dev, "%s\n", msg); - dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", - h->scsi_host->host_no, dev->bus, dev->target, dev->lun); + ml += sprintf(msg+ml, + "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", + as->cmd_len, as->cmnd[0], as->cmnd[1], + as->serial_number); + dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); + hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); + /* * Command is in flight, or possibly already completed * by the firmware (but not to the scsi mid layer) but we can't * distinguish which. Send the abort down. 
*/ - rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort); + if (wait_for_available_abort_cmd(h)) { + dev_warn(&h->pdev->dev, + "%s FAILED, timeout waiting for an abort command to become available.\n", + msg); + cmd_free(h, abort); + return FAILED; + } + rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue); + atomic_inc(&h->abort_cmds_available); + wake_up_all(&h->abort_cmd_wait_queue); if (rc != 0) { - dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg); - dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", - h->scsi_host->host_no, - dev->bus, dev->target, dev->lun); + dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); + hpsa_show_dev_msg(KERN_WARNING, h, dev, + "FAILED to abort command"); cmd_free(h, abort); return FAILED; } - dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); + dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); + wait_event(h->event_sync_wait_queue, + abort->scsi_cmd != sc || lockup_detected(h)); + cmd_free(h, abort); + return !lockup_detected(h) ? SUCCESS : FAILED; +} - /* If the abort(s) above completed and actually aborted the - * command, then the command to be aborted should already be - * completed. If not, wait around a bit more to see if they - * manage to complete normally. - */ -#define ABORT_COMPLETE_WAIT_SECS 30 - for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { - refcount = atomic_read(&abort->refcount); - if (refcount < 2) { - cmd_free(h, abort); - return SUCCESS; - } else { - msleep(100); - } +/* + * For operations with an associated SCSI command, a command block is allocated + * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the + * block request tag as an index into a table of entries. cmd_tagged_free() is + * the complement, although cmd_free() may be called instead. + */ +static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, + struct scsi_cmnd *scmd) +{ + int idx = hpsa_get_cmd_index(scmd); + struct CommandList *c = h->cmd_pool + idx; + + if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { + dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", + idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); + /* The index value comes from the block layer, so if it's out of + * bounds, it's probably not our bug. + */ + BUG(); } - dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", - msg, ABORT_COMPLETE_WAIT_SECS); - cmd_free(h, abort); - return FAILED; + + atomic_inc(&c->refcount); + if (unlikely(!hpsa_is_cmd_idle(c))) { + /* + * We expect that the SCSI layer will hand us a unique tag + * value. Thus, there should never be a collision here between + * two requests...because if the selected command isn't idle + * then someone is going to be very disappointed. + */ + dev_err(&h->pdev->dev, + "tag collision (tag=%d) in cmd_tagged_alloc().\n", + idx); + if (c->scsi_cmd != NULL) + scsi_print_command(c->scsi_cmd); + scsi_print_command(scmd); + } + + hpsa_cmd_partial_init(h, idx, c); + return c; +} + +static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) +{ + /* + * Release our reference to the block. We don't need to do anything + * else to free it, because it is accessed by index. (There's no point + * in checking the result of the decrement, since we cannot guarantee + * that there isn't a concurrent abort which is also accessing it.) 
+ */ + (void)atomic_dec(&c->refcount); } /* @@ -4660,16 +5632,15 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track * which ones are free or in use. Lock must be held when calling this. * cmd_free() is the complement. + * This function never gives up and returns NULL. If it hangs, + * another thread must call cmd_free() to free some tags. */ static struct CommandList *cmd_alloc(struct ctlr_info *h) { struct CommandList *c; - int i; - union u64bit temp64; - dma_addr_t cmd_dma_handle, err_dma_handle; - int refcount; - unsigned long offset; + int refcount, i; + int offset = 0; /* * There is some *extremely* small but non-zero chance that that @@ -4681,12 +5652,20 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) * very unlucky thread might be starved anyway, never able to * beat the other threads. In reality, this happens so * infrequently as to be indistinguishable from never. + * + * Note that we start allocating commands before the SCSI host structure + * is initialized. Since the search starts at bit zero, this + * all works, since we have at least one command structure available; + * however, it means that the structures with the low indexes have to be + * reserved for driver-initiated requests, while requests from the block + * layer will use the higher indexes. */ - offset = h->last_allocation; /* benignly racy */ for (;;) { - i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); - if (unlikely(i == h->nr_cmds)) { + i = find_next_zero_bit(h->cmd_pool_bits, + HPSA_NRESERVED_CMDS, + offset); + if (unlikely(i >= HPSA_NRESERVED_CMDS)) { offset = 0; continue; } @@ -4694,35 +5673,23 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) refcount = atomic_inc_return(&c->refcount); if (unlikely(refcount > 1)) { cmd_free(h, c); /* already in use */ - offset = (i + 1) % h->nr_cmds; + offset = (i + 1) % HPSA_NRESERVED_CMDS; continue; } set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); break; /* it's ours now. */ } - h->last_allocation = i; /* benignly racy */ - - /* Zero out all of commandlist except the last field, refcount */ - memset(c, 0, offsetof(struct CommandList, refcount)); - c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT)); - cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); - c->err_info = h->errinfo_pool + i; - memset(c->err_info, 0, sizeof(*c->err_info)); - err_dma_handle = h->errinfo_pool_dhandle - + i * sizeof(*c->err_info); - - c->cmdindex = i; - - c->busaddr = (u32) cmd_dma_handle; - temp64.val = (u64) err_dma_handle; - c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); - c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); - - c->h = h; + hpsa_cmd_partial_init(h, i, c); return c; } +/* + * This is the complementary operation to cmd_alloc(). Note, however, in some + * corner cases it may also be used to free blocks allocated by + * cmd_tagged_alloc() in which case the ref-count decrement does the trick and + * the clear-bit is harmless. 
+ */ static void cmd_free(struct ctlr_info *h, struct CommandList *c) { if (atomic_dec_and_test(&c->refcount)) { @@ -4900,7 +5867,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) if (iocommand.buf_size > 0) { buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) - return -EFAULT; + return -ENOMEM; if (iocommand.Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, @@ -4913,12 +5880,10 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) } } c = cmd_alloc(h); - if (c == NULL) { - rc = -ENOMEM; - goto out_kfree; - } + /* Fill in the command type */ c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand.buf_size > 0) { /* buffer to fill */ @@ -4948,10 +5913,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Len = cpu_to_le32(iocommand.buf_size); c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } - hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); + rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (iocommand.buf_size > 0) hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); + if (rc) { + rc = -EIO; + goto out; + } /* Copy the error information out */ memcpy(&iocommand.error_info, c->err_info, @@ -5048,11 +6017,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) sg_used++; } c = cmd_alloc(h); - if (c == NULL) { - status = -ENOMEM; - goto cleanup1; - } + c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; c->Header.SGList = (u8) sg_used; c->Header.SGTotal = cpu_to_le16(sg_used); @@ -5078,10 +6045,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } - hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); + status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); + if (status) { + status = -EIO; + goto cleanup0; + } + /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); if (copy_to_user(argp, ioc, sizeof(*ioc))) { @@ -5163,14 +6135,13 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) } } -static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, +static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, u8 reset_type) { struct CommandList *c; c = cmd_alloc(h); - if (!c) - return -ENOMEM; + /* fill_cmd can't fail here, no data buffer to map */ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, RAID_CTLR_LUNID, TYPE_MSG); @@ -5181,7 +6152,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. 
*/ - return 0; + return; } static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, @@ -5189,9 +6160,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, int cmd_type) { int pci_dir = XFER_NONE; - struct CommandList *a; /* for commands to be aborted */ + u64 tag; /* for commands to be aborted */ c->cmd_type = CMD_IOCTL_PEND; + c->scsi_cmd = SCSI_CMD_BUSY; c->Header.ReplyQueue = 0; if (buff != NULL && size > 0) { c->Header.SGList = 1; @@ -5305,10 +6277,10 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[7] = 0x00; break; case HPSA_ABORT_MSG: - a = buff; /* point to command to be aborted */ + memcpy(&tag, buff, sizeof(tag)); dev_dbg(&h->pdev->dev, - "Abort Tag:0x%016llx request Tag:0x%016llx", - a->Header.tag, c->Header.tag); + "Abort Tag:0x%016llx using rqst Tag:0x%016llx", + tag, c->Header.tag); c->Request.CDBLen = 16; c->Request.type_attr_dir = TYPE_ATTR_DIR(cmd_type, @@ -5319,8 +6291,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[2] = 0x00; /* reserved */ c->Request.CDB[3] = 0x00; /* reserved */ /* Tag to abort goes in CDB[4]-CDB[11] */ - memcpy(&c->Request.CDB[4], &a->Header.tag, - sizeof(a->Header.tag)); + memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); c->Request.CDB[12] = 0x00; /* reserved */ c->Request.CDB[13] = 0x00; /* reserved */ c->Request.CDB[14] = 0x00; /* reserved */ @@ -5399,7 +6370,7 @@ static inline void finish_cmd(struct CommandList *c) if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI || c->cmd_type == CMD_IOACCEL2)) complete_scsi_command(c); - else if (c->cmd_type == CMD_IOCTL_PEND) + else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) complete(c->waiting); } @@ -5733,7 +6704,7 @@ static int controller_reset_failed(struct CfgTable __iomem *cfgtable) /* This does a hard reset of the controller using PCI power management * states or the using the doorbell register. */ -static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) +static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) { u64 cfg_offset; u32 cfg_base_addr; @@ -5744,7 +6715,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) int rc; struct CfgTable __iomem *cfgtable; u32 use_doorbell; - u32 board_id; u16 command_register; /* For controllers as old as the P600, this is very nearly @@ -5760,11 +6730,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * using the doorbell register. */ - rc = hpsa_lookup_board_id(pdev, &board_id); - if (rc < 0) { - dev_warn(&pdev->dev, "Board ID not found\n"); - return rc; - } if (!ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Controller not resettable\n"); return -ENODEV; @@ -5930,10 +6895,22 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) return -1; } +static void hpsa_disable_interrupt_mode(struct ctlr_info *h) +{ + if (h->msix_vector) { + if (h->pdev->msix_enabled) + pci_disable_msix(h->pdev); + h->msix_vector = 0; + } else if (h->msi_vector) { + if (h->pdev->msi_enabled) + pci_disable_msi(h->pdev); + h->msi_vector = 0; + } +} + /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use legacy INTx mode. 
*/ - static void hpsa_interrupt_mode(struct ctlr_info *h) { #ifdef CONFIG_PCI_MSI @@ -6064,6 +7041,21 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, return 0; } +static void hpsa_free_cfgtables(struct ctlr_info *h) +{ + if (h->transtable) { + iounmap(h->transtable); + h->transtable = NULL; + } + if (h->cfgtable) { + iounmap(h->cfgtable); + h->cfgtable = NULL; + } +} + +/* Find and map CISS config table and transfer table + * several items must be unmapped (freed) later + */ static int hpsa_find_cfgtables(struct ctlr_info *h) { u64 cfg_offset; @@ -6090,25 +7082,31 @@ static int hpsa_find_cfgtables(struct ctlr_info *h) h->transtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index)+cfg_offset+trans_offset, sizeof(*h->transtable)); - if (!h->transtable) + if (!h->transtable) { + dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); + hpsa_free_cfgtables(h); return -ENOMEM; + } return 0; } static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { - h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); +#define MIN_MAX_COMMANDS 16 + BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); + + h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); /* Limit commands in memory limited kdump scenario. */ if (reset_devices && h->max_commands > 32) h->max_commands = 32; - if (h->max_commands < 16) { - dev_warn(&h->pdev->dev, "Controller reports " "max supported commands of %d, an obvious lie. " "Using 16. Ensure that firmware is up to date.\n", - h->max_commands); - h->max_commands = 16; + if (h->max_commands < MIN_MAX_COMMANDS) { + dev_warn(&h->pdev->dev, + "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n", + h->max_commands, + MIN_MAX_COMMANDS); + h->max_commands = MIN_MAX_COMMANDS; } } @@ -6153,6 +7151,8 @@ static void hpsa_find_board_params(struct ctlr_info *h) dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); + if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); } static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) @@ -6222,6 +7222,8 @@ static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) * as we enter this code.) 
*/ for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { + if (h->remove_in_progress) + goto done; spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); @@ -6262,6 +7264,22 @@ error: return -ENODEV; } +/* free items allocated or mapped by hpsa_pci_init */ +static void hpsa_free_pci_init(struct ctlr_info *h) +{ + hpsa_free_cfgtables(h); /* pci_init 4 */ + iounmap(h->vaddr); /* pci_init 3 */ + h->vaddr = NULL; + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ + /* + * call pci_disable_device before pci_release_regions per + * Documentation/PCI/pci.txt + */ + pci_disable_device(h->pdev); /* pci_init 1 */ + pci_release_regions(h->pdev); /* pci_init 2 */ +} + +/* several items must be freed later */ static int hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; @@ -6272,19 +7290,24 @@ static int hpsa_pci_init(struct ctlr_info *h) h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); + h->needs_abort_tags_swizzled = + ctlr_needs_abort_tags_swizzled(h->board_id); + pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); err = pci_enable_device(h->pdev); if (err) { - dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); + dev_err(&h->pdev->dev, "failed to enable PCI device\n"); + pci_disable_device(h->pdev); return err; } err = pci_request_regions(h->pdev, HPSA); if (err) { dev_err(&h->pdev->dev, - "cannot obtain PCI resources, aborting\n"); + "failed to obtain PCI resources\n"); + pci_disable_device(h->pdev); return err; } @@ -6293,38 +7316,43 @@ static int hpsa_pci_init(struct ctlr_info *h) hpsa_interrupt_mode(h); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) - goto err_out_free_res; + goto clean2; /* intmode+region, pci */ h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { + dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); err = -ENOMEM; - goto err_out_free_res; + goto clean2; /* intmode+region, pci */ } err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) - goto err_out_free_res; + goto clean3; /* vaddr, intmode+region, pci */ err = hpsa_find_cfgtables(h); if (err) - goto err_out_free_res; + goto clean3; /* vaddr, intmode+region, pci */ hpsa_find_board_params(h); if (!hpsa_CISS_signature_present(h)) { err = -ENODEV; - goto err_out_free_res; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ } hpsa_set_driver_support_bits(h); hpsa_p600_dma_prefetch_quirk(h); err = hpsa_enter_simple_mode(h); if (err) - goto err_out_free_res; + goto clean4; /* cfgtables, vaddr, intmode+region, pci */ return 0; -err_out_free_res: - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - if (h->vaddr) - iounmap(h->vaddr); +clean4: /* cfgtables, vaddr, intmode+region, pci */ + hpsa_free_cfgtables(h); +clean3: /* vaddr, intmode+region, pci */ + iounmap(h->vaddr); + h->vaddr = NULL; +clean2: /* intmode+region, pci */ + hpsa_disable_interrupt_mode(h); + /* + * call pci_disable_device before pci_release_regions per + * Documentation/PCI/pci.txt + */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); return err; @@ -6346,7 +7374,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h) } } -static int hpsa_init_reset_devices(struct pci_dev *pdev) +static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) { int rc, i; void __iomem *vaddr; @@ -6382,7 +7410,7 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) iounmap(vaddr); /* Reset the controller 
with a PCI power-cycle or via doorbell */ - rc = hpsa_kdump_hard_reset_controller(pdev); + rc = hpsa_kdump_hard_reset_controller(pdev, board_id); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in @@ -6408,7 +7436,29 @@ out_disable: return rc; } -static int hpsa_allocate_cmd_pool(struct ctlr_info *h) +static void hpsa_free_cmd_pool(struct ctlr_info *h) +{ + kfree(h->cmd_pool_bits); + h->cmd_pool_bits = NULL; + if (h->cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct CommandList), + h->cmd_pool, + h->cmd_pool_dhandle); + h->cmd_pool = NULL; + h->cmd_pool_dhandle = 0; + } + if (h->errinfo_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct ErrorInfo), + h->errinfo_pool, + h->errinfo_pool_dhandle); + h->errinfo_pool = NULL; + h->errinfo_pool_dhandle = 0; + } +} + +static int hpsa_alloc_cmd_pool(struct ctlr_info *h) { h->cmd_pool_bits = kzalloc( DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * @@ -6425,34 +7475,13 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h) dev_err(&h->pdev->dev, "out of memory in %s", __func__); goto clean_up; } + hpsa_preinitialize_commands(h); return 0; clean_up: hpsa_free_cmd_pool(h); return -ENOMEM; } -static void hpsa_free_cmd_pool(struct ctlr_info *h) -{ - kfree(h->cmd_pool_bits); - if (h->cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - if (h->ioaccel2_cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), - h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); - if (h->errinfo_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, - h->errinfo_pool_dhandle); - if (h->ioaccel_cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct io_accel1_cmd), - h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); -} - static void hpsa_irq_affinity_hints(struct ctlr_info *h) { int i, cpu; @@ -6474,12 +7503,14 @@ static void hpsa_free_irqs(struct ctlr_info *h) i = h->intr_mode; irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); + h->q[i] = 0; return; } for (i = 0; i < h->msix_vector; i++) { irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); + h->q[i] = 0; } for (; i < MAX_REPLY_QUEUES; i++) h->q[i] = 0; @@ -6502,8 +7533,9 @@ static int hpsa_request_irqs(struct ctlr_info *h, if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { /* If performant mode and MSI-X, use multiple reply queues */ for (i = 0; i < h->msix_vector; i++) { + sprintf(h->intrname[i], "%s-msix%d", h->devname, i); rc = request_irq(h->intr[i], msixhandler, - 0, h->devname, + 0, h->intrname[i], &h->q[i]); if (rc) { int j; @@ -6524,18 +7556,30 @@ static int hpsa_request_irqs(struct ctlr_info *h, } else { /* Use single reply pool */ if (h->msix_vector > 0 || h->msi_vector) { + if (h->msix_vector) + sprintf(h->intrname[h->intr_mode], + "%s-msix", h->devname); + else + sprintf(h->intrname[h->intr_mode], + "%s-msi", h->devname); rc = request_irq(h->intr[h->intr_mode], - msixhandler, 0, h->devname, + msixhandler, 0, + h->intrname[h->intr_mode], &h->q[h->intr_mode]); } else { + sprintf(h->intrname[h->intr_mode], + "%s-intx", h->devname); rc = request_irq(h->intr[h->intr_mode], - intxhandler, IRQF_SHARED, h->devname, + intxhandler, IRQF_SHARED, + h->intrname[h->intr_mode], &h->q[h->intr_mode]); } + irq_set_affinity_hint(h->intr[h->intr_mode], NULL); } if (rc) { - dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", + 
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", h->intr[h->intr_mode], h->devname); + hpsa_free_irqs(h); return -ENODEV; } return 0; @@ -6543,42 +7587,27 @@ static int hpsa_request_irqs(struct ctlr_info *h, static int hpsa_kdump_soft_reset(struct ctlr_info *h) { - if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, - HPSA_RESET_TYPE_CONTROLLER)) { - dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); - return -EIO; - } + int rc; + hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); - if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); + if (rc) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); - return -1; + return rc; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); - if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { + rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); + if (rc) { dev_warn(&h->pdev->dev, "Board failed to become ready " "after soft reset.\n"); - return -1; + return rc; } return 0; } -static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) -{ - hpsa_free_irqs(h); -#ifdef CONFIG_PCI_MSI - if (h->msix_vector) { - if (h->pdev->msix_enabled) - pci_disable_msix(h->pdev); - } else if (h->msi_vector) { - if (h->pdev->msi_enabled) - pci_disable_msi(h->pdev); - } -#endif /* CONFIG_PCI_MSI */ -} - static void hpsa_free_reply_queues(struct ctlr_info *h) { int i; @@ -6586,30 +7615,36 @@ static void hpsa_free_reply_queues(struct ctlr_info *h) for (i = 0; i < h->nreply_queues; i++) { if (!h->reply_queue[i].head) continue; - pci_free_consistent(h->pdev, h->reply_queue_size, - h->reply_queue[i].head, h->reply_queue[i].busaddr); + pci_free_consistent(h->pdev, + h->reply_queue_size, + h->reply_queue[i].head, + h->reply_queue[i].busaddr); h->reply_queue[i].head = NULL; h->reply_queue[i].busaddr = 0; } + h->reply_queue_size = 0; } static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { - hpsa_free_irqs_and_disable_msix(h); - hpsa_free_sg_chain_blocks(h); - hpsa_free_cmd_pool(h); - kfree(h->ioaccel1_blockFetchTable); - kfree(h->blockFetchTable); - hpsa_free_reply_queues(h); - if (h->vaddr) - iounmap(h->vaddr); - if (h->transtable) - iounmap(h->transtable); - if (h->cfgtable) - iounmap(h->cfgtable); - pci_disable_device(h->pdev); - pci_release_regions(h->pdev); - kfree(h); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + hpsa_free_irqs(h); /* init_one 4 */ + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + hpsa_free_pci_init(h); /* init_one 2_5 */ + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + if (h->resubmit_wq) { + destroy_workqueue(h->resubmit_wq); /* init_one 1 */ + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { + destroy_workqueue(h->rescan_ctlr_wq); + h->rescan_ctlr_wq = NULL; + } + kfree(h); /* init_one 1 */ } /* Called when controller lockup detected. 
*/ @@ -6617,17 +7652,22 @@ static void fail_all_outstanding_cmds(struct ctlr_info *h) { int i, refcount; struct CommandList *c; + int failcount = 0; flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ for (i = 0; i < h->nr_cmds; i++) { c = h->cmd_pool + i; refcount = atomic_inc_return(&c->refcount); if (refcount > 1) { - c->err_info->CommandStatus = CMD_HARDWARE_ERR; + c->err_info->CommandStatus = CMD_CTLR_LOCKUP; finish_cmd(c); + atomic_dec(&h->commands_outstanding); + failcount++; } cmd_free(h, c); } + dev_warn(&h->pdev->dev, + "failed %d commands in fail_all\n", failcount); } static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) @@ -6653,18 +7693,19 @@ static void controller_lockup_detected(struct ctlr_info *h) if (!lockup_detected) { /* no heartbeat, but controller gave us a zero. */ dev_warn(&h->pdev->dev, - "lockup detected but scratchpad register is zero\n"); + "lockup detected after %d but scratchpad register is zero\n", + h->heartbeat_sample_interval / HZ); lockup_detected = 0xffffffff; } set_lockup_detected_for_all_cpus(h, lockup_detected); spin_unlock_irqrestore(&h->lock, flags); - dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", - lockup_detected); + dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", + lockup_detected, h->heartbeat_sample_interval / HZ); pci_disable_device(h->pdev); fail_all_outstanding_cmds(h); } -static void detect_controller_lockup(struct ctlr_info *h) +static int detect_controller_lockup(struct ctlr_info *h) { u64 now; u32 heartbeat; @@ -6674,7 +7715,7 @@ static void detect_controller_lockup(struct ctlr_info *h) /* If we've received an interrupt recently, we're ok. */ if (time_after64(h->last_intr_timestamp + (h->heartbeat_sample_interval), now)) - return; + return false; /* * If we've already checked the heartbeat recently, we're ok. @@ -6683,7 +7724,7 @@ static void detect_controller_lockup(struct ctlr_info *h) */ if (time_after64(h->last_heartbeat_timestamp + (h->heartbeat_sample_interval), now)) - return; + return false; /* If heartbeat has not changed since we last looked, we're not ok. */ spin_lock_irqsave(&h->lock, flags); @@ -6691,12 +7732,13 @@ static void detect_controller_lockup(struct ctlr_info *h) spin_unlock_irqrestore(&h->lock, flags); if (h->last_heartbeat == heartbeat) { controller_lockup_detected(h); - return; + return true; } /* We're ok. */ h->last_heartbeat = heartbeat; h->last_heartbeat_timestamp = now; + return false; } static void hpsa_ack_ctlr_events(struct ctlr_info *h) @@ -6843,11 +7885,18 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct ctlr_info *h; int try_soft_reset = 0; unsigned long flags; + u32 board_id; if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); - rc = hpsa_init_reset_devices(pdev); + rc = hpsa_lookup_board_id(pdev, &board_id); + if (rc < 0) { + dev_warn(&pdev->dev, "Board ID not found\n"); + return rc; + } + + rc = hpsa_init_reset_devices(pdev, board_id); if (rc) { if (rc != -ENOTSUPP) return rc; @@ -6868,42 +7917,41 @@ reinit_after_soft_reset: */ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = kzalloc(sizeof(*h), GFP_KERNEL); - if (!h) + if (!h) { + dev_err(&pdev->dev, "Failed to allocate controller head\n"); return -ENOMEM; + } h->pdev = pdev; + h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; INIT_LIST_HEAD(&h->offline_device_list); spin_lock_init(&h->lock); spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); - - h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); - if (!h->rescan_ctlr_wq) { - rc = -ENOMEM; - goto clean1; - } - - h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); - if (!h->resubmit_wq) { - rc = -ENOMEM; - goto clean1; - } + atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); /* Allocate and clear per-cpu variable lockup_detected */ h->lockup_detected = alloc_percpu(u32); if (!h->lockup_detected) { + dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); rc = -ENOMEM; - goto clean1; + goto clean1; /* aer/h */ } set_lockup_detected_for_all_cpus(h, 0); rc = hpsa_pci_init(h); - if (rc != 0) - goto clean1; + if (rc) + goto clean2; /* lu, aer/h */ + + /* relies on h-> settings made by hpsa_pci_init, including + * interrupt_mode h->intr */ + rc = hpsa_scsi_host_alloc(h); + if (rc) + goto clean2_5; /* pci, lu, aer/h */ - sprintf(h->devname, HPSA "%d", number_of_controllers); + sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); h->ctlr = number_of_controllers; number_of_controllers++; @@ -6917,34 +7965,57 @@ reinit_after_soft_reset: dac = 0; } else { dev_err(&pdev->dev, "no suitable DMA available\n"); - goto clean1; + goto clean3; /* shost, pci, lu, aer/h */ } } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, HPSA_INTR_OFF); - if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) - goto clean2; - dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", - h->devname, pdev->device, - h->intr[h->intr_mode], dac ? "" : " not"); - rc = hpsa_allocate_cmd_pool(h); + rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); + if (rc) + goto clean3; /* shost, pci, lu, aer/h */ + rc = hpsa_alloc_cmd_pool(h); if (rc) - goto clean2_and_free_irqs; - if (hpsa_allocate_sg_chain_blocks(h)) - goto clean4; + goto clean4; /* irq, shost, pci, lu, aer/h */ + rc = hpsa_alloc_sg_chain_blocks(h); + if (rc) + goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ init_waitqueue_head(&h->scan_wait_queue); + init_waitqueue_head(&h->abort_cmd_wait_queue); + init_waitqueue_head(&h->event_sync_wait_queue); + mutex_init(&h->reset_mutex); h->scan_finished = 1; /* no scan currently in progress */ pci_set_drvdata(pdev, h); h->ndevices = 0; h->hba_mode_enabled = 0; - h->scsi_host = NULL; + spin_lock_init(&h->devlock); - hpsa_put_ctlr_into_performant_mode(h); + rc = hpsa_put_ctlr_into_performant_mode(h); + if (rc) + goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ + + /* hook into SCSI subsystem */ + rc = hpsa_scsi_add_host(h); + if (rc) + goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + + /* create the resubmit workqueue */ + h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); + if (!h->rescan_ctlr_wq) { + rc = -ENOMEM; + goto clean7; + } - /* At this point, the controller is ready to take commands. + h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); + if (!h->resubmit_wq) { + rc = -ENOMEM; + goto clean7; /* aer/h */ + } + + /* + * At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. 
*/ @@ -6966,13 +8037,24 @@ reinit_after_soft_reset: if (rc) { dev_warn(&h->pdev->dev, "Failed to request_irq after soft reset.\n"); - goto clean4; + /* + * cannot goto clean7 or free_irqs will be called + * again. Instead, do its work + */ + hpsa_free_performant_mode(h); /* clean7 */ + hpsa_free_sg_chain_blocks(h); /* clean6 */ + hpsa_free_cmd_pool(h); /* clean5 */ + /* + * skip hpsa_free_irqs(h) clean4 since that + * was just called before request_irqs failed + */ + goto clean3; } rc = hpsa_kdump_soft_reset(h); if (rc) /* Neither hard nor soft reset worked, we're hosed. */ - goto clean4; + goto clean9; dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, @@ -6993,21 +8075,20 @@ reinit_after_soft_reset: hpsa_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) - /* don't go to clean4, we already unallocated */ + /* don't goto clean, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } - /* Enable Accelerated IO path at driver layer */ - h->acciopath_status = 1; + /* Enable Accelerated IO path at driver layer */ + h->acciopath_status = 1; /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); - hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; @@ -7019,19 +8100,36 @@ reinit_after_soft_reset: h->heartbeat_sample_interval); return 0; -clean4: +clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + kfree(h->hba_inquiry_data); +clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + hpsa_free_performant_mode(h); + h->access.set_intr_mask(h, HPSA_INTR_OFF); +clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ hpsa_free_sg_chain_blocks(h); +clean5: /* cmd, irq, shost, pci, lu, aer/h */ hpsa_free_cmd_pool(h); -clean2_and_free_irqs: +clean4: /* irq, shost, pci, lu, aer/h */ hpsa_free_irqs(h); -clean2: -clean1: - if (h->resubmit_wq) +clean3: /* shost, pci, lu, aer/h */ + scsi_host_put(h->scsi_host); + h->scsi_host = NULL; +clean2_5: /* pci, lu, aer/h */ + hpsa_free_pci_init(h); +clean2: /* lu, aer/h */ + if (h->lockup_detected) { + free_percpu(h->lockup_detected); + h->lockup_detected = NULL; + } +clean1: /* wq/aer/h */ + if (h->resubmit_wq) { destroy_workqueue(h->resubmit_wq); - if (h->rescan_ctlr_wq) + h->resubmit_wq = NULL; + } + if (h->rescan_ctlr_wq) { destroy_workqueue(h->rescan_ctlr_wq); - if (h->lockup_detected) - free_percpu(h->lockup_detected); + h->rescan_ctlr_wq = NULL; + } kfree(h); return rc; } @@ -7040,8 +8138,8 @@ static void hpsa_flush_cache(struct ctlr_info *h) { char *flush_buf; struct CommandList *c; + int rc; - /* Don't bother trying to flush the cache if locked up */ if (unlikely(lockup_detected(h))) return; flush_buf = kzalloc(4, GFP_KERNEL); @@ -7049,21 +8147,20 @@ static void hpsa_flush_cache(struct ctlr_info *h) return; c = cmd_alloc(h); - if (!c) { - dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); - goto out_of_memory; - } + if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, RAID_CTLR_LUNID, TYPE_CMD)) { goto out; } - hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); + rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, + PCI_DMA_TODEVICE, NO_TIMEOUT); + if (rc) + goto out; if (c->err_info->CommandStatus != 0) out: dev_warn(&h->pdev->dev, "error flushing cache on controller\n"); cmd_free(h, c); -out_of_memory: kfree(flush_buf); } @@ -7078,15 +8175,18 @@ static void hpsa_shutdown(struct pci_dev *pdev) */ 
hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); - hpsa_free_irqs_and_disable_msix(h); + hpsa_free_irqs(h); /* init_one 4 */ + hpsa_disable_interrupt_mode(h); /* pci_init 2 */ } static void hpsa_free_device_info(struct ctlr_info *h) { int i; - for (i = 0; i < h->ndevices; i++) + for (i = 0; i < h->ndevices; i++) { kfree(h->dev[i]); + h->dev[i] = NULL; + } } static void hpsa_remove_one(struct pci_dev *pdev) @@ -7108,29 +8208,34 @@ static void hpsa_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&h->rescan_ctlr_work); destroy_workqueue(h->rescan_ctlr_wq); destroy_workqueue(h->resubmit_wq); - hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ + + /* includes hpsa_free_irqs - init_one 4 */ + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ hpsa_shutdown(pdev); - iounmap(h->vaddr); - iounmap(h->transtable); - iounmap(h->cfgtable); - hpsa_free_device_info(h); - hpsa_free_sg_chain_blocks(h); - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct CommandList), - h->cmd_pool, h->cmd_pool_dhandle); - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(struct ErrorInfo), - h->errinfo_pool, h->errinfo_pool_dhandle); - hpsa_free_reply_queues(h); - kfree(h->cmd_pool_bits); - kfree(h->blockFetchTable); - kfree(h->ioaccel1_blockFetchTable); - kfree(h->ioaccel2_blockFetchTable); - kfree(h->hba_inquiry_data); - pci_disable_device(pdev); - pci_release_regions(pdev); - free_percpu(h->lockup_detected); - kfree(h); + + hpsa_free_device_info(h); /* scan */ + + kfree(h->hba_inquiry_data); /* init_one 10 */ + h->hba_inquiry_data = NULL; /* init_one 10 */ + if (h->scsi_host) + scsi_remove_host(h->scsi_host); /* init_one 8 */ + hpsa_free_ioaccel2_sg_chain_blocks(h); + hpsa_free_performant_mode(h); /* init_one 7 */ + hpsa_free_sg_chain_blocks(h); /* init_one 6 */ + hpsa_free_cmd_pool(h); /* init_one 5 */ + + /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ + + scsi_host_put(h->scsi_host); /* init_one 3 */ + h->scsi_host = NULL; /* init_one 3 */ + + /* includes hpsa_disable_interrupt_mode - pci_init 2 */ + hpsa_free_pci_init(h); /* init_one 2.5 */ + + free_percpu(h->lockup_detected); /* init_one 2 */ + h->lockup_detected = NULL; /* init_one 2 */ + /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ + kfree(h); /* init_one 1 */ } static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, @@ -7188,7 +8293,10 @@ static void calc_bucket_map(int bucket[], int num_buckets, } } -/* return -ENODEV or other reason on error, 0 on success */ +/* + * return -ENODEV on err, 0 on success (or no action) + * allocates numerous items that must be freed later + */ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) { int i; @@ -7370,7 +8478,23 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) return 0; } -static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) +/* Free ioaccel1 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) +{ + if (h->ioaccel_cmd_pool) { + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + h->ioaccel_cmd_pool, + h->ioaccel_cmd_pool_dhandle); + h->ioaccel_cmd_pool = NULL; + h->ioaccel_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel1_blockFetchTable); + h->ioaccel1_blockFetchTable = NULL; +} + +/* Allocate ioaccel1 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) { h->ioaccel_maxsg = 
readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); @@ -7401,16 +8525,32 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) return 0; clean_up: - if (h->ioaccel_cmd_pool) + hpsa_free_ioaccel1_cmd_and_bft(h); + return -ENOMEM; +} + +/* Free ioaccel2 mode command blocks and block fetch table */ +static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) +{ + hpsa_free_ioaccel2_sg_chain_blocks(h); + + if (h->ioaccel2_cmd_pool) { pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), - h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); - kfree(h->ioaccel1_blockFetchTable); - return 1; + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + h->ioaccel2_cmd_pool, + h->ioaccel2_cmd_pool_dhandle); + h->ioaccel2_cmd_pool = NULL; + h->ioaccel2_cmd_pool_dhandle = 0; + } + kfree(h->ioaccel2_blockFetchTable); + h->ioaccel2_blockFetchTable = NULL; } -static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) +/* Allocate ioaccel2 mode command blocks and block fetch table */ +static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) { + int rc; + /* Allocate ioaccel2 mode command blocks and block fetch table */ h->ioaccel_maxsg = @@ -7430,7 +8570,13 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) sizeof(u32)), GFP_KERNEL); if ((h->ioaccel2_cmd_pool == NULL) || - (h->ioaccel2_blockFetchTable == NULL)) + (h->ioaccel2_blockFetchTable == NULL)) { + rc = -ENOMEM; + goto clean_up; + } + + rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); + if (rc) goto clean_up; memset(h->ioaccel2_cmd_pool, 0, @@ -7438,41 +8584,50 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) return 0; clean_up: - if (h->ioaccel2_cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), - h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); - kfree(h->ioaccel2_blockFetchTable); - return 1; + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; } -static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) +/* Free items allocated by hpsa_put_ctlr_into_performant_mode */ +static void hpsa_free_performant_mode(struct ctlr_info *h) +{ + kfree(h->blockFetchTable); + h->blockFetchTable = NULL; + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); +} + +/* return -ENODEV on error, 0 on success (or no action) + * allocates numerous items that must be freed later + */ +static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; unsigned long transMethod = CFGTBL_Trans_Performant | CFGTBL_Trans_use_short_tags; - int i; + int i, rc; if (hpsa_simple_mode) - return; + return 0; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) - return; + return 0; /* Check for I/O accelerator mode support */ if (trans_support & CFGTBL_Trans_io_accel1) { transMethod |= CFGTBL_Trans_io_accel1 | CFGTBL_Trans_enable_directed_msix; - if (hpsa_alloc_ioaccel_cmd_and_bft(h)) - goto clean_up; - } else { - if (trans_support & CFGTBL_Trans_io_accel2) { - transMethod |= CFGTBL_Trans_io_accel2 | + rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); + if (rc) + return rc; + } else if (trans_support & CFGTBL_Trans_io_accel2) { + transMethod |= CFGTBL_Trans_io_accel2 | CFGTBL_Trans_enable_directed_msix; - if (ioaccel2_alloc_cmds_and_bft(h)) - goto clean_up; - } + rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); + if (rc) + return rc; } h->nreply_queues = h->msix_vector > 0 ? 
h->msix_vector : 1; @@ -7484,8 +8639,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) h->reply_queue[i].head = pci_alloc_consistent(h->pdev, h->reply_queue_size, &(h->reply_queue[i].busaddr)); - if (!h->reply_queue[i].head) - goto clean_up; + if (!h->reply_queue[i].head) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } h->reply_queue[i].size = h->max_commands; h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h->reply_queue[i].current_entry = 0; @@ -7494,15 +8651,24 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * sizeof(u32)), GFP_KERNEL); - if (!h->blockFetchTable) - goto clean_up; + if (!h->blockFetchTable) { + rc = -ENOMEM; + goto clean1; /* rq, ioaccel */ + } - hpsa_enter_performant_mode(h, trans_support); - return; + rc = hpsa_enter_performant_mode(h, trans_support); + if (rc) + goto clean2; /* bft, rq, ioaccel */ + return 0; -clean_up: - hpsa_free_reply_queues(h); +clean2: /* bft, rq, ioaccel */ kfree(h->blockFetchTable); + h->blockFetchTable = NULL; +clean1: /* rq, ioaccel */ + hpsa_free_reply_queues(h); + hpsa_free_ioaccel1_cmd_and_bft(h); + hpsa_free_ioaccel2_cmd_and_bft(h); + return rc; } static int is_accelerated_cmd(struct CommandList *c) diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 657713050349..6ee4da6b1153 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -47,6 +47,7 @@ struct hpsa_scsi_dev_t { unsigned char raid_level; /* from inquiry page 0xC1 */ unsigned char volume_offline; /* discovered via TUR or VPD */ u16 queue_depth; /* max queue_depth for this device */ + atomic_t reset_cmds_out; /* Count of commands to-be affected */ atomic_t ioaccel_cmds_out; /* Only used for physical devices * counts commands sent to physical * device via "ioaccel" path. @@ -54,6 +55,8 @@ struct hpsa_scsi_dev_t { u32 ioaccel_handle; int offload_config; /* I/O accel RAID offload configured */ int offload_enabled; /* I/O accel RAID offload enabled */ + int offload_to_be_enabled; + int hba_ioaccel_enabled; int offload_to_mirror; /* Send next I/O accelerator RAID * offload request to mirror drive */ @@ -68,6 +71,13 @@ struct hpsa_scsi_dev_t { * devices in order to honor physical device queue depth limits. */ struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES]; + int nphysical_disks; + int supports_aborts; +#define HPSA_DO_NOT_EXPOSE 0x0 +#define HPSA_SG_ATTACH 0x1 +#define HPSA_ULD_ATTACH 0x2 +#define HPSA_SCSI_ADD (HPSA_SG_ATTACH | HPSA_ULD_ATTACH) + u8 expose_state; }; struct reply_queue_buffer { @@ -133,7 +143,6 @@ struct ctlr_info { struct CfgTable __iomem *cfgtable; int interrupts_enabled; int max_commands; - int last_allocation; atomic_t commands_outstanding; # define PERF_MODE_INT 0 # define DOORBELL_INT 1 @@ -154,6 +163,7 @@ struct ctlr_info { u8 max_cmd_sg_entries; int chainsize; struct SGDescriptor **cmd_sg_list; + struct ioaccel2_sg_element **ioaccel2_cmd_sg_list; /* pointers to command and error info pool */ struct CommandList *cmd_pool; @@ -211,6 +221,7 @@ struct ctlr_info { int remove_in_progress; /* Address of h->q[x] is passed to intr handler to know which queue */ u8 q[MAX_REPLY_QUEUES]; + char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */ u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. 
*/ #define HPSATMF_BITS_SUPPORTED (1 << 0) #define HPSATMF_PHYS_LUN_RESET (1 << 1) @@ -222,6 +233,7 @@ struct ctlr_info { #define HPSATMF_PHYS_QRY_TASK (1 << 7) #define HPSATMF_PHYS_QRY_TSET (1 << 8) #define HPSATMF_PHYS_QRY_ASYNC (1 << 9) +#define HPSATMF_IOACCEL_ENABLED (1 << 15) #define HPSATMF_MASK_SUPPORTED (1 << 16) #define HPSATMF_LOG_LUN_RESET (1 << 17) #define HPSATMF_LOG_NEX_RESET (1 << 18) @@ -251,8 +263,13 @@ struct ctlr_info { struct list_head offline_device_list; int acciopath_status; int raid_offload_debug; + int needs_abort_tags_swizzled; struct workqueue_struct *resubmit_wq; struct workqueue_struct *rescan_ctlr_wq; + atomic_t abort_cmds_available; + wait_queue_head_t abort_cmd_wait_queue; + wait_queue_head_t event_sync_wait_queue; + struct mutex reset_mutex; }; struct offline_device_entry { diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 3a621c74b76f..c601622cc98e 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -42,8 +42,22 @@ #define CMD_UNSOLICITED_ABORT 0x000A #define CMD_TIMEOUT 0x000B #define CMD_UNABORTABLE 0x000C +#define CMD_TMF_STATUS 0x000D #define CMD_IOACCEL_DISABLED 0x000E +#define CMD_CTLR_LOCKUP 0xffff +/* Note: CMD_CTLR_LOCKUP is not a value defined by the CISS spec + * it is a value defined by the driver that commands can be marked + * with when a controller lockup has been detected by the driver + */ +/* TMF function status values */ +#define CISS_TMF_COMPLETE 0x00 +#define CISS_TMF_INVALID_FRAME 0x02 +#define CISS_TMF_NOT_SUPPORTED 0x04 +#define CISS_TMF_FAILED 0x05 +#define CISS_TMF_SUCCESS 0x08 +#define CISS_TMF_WRONG_LUN 0x09 +#define CISS_TMF_OVERLAPPED_TAG 0x0a /* Unit Attentions ASC's as defined for the MSA2012sa */ #define POWER_OR_RESET 0x29 @@ -240,6 +254,7 @@ struct ReportLUNdata { struct ext_report_lun_entry { u8 lunid[8]; +#define MASKED_DEVICE(x) ((x)[3] & 0xC0) #define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F) #define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) #define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \ @@ -247,6 +262,8 @@ struct ext_report_lun_entry { u8 wwid[8]; u8 device_type; u8 device_flags; +#define NON_DISK_PHYS_DEV(x) ((x)[17] & 0x01) +#define PHYS_IOACCEL(x) ((x)[17] & 0x08) u8 lun_count; /* multi-lun device, how many luns */ u8 redundant_paths; u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ @@ -379,6 +396,7 @@ struct ErrorInfo { #define CMD_SCSI 0x03 #define CMD_IOACCEL1 0x04 #define CMD_IOACCEL2 0x05 +#define IOACCEL2_TMF 0x06 #define DIRECT_LOOKUP_SHIFT 4 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) @@ -421,7 +439,10 @@ struct CommandList { * not used. 
*/ struct hpsa_scsi_dev_t *phys_disk; - atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */ + + int abort_pending; + struct hpsa_scsi_dev_t *reset_pending; + atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */ } __aligned(COMMANDLIST_ALIGNMENT); /* Max S/G elements in I/O accelerator command */ @@ -515,6 +536,12 @@ struct io_accel2_scsi_response { #define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28 #define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40 #define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E +#define IOACCEL2_STATUS_SR_IO_ERROR 0x01 +#define IOACCEL2_STATUS_SR_IO_ABORTED 0x02 +#define IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE 0x03 +#define IOACCEL2_STATUS_SR_INVALID_DEVICE 0x04 +#define IOACCEL2_STATUS_SR_UNDERRUN 0x51 +#define IOACCEL2_STATUS_SR_OVERRUN 0x75 u8 data_present; /* low 2 bits */ #define IOACCEL2_NO_DATAPRESENT 0x000 #define IOACCEL2_RESPONSE_DATAPRESENT 0x001 @@ -567,6 +594,7 @@ struct io_accel2_cmd { #define IOACCEL2_DIR_NO_DATA 0x00 #define IOACCEL2_DIR_DATA_IN 0x01 #define IOACCEL2_DIR_DATA_OUT 0x02 +#define IOACCEL2_TMF_ABORT 0x01 /* * SCSI Task Management Request format for Accelerator Mode 2 */ @@ -575,13 +603,13 @@ struct hpsa_tmf_struct { u8 reply_queue; /* Reply Queue ID */ u8 tmf; /* Task Management Function */ u8 reserved1; /* byte 3 Reserved */ - u32 it_nexus; /* SCSI I-T Nexus */ + __le32 it_nexus; /* SCSI I-T Nexus */ u8 lun_id[8]; /* LUN ID for TMF request */ __le64 tag; /* cciss tag associated w/ request */ __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ __le64 error_ptr; /* Error Pointer */ __le32 error_len; /* Error Length */ -}; +} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); /* Configuration Table Structure */ struct HostWrite { diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index acea5d6eebd0..6a41c36b16b0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1053,7 +1053,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); srp_cmd->opcode = SRP_CMD; memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); - srp_cmd->lun = cpu_to_be64(((u64)lun) << 48); + int_to_scsilun(lun, &srp_cmd->lun); if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { if (!firmware_has_feature(FW_FEATURE_CMO)) @@ -1529,7 +1529,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) /* Set up an abort SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); + int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; tsk_mgmt->task_tag = (u64) found_evt; @@ -1652,7 +1652,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) /* Set up a lun reset SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); + int_to_scsilun(lun, &tsk_mgmt->lun); tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; evt->sync_srp = &srp_rsp; diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 89a8266560d0..4e1a632ccf16 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1109,7 +1109,6 @@ static struct scsi_host_template imm_template = { .bios_param = imm_biosparam, .this_id = 7, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, .can_queue = 1, .slave_alloc = imm_adjust_queue, diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index e5dae7b54d9a..6a926bae76b2 100644 
--- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2833,7 +2833,6 @@ static struct scsi_host_template initio_template = { .can_queue = MAX_TARGETS * i91u_MAXQUEUE, .this_id = 1, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 47412cf4eaac..73790a1d0969 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -272,7 +272,7 @@ #define IPR_RUNTIME_RESET 0x40000000 #define IPR_IPL_INIT_MIN_STAGE_TIME 5 -#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15 +#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 30 #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 #define IPR_IPL_INIT_STAGE_MASK 0xff000000 diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 7542f11d3fcd..02cb76fd4420 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -206,10 +206,6 @@ module_param(ips, charp, 0); #define IPS_VERSION_HIGH IPS_VER_MAJOR_STRING "." IPS_VER_MINOR_STRING #define IPS_VERSION_LOW "." IPS_VER_BUILD_STRING " " -#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__) -#warning "This driver has only been tested on the x86/ia64/x86_64 platforms" -#endif - #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \ PCI_DMA_BIDIRECTIONAL : \ @@ -6788,6 +6784,11 @@ ips_remove_device(struct pci_dev *pci_dev) static int __init ips_module_init(void) { +#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__) + printk(KERN_ERR "ips: This driver has only been tested on the x86/ia64/x86_64 platforms\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); +#endif + if (pci_register_driver(&ips_pci_driver) < 0) return -ENODEV; ips_driver_template.module = THIS_MODULE; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index cd41b63a2f10..0dfcabe3ca7c 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -160,7 +160,6 @@ static struct scsi_host_template isci_sht = { .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .can_queue = ISCI_CAN_QUEUE_VAL, - .cmd_per_lun = 1, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 9b81a34d7449..a5a56fa31e70 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -230,6 +230,8 @@ struct lpfc_stats { uint32_t elsRcvRRQ; uint32_t elsRcvRTV; uint32_t elsRcvECHO; + uint32_t elsRcvLCB; + uint32_t elsRcvRDP; uint32_t elsXmitFLOGI; uint32_t elsXmitFDISC; uint32_t elsXmitPLOGI; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 587e3e962f2b..b0e6fe46448d 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -498,3 +498,5 @@ bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, struct lpfc_name *, uint64_t *, struct lpfc_name *, struct lpfc_name *, uint64_t *, uint32_t *); +int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox); +void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 513edcb0c2da..25aa9b98d53a 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -710,7 +710,7 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt, * returns a pointer to that log in the private_data field in 
@file. * * Returns: - * This function returns zero if successful. On error it will return an negative + * This function returns zero if successful. On error it will return a negative * error value. **/ static int @@ -760,7 +760,7 @@ out: * returns a pointer to that log in the private_data field in @file. * * Returns: - * This function returns zero if successful. On error it will return an negative + * This function returns zero if successful. On error it will return a negative * error value. **/ static int @@ -810,7 +810,7 @@ out: * returns a pointer to that log in the private_data field in @file. * * Returns: - * This function returns zero if successful. On error it will return an negative + * This function returns zero if successful. On error it will return a negative * error value. **/ static int @@ -852,7 +852,7 @@ out: * returns a pointer to that log in the private_data field in @file. * * Returns: - * This function returns zero if successful. On error it will return an negative + * This function returns zero if successful. On error it will return a negative * error value. **/ static int @@ -894,7 +894,7 @@ out: * returns a pointer to that log in the private_data field in @file. * * Returns: - * This function returns zero if successful. On error it will return an negative + * This function returns zero if successful. On error it will return a negative * error value. **/ static int @@ -1115,7 +1115,7 @@ lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file) * returns a pointer to that log in the private_data field in @file. * * Returns: - * This function returns zero if successful. On error it will return an negative + * This function returns zero if successful. On error it will return a negative * error value. **/ static int diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 6977027979be..361f5b3d9d93 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -79,7 +79,6 @@ struct lpfc_nodelist { struct lpfc_name nlp_portname; struct lpfc_name nlp_nodename; uint32_t nlp_flag; /* entry flags */ - uint32_t nlp_add_flag; /* additional flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -147,6 +146,7 @@ struct lpfc_node_rrq { #define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ #define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ #define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ +#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ #define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful ACC */ #define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from @@ -158,8 +158,6 @@ struct lpfc_node_rrq { #define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ -/* Defines for nlp_add_flag (uint32) */ -#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */ /* ndlp usage management macros */ #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 851e8efe364e..36bf58ba750a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1509,12 +1509,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *ndlp) { struct lpfc_vport *vport = ndlp->vport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *new_ndlp; struct lpfc_rport_data 
*rdata; struct fc_rport *rport; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; - uint32_t rc, keepDID = 0; + uint32_t rc, keepDID = 0, keep_nlp_flag = 0; + uint16_t keep_nlp_state; int put_node; int put_rport; unsigned long *active_rrqs_xri_bitmap = NULL; @@ -1603,11 +1605,14 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, ndlp->active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) - new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_lock_irq(shost->host_lock); + keep_nlp_flag = new_ndlp->nlp_flag; + new_ndlp->nlp_flag = ndlp->nlp_flag; + ndlp->nlp_flag = keep_nlp_flag; + spin_unlock_irq(shost->host_lock); - /* Set state will put new_ndlp on to node list if not already done */ + /* Set nlp_states accordingly */ + keep_nlp_state = new_ndlp->nlp_state; lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); /* Move this back to NPR state */ @@ -1624,8 +1629,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, if (rport) { rdata = rport->dd_data; if (rdata->pnode == ndlp) { - lpfc_nlp_put(ndlp); + /* break the link before dropping the ref */ ndlp->rport = NULL; + lpfc_nlp_put(ndlp); rdata->pnode = lpfc_nlp_get(new_ndlp); new_ndlp->rport = rport; } @@ -1648,7 +1654,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, memcpy(ndlp->active_rrqs_xri_bitmap, active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); - lpfc_drop_node(vport, ndlp); + + if (!NLP_CHK_NODE_ACT(ndlp)) + lpfc_drop_node(vport, ndlp); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -1665,20 +1673,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, active_rrqs_xri_bitmap, phba->cfg_rrq_xri_bitmap_sz); - /* Since we are swapping the ndlp passed in with the new one - * and the did has already been swapped, copy over state. - * The new WWNs are already in new_ndlp since thats what - * we looked it up by in the begining of this routine. - */ - new_ndlp->nlp_state = ndlp->nlp_state; - - /* Since we are switching over to the new_ndlp, the old - * ndlp should be put in the NPR state, unless we have - * already started re-discovery on it. + /* Since we are switching over to the new_ndlp, + * reset the old ndlp state */ if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) - lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + keep_nlp_state = NLP_STE_NPR_NODE; + lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); /* Fix up the rport accordingly */ rport = ndlp->rport; @@ -3667,15 +3668,6 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * At this point, the driver is done so release the IOCB */ lpfc_els_free_iocb(phba, cmdiocb); - - /* - * Remove the ndlp reference if it's a fabric node that has - * sent us an unsolicted LOGO. 
- */ - if (ndlp->nlp_type & NLP_FABRIC) - lpfc_nlp_put(ndlp); - - return; } /** @@ -4020,7 +4012,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, ndlp->nlp_rpi, vport->fc_flag); if (ndlp->nlp_flag & NLP_LOGO_ACC) { spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || + ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) + ndlp->nlp_flag &= ~NLP_LOGO_ACC; spin_unlock_irq(shost->host_lock); elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; } else { @@ -4587,16 +4581,16 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && + (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && + (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); sentplogi++; vport->num_disc_nodes++; if (vport->num_disc_nodes >= - vport->cfg_discovery_threads) { + vport->cfg_discovery_threads) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_NLP_MORE; spin_unlock_irq(shost->host_lock); @@ -4615,6 +4609,660 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) return sentplogi; } +void +lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, + uint32_t word0) +{ + + desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); + desc->payload.els_req = word0; + desc->length = cpu_to_be32(sizeof(desc->payload)); +} + +void +lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, + uint8_t *page_a0, uint8_t *page_a2) +{ + uint16_t wavelength; + uint16_t temperature; + uint16_t rx_power; + uint16_t tx_bias; + uint16_t tx_power; + uint16_t vcc; + uint16_t flag = 0; + struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; + struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; + + desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); + + trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) + &page_a0[SSF_TRANSCEIVER_CODE_B4]; + trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) + &page_a0[SSF_TRANSCEIVER_CODE_B5]; + + if ((trasn_code_byte4->fc_sw_laser) || + (trasn_code_byte5->fc_sw_laser_sl) || + (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ + flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); + } else if (trasn_code_byte4->fc_lw_laser) { + wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | + page_a0[SSF_WAVELENGTH_B0]; + if (wavelength == SFP_WAVELENGTH_LC1310) + flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; + if (wavelength == SFP_WAVELENGTH_LL1550) + flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; + } + /* check if its SFP+ */ + flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? + SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) + << SFP_FLAG_CT_SHIFT; + + /* check if its OPTICAL */ + flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
+ SFP_FLAG_IS_OPTICAL_PORT : 0) + << SFP_FLAG_IS_OPTICAL_SHIFT; + + temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | + page_a2[SFF_TEMPERATURE_B0]); + vcc = (page_a2[SFF_VCC_B1] << 8 | + page_a2[SFF_VCC_B0]); + tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | + page_a2[SFF_TXPOWER_B0]); + tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | + page_a2[SFF_TX_BIAS_CURRENT_B0]); + rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | + page_a2[SFF_RXPOWER_B0]); + desc->sfp_info.temperature = cpu_to_be16(temperature); + desc->sfp_info.rx_power = cpu_to_be16(rx_power); + desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); + desc->sfp_info.tx_power = cpu_to_be16(tx_power); + desc->sfp_info.vcc = cpu_to_be16(vcc); + + desc->sfp_info.flags = cpu_to_be16(flag); + desc->length = cpu_to_be32(sizeof(desc->sfp_info)); +} + +void +lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, + READ_LNK_VAR *stat) +{ + uint32_t type; + + desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); + + type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; + + desc->info.port_type = cpu_to_be32(type); + + desc->info.link_status.link_failure_cnt = + cpu_to_be32(stat->linkFailureCnt); + desc->info.link_status.loss_of_synch_cnt = + cpu_to_be32(stat->lossSyncCnt); + desc->info.link_status.loss_of_signal_cnt = + cpu_to_be32(stat->lossSignalCnt); + desc->info.link_status.primitive_seq_proto_err = + cpu_to_be32(stat->primSeqErrCnt); + desc->info.link_status.invalid_trans_word = + cpu_to_be32(stat->invalidXmitWord); + desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); + + desc->length = cpu_to_be32(sizeof(desc->info)); +} + +void +lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) +{ + uint16_t rdp_cap = 0; + uint16_t rdp_speed; + + desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); + + switch (phba->sli4_hba.link_state.speed) { + case LPFC_FC_LA_SPEED_1G: + rdp_speed = RDP_PS_1GB; + break; + case LPFC_FC_LA_SPEED_2G: + rdp_speed = RDP_PS_2GB; + break; + case LPFC_FC_LA_SPEED_4G: + rdp_speed = RDP_PS_4GB; + break; + case LPFC_FC_LA_SPEED_8G: + rdp_speed = RDP_PS_8GB; + break; + case LPFC_FC_LA_SPEED_10G: + rdp_speed = RDP_PS_10GB; + break; + case LPFC_FC_LA_SPEED_16G: + rdp_speed = RDP_PS_16GB; + break; + case LPFC_FC_LA_SPEED_32G: + rdp_speed = RDP_PS_32GB; + break; + default: + rdp_speed = RDP_PS_UNKNOWN; + break; + } + + desc->info.port_speed.speed = cpu_to_be16(rdp_speed); + + if (phba->lmt & LMT_16Gb) + rdp_cap |= RDP_PS_16GB; + if (phba->lmt & LMT_10Gb) + rdp_cap |= RDP_PS_10GB; + if (phba->lmt & LMT_8Gb) + rdp_cap |= RDP_PS_8GB; + if (phba->lmt & LMT_4Gb) + rdp_cap |= RDP_PS_4GB; + if (phba->lmt & LMT_2Gb) + rdp_cap |= RDP_PS_2GB; + if (phba->lmt & LMT_1Gb) + rdp_cap |= RDP_PS_1GB; + + if (rdp_cap == 0) + rdp_cap = RDP_CAP_UNKNOWN; + + desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); + desc->length = cpu_to_be32(sizeof(desc->info)); +} + +void +lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, + struct lpfc_hba *phba) +{ + + desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); + + memcpy(desc->port_names.wwnn, phba->wwnn, + sizeof(desc->port_names.wwnn)); + + memcpy(desc->port_names.wwpn, &phba->wwpn, + sizeof(desc->port_names.wwpn)); + + desc->length = cpu_to_be32(sizeof(desc->port_names)); +} + +void +lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, + struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + + desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); + if (vport->fc_flag & FC_FABRIC) { + memcpy(desc->port_names.wwnn, &vport->fabric_nodename, + 
sizeof(desc->port_names.wwnn)); + + memcpy(desc->port_names.wwpn, &vport->fabric_portname, + sizeof(desc->port_names.wwpn)); + } else { /* Point to Point */ + memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, + sizeof(desc->port_names.wwnn)); + + memcpy(desc->port_names.wwnn, &ndlp->nlp_portname, + sizeof(desc->port_names.wwpn)); + } + + desc->length = cpu_to_be32(sizeof(desc->port_names)); +} + +void +lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, + int status) +{ + struct lpfc_nodelist *ndlp = rdp_context->ndlp; + struct lpfc_vport *vport = ndlp->vport; + struct lpfc_iocbq *elsiocb; + IOCB_t *icmd; + uint8_t *pcmd; + struct ls_rjt *stat; + struct fc_rdp_res_frame *rdp_res; + uint32_t cmdsize; + int rc; + + if (status != SUCCESS) + goto error; + cmdsize = sizeof(struct fc_rdp_res_frame); + + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, + lpfc_max_els_tries, rdp_context->ndlp, + rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); + lpfc_nlp_put(ndlp); + if (!elsiocb) + goto free_rdp_context; + + icmd = &elsiocb->iocb; + icmd->ulpContext = rdp_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2171 Xmit RDP response tag x%x xri x%x, " + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + elsiocb->iotag, elsiocb->iocb.ulpContext, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + rdp_res = (struct fc_rdp_res_frame *) + (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + + /* For RDP payload */ + lpfc_rdp_res_link_service(&rdp_res->link_service_desc, ELS_CMD_RDP); + + lpfc_rdp_res_sfp_desc(&rdp_res->sfp_desc, + rdp_context->page_a0, rdp_context->page_a2); + lpfc_rdp_res_speed(&rdp_res->portspeed_desc, phba); + lpfc_rdp_res_link_error(&rdp_res->link_error_desc, + &rdp_context->link_stat); + lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba); + lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc, + vport, ndlp); + rdp_res->length = cpu_to_be32(RDP_DESC_PAYLOAD_SIZE); + + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + + phba->fc_stat.elsXmitACC++; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + + kfree(rdp_context); + + return; +error: + cmdsize = 2 * sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, + ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); + lpfc_nlp_put(ndlp); + if (!elsiocb) + goto free_rdp_context; + + icmd = &elsiocb->iocb; + icmd->ulpContext = rdp_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + + *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; + stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); + stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + + phba->fc_stat.elsXmitLSRJT++; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); +free_rdp_context: + kfree(rdp_context); +} + +int +lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) +{ + LPFC_MBOXQ_t *mbox = NULL; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, + "7105 failed to allocate mailbox memory"); + return 1; + } 
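/*
 * A sketch of the completion chain lpfc_get_rdp_info() starts here (all
 * names are from this patch): the single mailbox and the rdp_context in
 * context2 are reused at every step, and any failure funnels into
 * rdp_context->cmpl(phba, rdp_context, FAILURE).
 *
 *   lpfc_get_rdp_info()                    dump SFF page 0xA0, MBX_NOWAIT
 *    -> lpfc_mbx_cmpl_rdp_page_a0()        copy A0, re-arm dump for 0xA2
 *       -> lpfc_mbx_cmpl_rdp_page_a2()     copy A2, issue READ_LNK_STAT
 *          -> lpfc_mbx_cmpl_rdp_link_stat()  rdp_context->cmpl(..., SUCCESS)
 */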
+ + if (lpfc_sli4_dump_page_a0(phba, mbox)) + goto prep_mbox_fail; + mbox->vport = rdp_context->ndlp->vport; + mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; + mbox->context2 = (struct lpfc_rdp_context *) rdp_context; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto issue_mbox_fail; + + return 0; + +prep_mbox_fail: +issue_mbox_fail: + mempool_free(mbox, phba->mbox_mem_pool); + return 1; +} + +/* + * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes an unsolicited RDP(Read Diagnostic Parameters) + * IOCB. First, the payload of the unsolicited RDP is checked. + * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 + * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, + * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl + * gather all data and send RDP response. + * + * Return code + * 0 - Sent the acc response + * 1 - Sent the reject response. + */ +static int +lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; + struct fc_rdp_req_frame *rdp_req; + struct lpfc_rdp_context *rdp_context; + IOCB_t *cmd = NULL; + struct ls_rjt stat; + + if (phba->sli_rev < LPFC_SLI_REV4 || + (bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2)) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_expl = LSEXP_REQ_UNSUPPORTED; + goto error; + } + + if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { + rjt_err = LSRJT_UNABLE_TPC; + rjt_expl = LSEXP_REQ_UNSUPPORTED; + goto error; + } + + pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; + rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; + + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2422 ELS RDP Request " + "dec len %d tag x%x port_id %d len %d\n", + be32_to_cpu(rdp_req->rdp_des_length), + be32_to_cpu(rdp_req->nport_id_desc.tag), + be32_to_cpu(rdp_req->nport_id_desc.nport_id), + be32_to_cpu(rdp_req->nport_id_desc.length)); + + if (sizeof(struct fc_rdp_nport_desc) != + be32_to_cpu(rdp_req->rdp_des_length)) + goto rjt_logerr; + if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) + goto rjt_logerr; + if (RDP_NPORT_ID_SIZE != + be32_to_cpu(rdp_req->nport_id_desc.length)) + goto rjt_logerr; + rdp_context = kmalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); + if (!rdp_context) { + rjt_err = LSRJT_UNABLE_TPC; + goto error; + } + + memset(rdp_context, 0, sizeof(struct lpfc_rdp_context)); + cmd = &cmdiocb->iocb; + rdp_context->ndlp = lpfc_nlp_get(ndlp); + rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; + rdp_context->rx_id = cmd->ulpContext; + rdp_context->cmpl = lpfc_els_rdp_cmpl; + if (lpfc_get_rdp_info(phba, rdp_context)) { + lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, + "2423 Unable to send mailbox"); + kfree(rdp_context); + rjt_err = LSRJT_UNABLE_TPC; + lpfc_nlp_put(ndlp); + goto error; + } + + return 0; + +rjt_logerr: + rjt_err = LSRJT_LOGICAL_ERR; + +error: + memset(&stat, 0, sizeof(stat)); + stat.un.b.lsRjtRsnCode = rjt_err; + stat.un.b.lsRjtRsnCodeExp = rjt_expl; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 1; +} + + +static void +lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + 
MAILBOX_t *mb; + IOCB_t *icmd; + uint8_t *pcmd; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + struct ls_rjt *stat; + union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_lcb_context *lcb_context; + struct fc_lcb_res_frame *lcb_res; + uint32_t cmdsize, shdr_status, shdr_add_status; + int rc; + + mb = &pmb->u.mb; + lcb_context = (struct lpfc_lcb_context *)pmb->context1; + ndlp = lcb_context->ndlp; + pmb->context1 = NULL; + pmb->context2 = NULL; + + shdr = (union lpfc_sli4_cfg_shdr *) + &pmb->u.mqe.un.beacon_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, + "0194 SET_BEACON_CONFIG mailbox " + "completed with status x%x add_status x%x," + " mbx status x%x\n", + shdr_status, shdr_add_status, mb->mbxStatus); + + if (mb->mbxStatus && !(shdr_status && + shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) { + mempool_free(pmb, phba->mbox_mem_pool); + goto error; + } + + mempool_free(pmb, phba->mbox_mem_pool); + cmdsize = sizeof(struct fc_lcb_res_frame); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + /* Decrement the ndlp reference count from previous mbox command */ + lpfc_nlp_put(ndlp); + + if (!elsiocb) + goto free_lcb_context; + + lcb_res = (struct fc_lcb_res_frame *) + (((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + icmd = &elsiocb->iocb; + icmd->ulpContext = lcb_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; + + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + *((uint32_t *)(pcmd)) = ELS_CMD_ACC; + lcb_res->lcb_sub_command = lcb_context->sub_command; + lcb_res->lcb_type = lcb_context->type; + lcb_res->lcb_frequency = lcb_context->frequency; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitACC++; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + + kfree(lcb_context); + return; + +error: + cmdsize = sizeof(struct fc_lcb_res_frame); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_LS_RJT); + lpfc_nlp_put(ndlp); + if (!elsiocb) + goto free_lcb_context; + + icmd = &elsiocb->iocb; + icmd->ulpContext = lcb_context->rx_id; + icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; + pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; + stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); + stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitLSRJT++; + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); +free_lcb_context: + kfree(lcb_context); +} + +static int +lpfc_sli4_set_beacon(struct lpfc_vport *vport, + struct lpfc_lcb_context *lcb_context, + uint32_t beacon_state) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox = NULL; + uint32_t len; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return 1; + + len = sizeof(struct lpfc_mbx_set_beacon_config) - + sizeof(struct lpfc_sli4_cfg_mhdr); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, + LPFC_SLI4_MBX_EMBED); + mbox->context1 = (void *)lcb_context; + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_els_lcb_rsp; + 
bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, + phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, + beacon_state); + bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1); + bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0); + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + return 1; + } + + return 0; +} + + +/** + * lpfc_els_rcv_lcb - Process an unsolicited LCB + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. + * First, the payload of the unsolicited LCB is checked. + * Then based on Subcommand beacon will either turn on or off. + * + * Return code + * 0 - Sent the acc response + * 1 - Sent the reject response. + **/ +static int +lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *pcmd; + IOCB_t *icmd; + uint8_t *lp; + struct fc_lcb_request_frame *beacon; + struct lpfc_lcb_context *lcb_context; + uint8_t state, rjt_err; + struct ls_rjt stat; + + icmd = &cmdiocb->iocb; + pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; + lp = (uint8_t *)pcmd->virt; + beacon = (struct fc_lcb_request_frame *)pcmd->virt; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " + "type x%x frequency %x duration x%x\n", + lp[0], lp[1], lp[2], + beacon->lcb_command, + beacon->lcb_sub_command, + beacon->lcb_type, + beacon->lcb_frequency, + be16_to_cpu(beacon->lcb_duration)); + + if (phba->sli_rev < LPFC_SLI_REV4 || + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != + LPFC_SLI_INTF_IF_TYPE_2)) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + lcb_context = kmalloc(sizeof(struct lpfc_lcb_context), GFP_KERNEL); + + if (phba->hba_flag & HBA_FCOE_MODE) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + if (beacon->lcb_frequency == 0) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + if ((beacon->lcb_type != LPFC_LCB_GREEN) && + (beacon->lcb_type != LPFC_LCB_AMBER)) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + if ((beacon->lcb_sub_command != LPFC_LCB_ON) && + (beacon->lcb_sub_command != LPFC_LCB_OFF)) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + if ((beacon->lcb_sub_command == LPFC_LCB_ON) && + (beacon->lcb_type != LPFC_LCB_GREEN) && + (beacon->lcb_type != LPFC_LCB_AMBER)) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + if (be16_to_cpu(beacon->lcb_duration) != 0) { + rjt_err = LSRJT_CMD_UNSUPPORTED; + goto rjt; + } + + state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 
1 : 0; + lcb_context->sub_command = beacon->lcb_sub_command; + lcb_context->type = beacon->lcb_type; + lcb_context->frequency = beacon->lcb_frequency; + lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; + lcb_context->rx_id = cmdiocb->iocb.ulpContext; + lcb_context->ndlp = lpfc_nlp_get(ndlp); + if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { + lpfc_printf_vlog(ndlp->vport, KERN_ERR, + LOG_ELS, "0193 failed to send mail box"); + lpfc_nlp_put(ndlp); + rjt_err = LSRJT_UNABLE_TPC; + goto rjt; + } + return 0; +rjt: + memset(&stat, 0, sizeof(stat)); + stat.un.b.lsRjtRsnCode = rjt_err; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 1; +} + + /** * lpfc_els_flush_rscn - Clean up any rscn activities with a vport * @vport: pointer to a host virtual N_Port data structure. @@ -6706,8 +7354,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS) + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { + spin_unlock_irq(shost->host_lock); goto dropit; + } + spin_unlock_irq(shost->host_lock); elsiocb->context1 = lpfc_nlp_get(ndlp); elsiocb->vport = vport; @@ -6751,7 +7404,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } - shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6821,6 +7473,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); break; + case ELS_CMD_LCB: + phba->fc_stat.elsRcvLCB++; + lpfc_els_rcv_lcb(vport, elsiocb, ndlp); + break; + case ELS_CMD_RDP: + phba->fc_stat.elsRcvRDP++; + lpfc_els_rcv_rdp(vport, elsiocb, ndlp); + break; case ELS_CMD_RSCN: phba->fc_stat.elsRcvRSCN++; lpfc_els_rcv_rscn(vport, elsiocb, ndlp); @@ -7586,7 +8246,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_do_scr_ns_plogi(phba, vport); goto out; fdisc_failed: - lpfc_vport_set_state(vport, FC_VPORT_FAILED); + if (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS) + lpfc_vport_set_state(vport, FC_VPORT_FAILED); /* Cancel discovery timer */ lpfc_can_disctmo(vport); lpfc_nlp_put(ndlp); @@ -7739,8 +8400,10 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus == IOSTAT_SUCCESS) { spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_NDISC_ACTIVE; vport->fc_flag &= ~FC_FABRIC; spin_unlock_irq(shost->host_lock); + lpfc_can_disctmo(vport); } } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2500f15d437f..ce96d5bf8ae7 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -106,6 +106,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_rport_data *rdata; struct lpfc_nodelist * ndlp; struct lpfc_vport *vport; + struct Scsi_Host *shost; struct lpfc_hba *phba; struct lpfc_work_evt *evtp; int put_node; @@ -146,48 +147,32 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) return; - if (ndlp->nlp_type & NLP_FABRIC) { - - /* If the WWPN of the rport and ndlp don't match, ignore it */ - if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, - "6789 
rport name %lx != node port name %lx", - (unsigned long)rport->port_name, - (unsigned long)wwn_to_u64( - ndlp->nlp_portname.u.wwn)); - put_node = rdata->pnode != NULL; - put_rport = ndlp->rport != NULL; - rdata->pnode = NULL; - ndlp->rport = NULL; - if (put_node) - lpfc_nlp_put(ndlp); - put_device(&rport->dev); - return; - } - - put_node = rdata->pnode != NULL; - put_rport = ndlp->rport != NULL; - rdata->pnode = NULL; - ndlp->rport = NULL; - if (put_node) - lpfc_nlp_put(ndlp); - if (put_rport) - put_device(&rport->dev); - return; - } + if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "6789 rport name %llx != node port name %llx", + rport->port_name, + wwn_to_u64(ndlp->nlp_portname.u.wwn)); evtp = &ndlp->dev_loss_evt; - if (!list_empty(&evtp->evt_listp)) + if (!list_empty(&evtp->evt_listp)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "6790 rport name %llx dev_loss_evt pending", + rport->port_name); return; + } - evtp->evt_arg1 = lpfc_nlp_get(ndlp); - ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS; + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + spin_unlock_irq(shost->host_lock); - spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + + spin_lock_irq(&phba->hbalock); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -215,22 +200,24 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) struct fc_rport *rport; struct lpfc_vport *vport; struct lpfc_hba *phba; + struct Scsi_Host *shost; uint8_t *name; int put_node; - int put_rport; int warn_on = 0; int fcf_inuse = 0; rport = ndlp->rport; + vport = ndlp->vport; + shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irq(shost->host_lock); - if (!rport) { - ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; + if (!rport) return fcf_inuse; - } - rdata = rport->dd_data; name = (uint8_t *) &ndlp->nlp_portname; - vport = ndlp->vport; phba = vport->phba; if (phba->sli_rev == LPFC_SLI_REV4) @@ -244,6 +231,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); + /* + * lpfc_nlp_remove if reached with dangling rport drops the + * reference. To make sure that does not happen clear rport + * pointer in ndlp before lpfc_nlp_put. + */ + rdata = rport->dd_data; + /* Don't defer this if we are in the process of deleting the vport * or unloading the driver. The unload will cleanup the node * appropriately we just need to cleanup the ndlp rport info here. 
@@ -256,14 +250,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_sid, 0, LPFC_CTX_TGT); } put_node = rdata->pnode != NULL; - put_rport = ndlp->rport != NULL; rdata->pnode = NULL; ndlp->rport = NULL; - ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; if (put_node) lpfc_nlp_put(ndlp); - if (put_rport) - put_device(&rport->dev); + put_device(&rport->dev); + return fcf_inuse; } @@ -275,28 +267,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; return fcf_inuse; } - if (ndlp->nlp_type & NLP_FABRIC) { - /* We will clean up these Nodes in linkup */ - put_node = rdata->pnode != NULL; - put_rport = ndlp->rport != NULL; - rdata->pnode = NULL; - ndlp->rport = NULL; - ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; - if (put_node) - lpfc_nlp_put(ndlp); - if (put_rport) - put_device(&rport->dev); + put_node = rdata->pnode != NULL; + rdata->pnode = NULL; + ndlp->rport = NULL; + if (put_node) + lpfc_nlp_put(ndlp); + put_device(&rport->dev); + + if (ndlp->nlp_type & NLP_FABRIC) return fcf_inuse; - } if (ndlp->nlp_sid != NLP_NO_SID) { warn_on = 1; - /* flush the target */ - ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], ndlp->nlp_sid, 0, LPFC_CTX_TGT); } @@ -321,16 +306,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_state, ndlp->nlp_rpi); } - put_node = rdata->pnode != NULL; - put_rport = ndlp->rport != NULL; - rdata->pnode = NULL; - ndlp->rport = NULL; - ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS; - if (put_node) - lpfc_nlp_put(ndlp); - if (put_rport) - put_device(&rport->dev); - if (!(vport->load_flag & FC_UNLOADING) && !(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && @@ -1802,7 +1777,7 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, dma_addr_t phys_addr; struct lpfc_mbx_sge sge; struct lpfc_mbx_read_fcf_tbl *read_fcf; - uint32_t shdr_status, shdr_add_status; + uint32_t shdr_status, shdr_add_status, if_type; union lpfc_sli4_cfg_shdr *shdr; struct fcf_record *new_fcf_record; @@ -1823,9 +1798,11 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, lpfc_sli_pcimem_bcopy(shdr, shdr, sizeof(union lpfc_sli4_cfg_shdr)); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { - if (shdr_status == STATUS_FCF_TABLE_EMPTY) + if (shdr_status == STATUS_FCF_TABLE_EMPTY || + if_type == LPFC_SLI_INTF_IF_TYPE_2) lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); @@ -3868,11 +3845,11 @@ out: if (vport->port_state < LPFC_VPORT_READY) { /* Link up discovery requires Fabric registration. */ - lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */ lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); + lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Issue SCR just before NameServer GID_FT Query */ lpfc_issue_els_scr(vport, SCR_DID, 0); @@ -3918,9 +3895,17 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * registered port, drop the reference that we took the last time we * registered the port. 
*/ - if (ndlp->rport && ndlp->rport->dd_data && - ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) - lpfc_nlp_put(ndlp); + rport = ndlp->rport; + if (rport) { + rdata = rport->dd_data; + /* break the link before dropping the ref */ + ndlp->rport = NULL; + if (rdata && rdata->pnode == ndlp) + lpfc_nlp_put(ndlp); + rdata->pnode = NULL; + /* drop reference for earlier registration */ + put_device(&rport->dev); + } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport add: did:x%x flg:x%x type x%x", @@ -4296,9 +4281,9 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (vport->phba->sli_rev == LPFC_SLI_REV4) { lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_unreg_rpi(vport, ndlp); - } else { - lpfc_nlp_put(ndlp); } + + lpfc_nlp_put(ndlp); return; } @@ -4510,7 +4495,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mbox; - int rc; + int rc, acc_plogi = 1; uint16_t rpi; if (ndlp->nlp_flag & NLP_RPI_REGISTERED || @@ -4543,14 +4528,20 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) mbox->context1 = lpfc_nlp_get(ndlp); mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; + /* + * accept PLOGIs after unreg_rpi_cmpl + */ + acc_plogi = 0; } else mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) + if (rc == MBX_NOT_FINISHED) { mempool_free(mbox, phba->mbox_mem_pool); + acc_plogi = 1; + } } lpfc_no_rpi(phba, ndlp); @@ -4558,8 +4549,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_rpi = 0; ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; ndlp->nlp_flag &= ~NLP_NPR_ADISC; + if (acc_plogi) + ndlp->nlp_flag &= ~NLP_LOGO_ACC; return 1; } + ndlp->nlp_flag &= ~NLP_LOGO_ACC; return 0; } @@ -4761,6 +4755,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata; + struct fc_rport *rport; LPFC_MBOXQ_t *mbox; int rc; @@ -4798,14 +4793,24 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_cleanup_node(vport, ndlp); /* - * We can get here with a non-NULL ndlp->rport because when we - * unregister a rport we don't break the rport/node linkage. So if we - * do, make sure we don't leaving any dangling pointers behind. + * ndlp->rport must be set to NULL before it reaches here + * i.e. break rport/node link before doing lpfc_nlp_put for + * registered rport and then drop the reference of rport. */ if (ndlp->rport) { - rdata = ndlp->rport->dd_data; + /* + * extra lpfc_nlp_put dropped the reference of ndlp + * for registered rport so need to cleanup rport + */ + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "0940 removed node x%p DID x%x " + " rport not null %p\n", + ndlp, ndlp->nlp_DID, ndlp->rport); + rport = ndlp->rport; + rdata = rport->dd_data; rdata->pnode = NULL; ndlp->rport = NULL; + put_device(&rport->dev); } } @@ -4833,9 +4838,19 @@ lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (matchdid.un.b.id == ndlpdid.un.b.id) { if ((mydid.un.b.domain == matchdid.un.b.domain) && (mydid.un.b.area == matchdid.un.b.area)) { + /* This code is supposed to match the ID + * for a private loop device that is + * connected to fl_port.
But we need to + * check that the port did not just go + * from pt2pt to fabric or we could end + * up matching ndlp->nlp_DID 000001 to + * fabric DID 0x20101 + */ if ((ndlpdid.un.b.domain == 0) && (ndlpdid.un.b.area == 0)) { - if (ndlpdid.un.b.id) + if (ndlpdid.un.b.id && + vport->phba->fc_topology == + LPFC_TOPOLOGY_LOOP) return 1; } return 0; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 37beb9dc1311..892c5257d87c 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -543,6 +543,7 @@ struct fc_vft_header { #define ELS_CMD_TEST 0x11000000 #define ELS_CMD_RRQ 0x12000000 #define ELS_CMD_REC 0x13000000 +#define ELS_CMD_RDP 0x18000000 #define ELS_CMD_PRLI 0x20100014 #define ELS_CMD_PRLO 0x21100014 #define ELS_CMD_PRLO_ACC 0x02100014 @@ -558,6 +559,7 @@ struct fc_vft_header { #define ELS_CMD_SCR 0x62000000 #define ELS_CMD_RNID 0x78000000 #define ELS_CMD_LIRR 0x7A000000 +#define ELS_CMD_LCB 0x81000000 #else /* __LITTLE_ENDIAN_BITFIELD */ #define ELS_CMD_MASK 0xffff #define ELS_RSP_MASK 0xff @@ -580,6 +582,7 @@ struct fc_vft_header { #define ELS_CMD_TEST 0x11 #define ELS_CMD_RRQ 0x12 #define ELS_CMD_REC 0x13 +#define ELS_CMD_RDP 0x18 #define ELS_CMD_PRLI 0x14001020 #define ELS_CMD_PRLO 0x14001021 #define ELS_CMD_PRLO_ACC 0x14001002 @@ -595,6 +598,7 @@ struct fc_vft_header { #define ELS_CMD_SCR 0x62 #define ELS_CMD_RNID 0x78 #define ELS_CMD_LIRR 0x7A +#define ELS_CMD_LCB 0x81 #endif /* @@ -1010,6 +1014,198 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */ } un; } ELS_PKT; +/* + * Link Cable Beacon (LCB) ELS Frame + */ + +struct fc_lcb_request_frame { + uint32_t lcb_command; /* ELS command opcode (0x81) */ + uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ +#define LPFC_LCB_ON 0x1 +#define LPFC_LCB_OFF 0x2 + uint8_t reserved[3]; + + uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ +#define LPFC_LCB_GREEN 0x1 +#define LPFC_LCB_AMBER 0x2 + uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ + uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ +}; + +/* + * Link Cable Beacon (LCB) ELS Response Frame + */ +struct fc_lcb_res_frame { + uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */ + uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ + uint8_t reserved[3]; + uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ + uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ + uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ +}; + +/* + * Read Diagnostic Parameters (RDP) ELS frame. 
+ */ +#define SFF_PG0_IDENT_SFP 0x3 + +#define SFP_FLAG_PT_OPTICAL 0x0 +#define SFP_FLAG_PT_SWLASER 0x01 +#define SFP_FLAG_PT_LWLASER_LC1310 0x02 +#define SFP_FLAG_PT_LWLASER_LL1550 0x03 +#define SFP_FLAG_PT_MASK 0x0F +#define SFP_FLAG_PT_SHIFT 0 + +#define SFP_FLAG_IS_OPTICAL_PORT 0x01 +#define SFP_FLAG_IS_OPTICAL_MASK 0x010 +#define SFP_FLAG_IS_OPTICAL_SHIFT 4 + +#define SFP_FLAG_IS_DESC_VALID 0x01 +#define SFP_FLAG_IS_DESC_VALID_MASK 0x020 +#define SFP_FLAG_IS_DESC_VALID_SHIFT 5 + +#define SFP_FLAG_CT_UNKNOWN 0x0 +#define SFP_FLAG_CT_SFP_PLUS 0x01 +#define SFP_FLAG_CT_MASK 0x3C +#define SFP_FLAG_CT_SHIFT 6 + +struct fc_rdp_port_name_info { + uint8_t wwnn[8]; + uint8_t wwpn[8]; +}; + + +/* + * Link Error Status Block Structure (FC-FS-3) for RDP + * This is similar to the RPS ELS + */ +struct fc_link_status { + uint32_t link_failure_cnt; + uint32_t loss_of_synch_cnt; + uint32_t loss_of_signal_cnt; + uint32_t primitive_seq_proto_err; + uint32_t invalid_trans_word; + uint32_t invalid_crc_cnt; + +}; + +#define RDP_PORT_NAMES_DESC_TAG 0x00010003 +struct fc_rdp_port_name_desc { + uint32_t tag; /* 0001 0003h */ + uint32_t length; /* set to size of payload struct */ + struct fc_rdp_port_name_info port_names; +}; + + +struct fc_rdp_link_error_status_payload_info { + struct fc_link_status link_status; /* 24 bytes */ + uint32_t port_type; /* bits 31-30 only */ +}; + +#define RDP_LINK_ERROR_STATUS_DESC_TAG 0x00010002 +struct fc_rdp_link_error_status_desc { + uint32_t tag; /* 0001 0002h */ + uint32_t length; /* set to size of payload struct */ + struct fc_rdp_link_error_status_payload_info info; +}; + +#define VN_PT_PHY_UNKNOWN 0x00 +#define VN_PT_PHY_PF_PORT 0x01 +#define VN_PT_PHY_ETH_MAC 0x10 +#define VN_PT_PHY_SHIFT 30 + +#define RDP_PS_1GB 0x8000 +#define RDP_PS_2GB 0x4000 +#define RDP_PS_4GB 0x2000 +#define RDP_PS_10GB 0x1000 +#define RDP_PS_8GB 0x0800 +#define RDP_PS_16GB 0x0400 +#define RDP_PS_32GB 0x0200 + +#define RDP_CAP_UNKNOWN 0x0001 +#define RDP_PS_UNKNOWN 0x0002 +#define RDP_PS_NOT_ESTABLISHED 0x0001 + +struct fc_rdp_port_speed { + uint16_t capabilities; + uint16_t speed; +}; + +struct fc_rdp_port_speed_info { + struct fc_rdp_port_speed port_speed; +}; + +#define RDP_PORT_SPEED_DESC_TAG 0x00010001 +struct fc_rdp_port_speed_desc { + uint32_t tag; /* 00010001h */ + uint32_t length; /* set to size of payload struct */ + struct fc_rdp_port_speed_info info; +}; + +#define RDP_NPORT_ID_SIZE 4 +#define RDP_N_PORT_DESC_TAG 0x00000003 +struct fc_rdp_nport_desc { + uint32_t tag; /* 0000 0003h, big endian */ + uint32_t length; /* size of RDP_N_PORT_ID struct */ + uint32_t nport_id : 12; + uint32_t reserved : 8; +}; + + +struct fc_rdp_link_service_info { + uint32_t els_req; /* Request payload word 0 value.*/ +}; + +#define RDP_LINK_SERVICE_DESC_TAG 0x00000001 +struct fc_rdp_link_service_desc { + uint32_t tag; /* Descriptor tag 1 */ + uint32_t length; /* set to size of payload struct.
*/ + struct fc_rdp_link_service_info payload; + /* must be ELS req Word 0(0x18) */ +}; + +struct fc_rdp_sfp_info { + uint16_t temperature; + uint16_t vcc; + uint16_t tx_bias; + uint16_t tx_power; + uint16_t rx_power; + uint16_t flags; +}; + +#define RDP_SFP_DESC_TAG 0x00010000 +struct fc_rdp_sfp_desc { + uint32_t tag; + uint32_t length; /* set to size of sfp_info struct */ + struct fc_rdp_sfp_info sfp_info; +}; + +struct fc_rdp_req_frame { + uint32_t rdp_command; /* ELS command opcode (0x18)*/ + uint32_t rdp_des_length; /* RDP Payload Word 1 */ + struct fc_rdp_nport_desc nport_id_desc; /* RDP Payload Word 2 - 4 */ +}; + + +struct fc_rdp_res_frame { + uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */ + uint32_t length; /* FC Word 1 */ + struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */ + struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */ + struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */ + struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */ + struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */ + struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */ +}; + + +#define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \ + + sizeof(struct fc_rdp_sfp_desc) \ + + sizeof(struct fc_rdp_port_speed_desc) \ + + sizeof(struct fc_rdp_link_error_status_desc) \ + + (sizeof(struct fc_rdp_port_name_desc) * 2)) + + /******** FDMI ********/ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ @@ -1587,6 +1783,11 @@ typedef struct { /* FireFly BIU registers */ #define TEMPERATURE_OFFSET 0xB0 /* Slim offset for critical temperature event */ /* + * return code Fail + */ +#define FAILURE 1 + +/* * Begin Structure Definitions for Mailbox Commands */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 1813c45946f4..33ec4fa39ccb 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -291,7 +291,7 @@ struct sli4_bls_rsp { struct lpfc_eqe { uint32_t word0; #define lpfc_eqe_resource_id_SHIFT 16 -#define lpfc_eqe_resource_id_MASK 0x000000FF +#define lpfc_eqe_resource_id_MASK 0x0000FFFF #define lpfc_eqe_resource_id_WORD word0 #define lpfc_eqe_minor_code_SHIFT 4 #define lpfc_eqe_minor_code_MASK 0x00000FFF @@ -914,6 +914,8 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D #define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG 0x3E #define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG 0x43 +#define LPFC_MBOX_OPCODE_SET_BEACON_CONFIG 0x45 +#define LPFC_MBOX_OPCODE_GET_BEACON_CONFIG 0x46 #define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A #define LPFC_MBOX_OPCODE_GET_VPD_DATA 0x5B @@ -1479,6 +1481,26 @@ struct lpfc_mbx_query_fw_config { } rsp; }; +struct lpfc_mbx_set_beacon_config { + struct mbox_header header; + uint32_t word4; +#define lpfc_mbx_set_beacon_port_num_SHIFT 0 +#define lpfc_mbx_set_beacon_port_num_MASK 0x0000003F +#define lpfc_mbx_set_beacon_port_num_WORD word4 +#define lpfc_mbx_set_beacon_port_type_SHIFT 6 +#define lpfc_mbx_set_beacon_port_type_MASK 0x00000003 +#define lpfc_mbx_set_beacon_port_type_WORD word4 +#define lpfc_mbx_set_beacon_state_SHIFT 8 +#define lpfc_mbx_set_beacon_state_MASK 0x000000FF +#define lpfc_mbx_set_beacon_state_WORD word4 +#define lpfc_mbx_set_beacon_duration_SHIFT 16 +#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF +#define lpfc_mbx_set_beacon_duration_WORD word4 +#define lpfc_mbx_set_beacon_status_duration_SHIFT 24 +#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF 
+#define lpfc_mbx_set_beacon_status_duration_WORD word4 +}; + struct lpfc_id_range { uint32_t word5; #define lpfc_mbx_rsrc_id_word4_0_SHIFT 0 @@ -1921,6 +1943,12 @@ struct lpfc_mbx_redisc_fcf_tbl { #define STATUS_FCF_IN_USE 0x3a #define STATUS_FCF_TABLE_EMPTY 0x43 +/* + * Additional status field for embedded SLI_CONFIG mailbox + * command. + */ +#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 + struct lpfc_mbx_sli4_config { struct mbox_header header; }; @@ -2433,6 +2461,205 @@ struct lpfc_mbx_supp_pages { #define LPFC_SLI4_PARAMETERS 2 }; +struct lpfc_mbx_memory_dump_type3 { + uint32_t word1; +#define lpfc_mbx_memory_dump_type3_type_SHIFT 0 +#define lpfc_mbx_memory_dump_type3_type_MASK 0x0000000f +#define lpfc_mbx_memory_dump_type3_type_WORD word1 +#define lpfc_mbx_memory_dump_type3_link_SHIFT 24 +#define lpfc_mbx_memory_dump_type3_link_MASK 0x000000ff +#define lpfc_mbx_memory_dump_type3_link_WORD word1 + uint32_t word2; +#define lpfc_mbx_memory_dump_type3_page_no_SHIFT 0 +#define lpfc_mbx_memory_dump_type3_page_no_MASK 0x0000ffff +#define lpfc_mbx_memory_dump_type3_page_no_WORD word2 +#define lpfc_mbx_memory_dump_type3_offset_SHIFT 16 +#define lpfc_mbx_memory_dump_type3_offset_MASK 0x0000ffff +#define lpfc_mbx_memory_dump_type3_offset_WORD word2 + uint32_t word3; +#define lpfc_mbx_memory_dump_type3_length_SHIFT 0 +#define lpfc_mbx_memory_dump_type3_length_MASK 0x00ffffff +#define lpfc_mbx_memory_dump_type3_length_WORD word3 + uint32_t addr_lo; + uint32_t addr_hi; + uint32_t return_len; +}; + +#define DMP_PAGE_A0 0xa0 +#define DMP_PAGE_A2 0xa2 +#define DMP_SFF_PAGE_A0_SIZE 256 +#define DMP_SFF_PAGE_A2_SIZE 256 + +#define SFP_WAVELENGTH_LC1310 1310 +#define SFP_WAVELENGTH_LL1550 1550 + + +/* + * * SFF-8472 TABLE 3.4 + * */ +#define SFF_PG0_CONNECTOR_UNKNOWN 0x00 /* Unknown */ +#define SFF_PG0_CONNECTOR_SC 0x01 /* SC */ +#define SFF_PG0_CONNECTOR_FC_COPPER1 0x02 /* FC style 1 copper connector */ +#define SFF_PG0_CONNECTOR_FC_COPPER2 0x03 /* FC style 2 copper connector */ +#define SFF_PG0_CONNECTOR_BNC 0x04 /* BNC / TNC */ +#define SFF_PG0_CONNECTOR__FC_COAX 0x05 /* FC coaxial headers */ +#define SFF_PG0_CONNECTOR_FIBERJACK 0x06 /* FiberJack */ +#define SFF_PG0_CONNECTOR_LC 0x07 /* LC */ +#define SFF_PG0_CONNECTOR_MT 0x08 /* MT - RJ */ +#define SFF_PG0_CONNECTOR_MU 0x09 /* MU */ +#define SFF_PG0_CONNECTOR_SF 0x0A /* SG */ +#define SFF_PG0_CONNECTOR_OPTICAL_PIGTAIL 0x0B /* Optical pigtail */ +#define SFF_PG0_CONNECTOR_OPTICAL_PARALLEL 0x0C /* MPO Parallel Optic */ +#define SFF_PG0_CONNECTOR_HSSDC_II 0x20 /* HSSDC II */ +#define SFF_PG0_CONNECTOR_COPPER_PIGTAIL 0x21 /* Copper pigtail */ +#define SFF_PG0_CONNECTOR_RJ45 0x22 /* RJ45 */ + +/* SFF-8472 Table 3.1 Diagnostics: Data Fields Address/Page A0 */ + +#define SSF_IDENTIFIER 0 +#define SSF_EXT_IDENTIFIER 1 +#define SSF_CONNECTOR 2 +#define SSF_TRANSCEIVER_CODE_B0 3 +#define SSF_TRANSCEIVER_CODE_B1 4 +#define SSF_TRANSCEIVER_CODE_B2 5 +#define SSF_TRANSCEIVER_CODE_B3 6 +#define SSF_TRANSCEIVER_CODE_B4 7 +#define SSF_TRANSCEIVER_CODE_B5 8 +#define SSF_TRANSCEIVER_CODE_B6 9 +#define SSF_TRANSCEIVER_CODE_B7 10 +#define SSF_ENCODING 11 +#define SSF_BR_NOMINAL 12 +#define SSF_RATE_IDENTIFIER 13 +#define SSF_LENGTH_9UM_KM 14 +#define SSF_LENGTH_9UM 15 +#define SSF_LENGTH_50UM_OM2 16 +#define SSF_LENGTH_62UM_OM1 17 +#define SFF_LENGTH_COPPER 18 +#define SSF_LENGTH_50UM_OM3 19 +#define SSF_VENDOR_NAME 20 +#define SSF_VENDOR_OUI 36 +#define SSF_VENDOR_PN 40 +#define SSF_VENDOR_REV 56 +#define SSF_WAVELENGTH_B1 60 +#define SSF_WAVELENGTH_B0 61 +#define 
SSF_CC_BASE 63 +#define SSF_OPTIONS_B1 64 +#define SSF_OPTIONS_B0 65 +#define SSF_BR_MAX 66 +#define SSF_BR_MIN 67 +#define SSF_VENDOR_SN 68 +#define SSF_DATE_CODE 84 +#define SSF_MONITORING_TYPEDIAGNOSTIC 92 +#define SSF_ENHANCED_OPTIONS 93 +#define SFF_8472_COMPLIANCE 94 +#define SSF_CC_EXT 95 +#define SSF_A0_VENDOR_SPECIFIC 96 + +/* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */ + +#define SSF_AW_THRESHOLDS 0 +#define SSF_EXT_CAL_CONSTANTS 56 +#define SSF_CC_DMI 95 +#define SFF_TEMPERATURE_B1 96 +#define SFF_TEMPERATURE_B0 97 +#define SFF_VCC_B1 98 +#define SFF_VCC_B0 99 +#define SFF_TX_BIAS_CURRENT_B1 100 +#define SFF_TX_BIAS_CURRENT_B0 101 +#define SFF_TXPOWER_B1 102 +#define SFF_TXPOWER_B0 103 +#define SFF_RXPOWER_B1 104 +#define SFF_RXPOWER_B0 105 +#define SSF_STATUS_CONTROL 110 +#define SSF_ALARM_FLAGS_B1 112 +#define SSF_ALARM_FLAGS_B0 113 +#define SSF_WARNING_FLAGS_B1 116 +#define SSF_WARNING_FLAGS_B0 117 +#define SSF_EXT_TATUS_CONTROL_B1 118 +#define SSF_EXT_TATUS_CONTROL_B0 119 +#define SSF_A2_VENDOR_SPECIFIC 120 +#define SSF_USER_EEPROM 128 +#define SSF_VENDOR_CONTROL 148 + + +/* + * Tranceiver codes Fibre Channel SFF-8472 + * Table 3.5. + */ + +struct sff_trasnceiver_codes_byte0 { + uint8_t inifiband:4; + uint8_t teng_ethernet:4; +}; + +struct sff_trasnceiver_codes_byte1 { + uint8_t sonet:6; + uint8_t escon:2; +}; + +struct sff_trasnceiver_codes_byte2 { + uint8_t soNet:8; +}; + +struct sff_trasnceiver_codes_byte3 { + uint8_t ethernet:8; +}; + +struct sff_trasnceiver_codes_byte4 { + uint8_t fc_el_lo:1; + uint8_t fc_lw_laser:1; + uint8_t fc_sw_laser:1; + uint8_t fc_md_distance:1; + uint8_t fc_lg_distance:1; + uint8_t fc_int_distance:1; + uint8_t fc_short_distance:1; + uint8_t fc_vld_distance:1; +}; + +struct sff_trasnceiver_codes_byte5 { + uint8_t reserved1:1; + uint8_t reserved2:1; + uint8_t fc_sfp_active:1; /* Active cable */ + uint8_t fc_sfp_passive:1; /* Passive cable */ + uint8_t fc_lw_laser:1; /* Longwave laser */ + uint8_t fc_sw_laser_sl:1; + uint8_t fc_sw_laser_sn:1; + uint8_t fc_el_hi:1; /* Electrical enclosure high bit */ +}; + +struct sff_trasnceiver_codes_byte6 { + uint8_t fc_tm_sm:1; /* Single Mode */ + uint8_t reserved:1; + uint8_t fc_tm_m6:1; /* Multimode, 62.5um (M6) */ + uint8_t fc_tm_tv:1; /* Video Coax (TV) */ + uint8_t fc_tm_mi:1; /* Miniature Coax (MI) */ + uint8_t fc_tm_tp:1; /* Twisted Pair (TP) */ + uint8_t fc_tm_tw:1; /* Twin Axial Pair */ +}; + +struct sff_trasnceiver_codes_byte7 { + uint8_t fc_sp_100MB:1; /* 100 MB/sec */ + uint8_t reserve:1; + uint8_t fc_sp_200mb:1; /* 200 MB/sec */ + uint8_t fc_sp_3200MB:1; /* 3200 MB/sec */ + uint8_t fc_sp_400MB:1; /* 400 MB/sec */ + uint8_t fc_sp_1600MB:1; /* 1600 MB/sec */ + uint8_t fc_sp_800MB:1; /* 800 MB/sec */ + uint8_t fc_sp_1200MB:1; /* 1200 MB/sec */ +}; + +/* User writable non-volatile memory, SFF-8472 Table 3.20 */ +struct user_eeprom { + uint8_t vendor_name[16]; + uint8_t vendor_oui[3]; + uint8_t vendor_pn[816]; + uint8_t vendor_rev[4]; + uint8_t vendor_sn[16]; + uint8_t datecode[6]; + uint8_t lot_code[2]; + uint8_t reserved191[57]; +}; + struct lpfc_mbx_pc_sli4_params { uint32_t word1; #define qs_SHIFT 0 @@ -3021,6 +3248,7 @@ struct lpfc_mqe { struct lpfc_mbx_request_features req_ftrs; struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; struct lpfc_mbx_query_fw_config query_fw_cfg; + struct lpfc_mbx_set_beacon_config beacon_config; struct lpfc_mbx_supp_pages supp_pages; struct lpfc_mbx_pc_sli4_params sli4_params; struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; @@ -3031,6 +3259,7 @@ struct 
lpfc_mqe { struct lpfc_mbx_get_prof_cfg get_prof_cfg; struct lpfc_mbx_wr_object wr_object; struct lpfc_mbx_get_port_name get_port_name; + struct lpfc_mbx_memory_dump_type3 mem_dump_type3; struct lpfc_mbx_nop nop; } un; }; @@ -3041,8 +3270,8 @@ struct lpfc_mcqe { #define lpfc_mcqe_status_MASK 0x0000FFFF #define lpfc_mcqe_status_WORD word0 #define lpfc_mcqe_ext_status_SHIFT 16 -#define lpfc_mcqe_ext_status_MASK 0x0000FFFF -#define lpfc_mcqe_ext_status_WORD word0 +#define lpfc_mcqe_ext_status_MASK 0x0000FFFF +#define lpfc_mcqe_ext_status_WORD word0 uint32_t mcqe_tag0; uint32_t mcqe_tag1; uint32_t trailer; @@ -3176,6 +3405,7 @@ struct lpfc_acqe_fc_la { #define LPFC_FC_LA_SPEED_8G 0x8 #define LPFC_FC_LA_SPEED_10G 0xA #define LPFC_FC_LA_SPEED_16G 0x10 +#define LPFC_FC_LA_SPEED_32G 0x20 #define lpfc_acqe_fc_la_topology_SHIFT 16 #define lpfc_acqe_fc_la_topology_MASK 0x000000FF #define lpfc_acqe_fc_la_topology_WORD word0 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index e8c8c1ecc1f5..f962118da8ed 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; + shost->nr_hw_queues = phba->cfg_fcp_io_channel; if (phba->sli_rev == LPFC_SLI_REV4) { shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; @@ -4483,7 +4484,13 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, lpfc_destroy_vport_work_array(phba, vports); } - if (active_vlink_present) { + /* + * Don't re-instantiate if vport is marked for deletion. + * If we are here first then vport_delete is going to wait + * for discovery to complete. + */ + if (!(vport->load_flag & FC_UNLOADING) && + active_vlink_present) { /* * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. 
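Two scsi-mq touches ride along in lpfc_init.c: lpfc_create_port above now exposes one blk-mq hardware queue per FCP I/O channel through shost->nr_hw_queues, and the lpfc_sli4_enable_msix hunk below skips lpfc_sli4_set_affinity when shost_use_blk_mq() is true, since blk-mq derives its own queue-to-CPU mapping. A minimal sketch of the first half, with foo_* again standing in for a hypothetical driver:

struct foo_port *fp;
struct Scsi_Host *shost;

shost = scsi_host_alloc(&foo_template, sizeof(*fp));
if (!shost)
	return NULL;
fp = shost_priv(shost);
fp->num_io_channels = nr_channels;	/* assumed driver configuration */

/* must precede scsi_add_host(): blk-mq sizes one hctx per queue */
shost->nr_hw_queues = fp->num_io_channels;

if (scsi_add_host(shost, dev)) {
	scsi_host_put(shost);
	return NULL;
}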
@@ -7500,6 +7507,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; + phba->sli4_hba.physical_port = + mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, @@ -8367,7 +8376,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) /* vector-0 is associated to slow-path handler */ rc = request_irq(phba->msix_entries[0].vector, - &lpfc_sli_sp_intr_handler, IRQF_SHARED, + &lpfc_sli_sp_intr_handler, 0, LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -8378,7 +8387,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba) /* vector-1 is associated to fast-path handler */ rc = request_irq(phba->msix_entries[1].vector, - &lpfc_sli_fp_intr_handler, IRQF_SHARED, + &lpfc_sli_fp_intr_handler, 0, LPFC_FP_DRIVER_HANDLER_NAME, phba); if (rc) { @@ -8487,7 +8496,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) } rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, - IRQF_SHARED, LPFC_DRIVER_NAME, phba); + 0, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -8944,13 +8953,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) if (phba->cfg_fof && (index == (vectors - 1))) rc = request_irq( phba->sli4_hba.msix_entries[index].vector, - &lpfc_sli4_fof_intr_handler, IRQF_SHARED, + &lpfc_sli4_fof_intr_handler, 0, (char *)&phba->sli4_hba.handler_name[index], &phba->sli4_hba.fcp_eq_hdl[index]); else rc = request_irq( phba->sli4_hba.msix_entries[index].vector, - &lpfc_sli4_hba_intr_handler, IRQF_SHARED, + &lpfc_sli4_hba_intr_handler, 0, (char *)&phba->sli4_hba.handler_name[index], &phba->sli4_hba.fcp_eq_hdl[index]); if (rc) { @@ -8972,7 +8981,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->cfg_fcp_io_channel = vectors; } - lpfc_sli4_set_affinity(phba, vectors); + if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: @@ -9050,7 +9060,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) } rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, - IRQF_SHARED, LPFC_DRIVER_NAME, phba); + 0, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 816f596cda60..eb627724417e 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2255,6 +2255,158 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox) return 0; } +void +lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + MAILBOX_t *mb; + int rc = FAILURE; + struct lpfc_rdp_context *rdp_context = + (struct lpfc_rdp_context *)(mboxq->context2); + + mb = &mboxq->u.mb; + if (mb->mbxStatus) + goto mbx_failed; + + memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR)); + + rc = SUCCESS; + +mbx_failed: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + rdp_context->cmpl(phba, rdp_context, rc); +} + +void +lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1; + struct lpfc_rdp_context *rdp_context = + (struct lpfc_rdp_context *)(mbox->context2); + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) + 
goto error; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, + DMP_SFF_PAGE_A2_SIZE); + + /* We don't need dma buffer for link stat. */ + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + + memset(mbox, 0, sizeof(*mbox)); + lpfc_read_lnk_stat(phba, mbox); + mbox->vport = rdp_context->ndlp->vport; + mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat; + mbox->context2 = (struct lpfc_rdp_context *) rdp_context; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) + goto error; + + return; + +error: + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + lpfc_sli4_mbox_cmd_free(phba, mbox); + rdp_context->cmpl(phba, rdp_context, FAILURE); +} + +void +lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + int rc; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1); + struct lpfc_rdp_context *rdp_context = + (struct lpfc_rdp_context *)(mbox->context2); + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) + goto error; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, + DMP_SFF_PAGE_A0_SIZE); + + memset(mbox, 0, sizeof(*mbox)); + + memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->context1 = mp; + mbox->vport = rdp_context->ndlp->vport; + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + + mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2; + mbox->context2 = (struct lpfc_rdp_context *) rdp_context; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto error; + + return; + +error: + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + lpfc_sli4_mbox_cmd_free(phba, mbox); + rdp_context->cmpl(phba, rdp_context, FAILURE); +} + + +/* + * lpfc_sli4_dump_page_a0 - Dump SFP page 0xA0 (identification data). + * @phba: pointer to the hba structure. + * @mbox: pointer to the lpfc mbox command to initialize. + * + * This function creates a SLI4 dump mailbox command to dump config + * type 3 page 0xA0.
+ */ +int +lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_dmabuf *mp = NULL; + + memset(mbox, 0, sizeof(*mbox)); + + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (mp) + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp || !mp->virt) { + kfree(mp); + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "3569 dump type 3 page 0xA0 allocation failed\n"); + return 1; + } + + memset(mp->virt, 0, LPFC_BPL_SIZE); + INIT_LIST_HEAD(&mp->list); + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + /* save address for completion */ + mbox->context1 = mp; + + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0); + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + + return 0; +} + /** * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command * @phba: pointer to the hba structure containing the FCF index and RQ ID. diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4cb9882af157..af3b38aba65e 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -661,7 +661,13 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_destroy_vport_work_array(phba, vports); } - if (active_vlink_present) { + /* + * Don't re-instantiate if vport is marked for deletion. + * If we are here first then vport_delete is going to wait + * for discovery to complete. + */ + if (!(vport->load_flag & FC_UNLOADING) && + active_vlink_present) { /* * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. @@ -1868,7 +1874,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= NLP_LOGO_ACC; + ndlp->nlp_flag |= NLP_LOGO_ACC; spin_unlock_irq(shost->host_lock); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c140f99772ca..e5eb40d2c512 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3257,7 +3257,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) */ nseg = scsi_dma_map(scsi_cmnd); - if (unlikely(!nseg)) + if (unlikely(nseg <= 0)) return 1; sgl += 1; /* clear the last flag in the fcp_rsp map entry */ @@ -3846,6 +3846,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /** + * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution + * @phba: Pointer to HBA context object. + * + * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index + * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock + * held. + * If scsi-mq is enabled, get the default block layer mapping of software queues + * to hardware queues. This information is saved in request tag. + * + * Return: index into SLI4 fast-path FCP queue index. 
+ **/ +int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; + struct lpfc_vector_map_info *cpup; + int chann, cpu; + uint32_t tag; + uint16_t hwq; + + if (shost_use_blk_mq(cmnd->device->host)) { + tag = blk_mq_unique_tag(cmnd->request); + hwq = blk_mq_unique_tag_to_hwq(tag); + + return hwq; + } + + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU + && phba->cfg_fcp_io_channel > 1) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; +} + + +/** * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine * @phba: The Hba for which this call is being executed. * @pIocbIn: The command IOCBQ for the scsi cmnd. @@ -4537,7 +4580,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (lpfc_cmd == NULL) { lpfc_rampdown_queue_depth(phba); - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC, "0707 driver's buffer pool is empty, " "IO busied\n"); goto out_host_busy; @@ -4968,13 +5011,16 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, iocbq, iocbqrsp, lpfc_cmd->timeout); if ((status != IOCB_SUCCESS) || (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) " - "iocb_flag x%x\n", - lpfc_taskmgmt_name(task_mgmt_cmd), - tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, - iocbqrsp->iocb.un.ulpWord[4], - iocbq->iocb_flag); + if (status != IOCB_SUCCESS || + iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0727 TMF %s to TGT %d LUN %llu " + "failed (%d, %d) iocb_flag x%x\n", + lpfc_taskmgmt_name(task_mgmt_cmd), + tgt_id, lun_id, + iocbqrsp->iocb.ulpStatus, + iocbqrsp->iocb.un.ulpWord[4], + iocbq->iocb_flag); /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ if (status == IOCB_SUCCESS) { if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) @@ -4988,7 +5034,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, } else { ret = FAILED; } - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; } else ret = SUCCESS; diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 474e30cdee6e..18b9260ccfac 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -184,3 +184,6 @@ struct lpfc_scsi_buf { #define FIND_FIRST_OAS_LUN 0 #define NO_MORE_OAS_LUN -1 #define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN + +int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 56f73682d4bd..4feb9312a447 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2249,7 +2249,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_usg_map, ndlp); - + ndlp->nlp_flag &= ~NLP_LOGO_ACC; lpfc_nlp_put(ndlp); } } @@ -8138,36 +8138,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, } /** - * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution - * @phba: Pointer to HBA context object. - * - * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index - * distribution. 
This is called by __lpfc_sli_issue_iocb_s4() with the hbalock - held. - * - * Return: index into SLI4 fast-path FCP queue index. - **/ -static inline int -lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) -{ - struct lpfc_vector_map_info *cpup; - int chann, cpu; - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU - && phba->cfg_fcp_io_channel > 1) { - cpu = smp_processor_id(); - if (cpu < phba->sli4_hba.num_present_cpu) { - cpup = phba->sli4_hba.cpu_map; - cpup += cpu; - return cpup->channel_id; - } - } - chann = atomic_add_return(1, &phba->fcp_qidx); - chann = (chann % phba->cfg_fcp_io_channel); - return chann; -} - -/** * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. * @phba: Pointer to HBA context object. * @piocb: Pointer to command iocb. @@ -8792,32 +8762,44 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) return 0; } +/** + * lpfc_sli_calc_ring - Calculates which ring to use + * @phba: Pointer to HBA context object. + * @ring_number: Initial ring + * @piocb: Pointer to command iocb. + * + * For SLI4, FCP IO can be deferred to one of many WQs, based on + * fcp_wqidx, thus we need to calculate the corresponding ring. + * Since ABORTs must go on the same WQ as the command they are + * aborting, we use the command's fcp_wqidx. + */ int lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb) { - uint32_t idx; + if (phba->sli_rev < LPFC_SLI_REV4) + return ring_number; - if (phba->sli_rev == LPFC_SLI_REV4) { - if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { + if (!(phba->cfg_fof) || + (!(piocb->iocb_flag & LPFC_IO_FOF))) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return LPFC_HBA_ERROR; /* - * fcp_wqidx should already be setup based on what - * completion queue we want to use. + * For an abort iocb, fcp_wqidx should already + * be set up based on what work queue we used.
*/ - if (!(phba->cfg_fof) || - (!(piocb->iocb_flag & LPFC_IO_FOF))) { - if (unlikely(!phba->sli4_hba.fcp_wq)) - return LPFC_HBA_ERROR; - idx = lpfc_sli4_scmd_to_wqidx_distr(phba); - piocb->fcp_wqidx = idx; - ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; - } else { - if (unlikely(!phba->sli4_hba.oas_wq)) - return LPFC_HBA_ERROR; - idx = 0; - piocb->fcp_wqidx = idx; - ring_number = LPFC_FCP_OAS_RING; - } + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) + piocb->fcp_wqidx = + lpfc_sli4_scmd_to_wqidx_distr(phba, + piocb->context1); + ring_number = MAX_SLI3_CONFIGURED_RINGS + + piocb->fcp_wqidx; + } else { + if (unlikely(!phba->sli4_hba.oas_wq)) + return LPFC_HBA_ERROR; + piocb->fcp_wqidx = 0; + ring_number = LPFC_FCP_OAS_RING; } } return ring_number; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 6eca3b8124d3..d1a5b057c6f3 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -602,6 +602,7 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + uint32_t physical_port; /* CPU to vector mapping information */ struct lpfc_vector_map_info *cpu_map; @@ -651,6 +652,26 @@ struct lpfc_rsrc_blks { uint16_t rsrc_used; }; +struct lpfc_rdp_context { + struct lpfc_nodelist *ndlp; + uint16_t ox_id; + uint16_t rx_id; + READ_LNK_VAR link_stat; + uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE]; + uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE]; + void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context*, int); +}; + +struct lpfc_lcb_context { + uint8_t sub_command; + uint8_t type; + uint8_t frequency; + uint16_t ox_id; + uint16_t rx_id; + struct lpfc_nodelist *ndlp; +}; + + /* * SLI4 specific function prototypes */ diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c37bb9f91c3b..6258d3d7722a 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "10.5.0.0." +#define LPFC_DRIVER_VERSION "10.7.0.0." #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index a87ee33f4f2a..769012663a8f 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -567,8 +567,8 @@ int lpfc_vport_delete(struct fc_vport *fc_vport) { struct lpfc_nodelist *ndlp = NULL; - struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost; struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; long timeout; bool ns_ndlp_referenced = false; @@ -645,8 +645,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport) } /* Remove FC host and then SCSI host with the vport */ - fc_remove_host(lpfc_shost_from_vport(vport)); - scsi_remove_host(lpfc_shost_from_vport(vport)); + fc_remove_host(shost); + scsi_remove_host(shost); ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); @@ -772,7 +772,8 @@ skip_logo: * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) * does the scsi_host_put() to release the vport. 
*/ - if (lpfc_mbx_unreg_vpi(vport)) + if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || + lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); } else scsi_host_put(shost); diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 0adb2e015597..141226631429 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -403,7 +403,6 @@ static struct scsi_host_template mac53c94_template = { .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 14e5c7cea929..20c37541963f 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -35,7 +35,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.806.08.00-rc1" +#define MEGASAS_VERSION "06.807.10.00-rc1" +#define MEGASAS_RELDATE "March 6, 2015" /* * Device IDs @@ -153,6 +154,9 @@ #define MFI_FRAME_DIR_BOTH 0x0018 #define MFI_FRAME_IEEE 0x0020 +/* Driver internal */ +#define DRV_DCMD_POLLED_MODE 0x1 + /* * Definition for cmd_status */ @@ -408,7 +412,7 @@ enum MR_PD_STATE { * defines the physical drive address structure */ struct MR_PD_ADDRESS { - u16 deviceId; + __le16 deviceId; u16 enclDeviceId; union { @@ -433,8 +437,8 @@ struct MR_PD_ADDRESS { * defines the physical drive list structure */ struct MR_PD_LIST { - u32 size; - u32 count; + __le32 size; + __le32 count; struct MR_PD_ADDRESS addr[1]; } __packed; @@ -451,28 +455,28 @@ union MR_LD_REF { struct { u8 targetId; u8 reserved; - u16 seqNum; + __le16 seqNum; }; - u32 ref; + __le32 ref; } __packed; /* * defines the logical drive list structure */ struct MR_LD_LIST { - u32 ldCount; - u32 reserved; + __le32 ldCount; + __le32 reserved; struct { union MR_LD_REF ref; u8 state; u8 reserved[3]; - u64 size; + __le64 size; } ldList[MAX_LOGICAL_DRIVES_EXT]; } __packed; struct MR_LD_TARGETID_LIST { - u32 size; - u32 count; + __le32 size; + __le32 count; u8 pad[3]; u8 targetId[MAX_LOGICAL_DRIVES_EXT]; }; @@ -553,7 +557,7 @@ struct megasas_ctrl_prop { } OnOffProperties; u8 autoSnapVDSpace; u8 viewSpace; - u16 spinDownTime; + __le16 spinDownTime; u8 reserved[24]; } __packed; @@ -567,10 +571,10 @@ struct megasas_ctrl_info { */ struct { - u16 vendor_id; - u16 device_id; - u16 sub_vendor_id; - u16 sub_device_id; + __le16 vendor_id; + __le16 device_id; + __le16 sub_vendor_id; + __le16 sub_device_id; u8 reserved[24]; } __attribute__ ((packed)) pci; @@ -611,8 +615,8 @@ struct megasas_ctrl_info { /* * List of components residing in flash. All str are null terminated */ - u32 image_check_word; - u32 image_component_count; + __le32 image_check_word; + __le32 image_component_count; struct { @@ -629,7 +633,7 @@ struct megasas_ctrl_info { * empty if a flash operation has not occurred. 
All stings are null * terminated */ - u32 pending_image_component_count; + __le32 pending_image_component_count; struct { @@ -662,39 +666,39 @@ struct megasas_ctrl_info { } __attribute__ ((packed)) hw_present; - u32 current_fw_time; + __le32 current_fw_time; /* * Maximum data transfer sizes */ - u16 max_concurrent_cmds; - u16 max_sge_count; - u32 max_request_size; + __le16 max_concurrent_cmds; + __le16 max_sge_count; + __le32 max_request_size; /* * Logical and physical device counts */ - u16 ld_present_count; - u16 ld_degraded_count; - u16 ld_offline_count; + __le16 ld_present_count; + __le16 ld_degraded_count; + __le16 ld_offline_count; - u16 pd_present_count; - u16 pd_disk_present_count; - u16 pd_disk_pred_failure_count; - u16 pd_disk_failed_count; + __le16 pd_present_count; + __le16 pd_disk_present_count; + __le16 pd_disk_pred_failure_count; + __le16 pd_disk_failed_count; /* * Memory size information */ - u16 nvram_size; - u16 memory_size; - u16 flash_size; + __le16 nvram_size; + __le16 memory_size; + __le16 flash_size; /* * Error counters */ - u16 mem_correctable_error_count; - u16 mem_uncorrectable_error_count; + __le16 mem_correctable_error_count; + __le16 mem_uncorrectable_error_count; /* * Cluster information @@ -705,7 +709,7 @@ struct megasas_ctrl_info { /* * Additional max data transfer sizes */ - u16 max_strips_per_io; + __le16 max_strips_per_io; /* * Controller capabilities structures @@ -805,7 +809,7 @@ struct megasas_ctrl_info { * deviceInterface.portAddr, and the rest shall be * populated in deviceInterfacePortAddr2. */ - u64 deviceInterfacePortAddr2[8]; /*6a0h */ + __le64 deviceInterfacePortAddr2[8]; /*6a0h */ u8 reserved3[128]; /*6e0h */ struct { /*760h */ @@ -842,26 +846,26 @@ struct megasas_ctrl_info { u16 reserved[6]; } pdsForRaidLevels; - u16 maxPds; /*780h */ - u16 maxDedHSPs; /*782h */ - u16 maxGlobalHSPs; /*784h */ - u16 ddfSize; /*786h */ + __le16 maxPds; /*780h */ + __le16 maxDedHSPs; /*782h */ + __le16 maxGlobalHSP; /*784h */ + __le16 ddfSize; /*786h */ u8 maxLdsPerArray; /*788h */ u8 partitionsInDDF; /*789h */ u8 lockKeyBinding; /*78ah */ u8 maxPITsPerLd; /*78bh */ u8 maxViewsPerLd; /*78ch */ u8 maxTargetId; /*78dh */ - u16 maxBvlVdSize; /*78eh */ + __le16 maxBvlVdSize; /*78eh */ - u16 maxConfigurableSSCSize; /*790h */ - u16 currentSSCsize; /*792h */ + __le16 maxConfigurableSSCSize; /*790h */ + __le16 currentSSCsize; /*792h */ char expanderFwVersion[12]; /*794h */ - u16 PFKTrialTimeRemaining; /*7A0h */ + __le16 PFKTrialTimeRemaining; /*7A0h */ - u16 cacheMemorySize; /*7A2h */ + __le16 cacheMemorySize; /*7A2h */ struct { /*7A4h */ #if defined(__BIG_ENDIAN_BITFIELD) @@ -931,7 +935,7 @@ struct megasas_ctrl_info { u8 temperatureROC; /*7C9h */ u8 temperatureCtrl; /*7CAh */ u8 reserved4; /*7CBh */ - u16 maxConfigurablePds; /*7CCh */ + __le16 maxConfigurablePds; /*7CCh */ u8 reserved5[2]; /*0x7CDh */ @@ -1042,11 +1046,6 @@ struct megasas_ctrl_info { #define VD_EXT_DEBUG 0 -enum MR_MFI_MPT_PTHR_FLAGS { - MFI_MPT_DETACHED = 0, - MFI_LIST_ADDED = 1, - MFI_MPT_ATTACHED = 2, -}; enum MR_SCSI_CMD_TYPE { READ_WRITE_LDIO = 0, @@ -1084,6 +1083,7 @@ enum MR_SCSI_CMD_TYPE { #define MEGASAS_SKINNY_INT_CMDS 5 #define MEGASAS_FUSION_INTERNAL_CMDS 5 #define MEGASAS_FUSION_IOCTL_CMDS 3 +#define MEGASAS_MFI_IOCTL_CMDS 27 #define MEGASAS_MAX_MSIX_QUEUES 128 /* @@ -1172,22 +1172,22 @@ struct megasas_register_set { struct megasas_sge32 { - u32 phys_addr; - u32 length; + __le32 phys_addr; + __le32 length; } __attribute__ ((packed)); struct megasas_sge64 { - u64 phys_addr; - u32 length; + 
__le64 phys_addr; + __le32 length; } __attribute__ ((packed)); struct megasas_sge_skinny { - u64 phys_addr; - u32 length; - u32 flag; + __le64 phys_addr; + __le32 length; + __le32 flag; } __packed; union megasas_sgl { @@ -1210,12 +1210,12 @@ struct megasas_header { u8 cdb_len; /*06h */ u8 sge_count; /*07h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 timeout; /*12h */ - u32 data_xferlen; /*14h */ + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + __le32 data_xferlen; /*14h */ } __attribute__ ((packed)); @@ -1248,7 +1248,7 @@ typedef union _MFI_CAPABILITIES { u32 reserved:25; #endif } mfi_capabilities; - u32 reg; + __le32 reg; } MFI_CAPABILITIES; struct megasas_init_frame { @@ -1260,33 +1260,35 @@ struct megasas_init_frame { u8 reserved_1; /*03h */ MFI_CAPABILITIES driver_operations; /*04h*/ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ - - u16 flags; /*10h */ - u16 reserved_3; /*12h */ - u32 data_xfer_len; /*14h */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u32 queue_info_new_phys_addr_lo; /*18h */ - u32 queue_info_new_phys_addr_hi; /*1Ch */ - u32 queue_info_old_phys_addr_lo; /*20h */ - u32 queue_info_old_phys_addr_hi; /*24h */ + __le16 flags; /*10h */ + __le16 reserved_3; /*12h */ + __le32 data_xfer_len; /*14h */ - u32 reserved_4[6]; /*28h */ + __le32 queue_info_new_phys_addr_lo; /*18h */ + __le32 queue_info_new_phys_addr_hi; /*1Ch */ + __le32 queue_info_old_phys_addr_lo; /*20h */ + __le32 queue_info_old_phys_addr_hi; /*24h */ + __le32 reserved_4[2]; /*28h */ + __le32 system_info_lo; /*30h */ + __le32 system_info_hi; /*34h */ + __le32 reserved_5[2]; /*38h */ } __attribute__ ((packed)); struct megasas_init_queue_info { - u32 init_flags; /*00h */ - u32 reply_queue_entries; /*04h */ + __le32 init_flags; /*00h */ + __le32 reply_queue_entries; /*04h */ - u32 reply_queue_start_phys_addr_lo; /*08h */ - u32 reply_queue_start_phys_addr_hi; /*0Ch */ - u32 producer_index_phys_addr_lo; /*10h */ - u32 producer_index_phys_addr_hi; /*14h */ - u32 consumer_index_phys_addr_lo; /*18h */ - u32 consumer_index_phys_addr_hi; /*1Ch */ + __le32 reply_queue_start_phys_addr_lo; /*08h */ + __le32 reply_queue_start_phys_addr_hi; /*0Ch */ + __le32 producer_index_phys_addr_lo; /*10h */ + __le32 producer_index_phys_addr_hi; /*14h */ + __le32 consumer_index_phys_addr_lo; /*18h */ + __le32 consumer_index_phys_addr_hi; /*1Ch */ } __attribute__ ((packed)); @@ -1302,18 +1304,18 @@ struct megasas_io_frame { u8 reserved_0; /*06h */ u8 sge_count; /*07h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 timeout; /*12h */ - u32 lba_count; /*14h */ + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + __le32 lba_count; /*14h */ - u32 sense_buf_phys_addr_lo; /*18h */ - u32 sense_buf_phys_addr_hi; /*1Ch */ + __le32 sense_buf_phys_addr_lo; /*18h */ + __le32 sense_buf_phys_addr_hi; /*1Ch */ - u32 start_lba_lo; /*20h */ - u32 start_lba_hi; /*24h */ + __le32 start_lba_lo; /*20h */ + __le32 start_lba_hi; /*24h */ union megasas_sgl sgl; /*28h */ @@ -1331,15 +1333,15 @@ struct megasas_pthru_frame { u8 cdb_len; /*06h */ u8 sge_count; /*07h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 timeout; /*12h */ - u32 data_xfer_len; /*14h */ + __le16 flags; /*10h */ + __le16 timeout; /*12h */ + __le32 data_xfer_len; /*14h */ - u32 sense_buf_phys_addr_lo; /*18h */ - u32 sense_buf_phys_addr_hi; 
/*1Ch */ + __le32 sense_buf_phys_addr_lo; /*18h */ + __le32 sense_buf_phys_addr_hi; /*1Ch */ u8 cdb[16]; /*20h */ union megasas_sgl sgl; /*30h */ @@ -1354,19 +1356,19 @@ struct megasas_dcmd_frame { u8 reserved_1[4]; /*03h */ u8 sge_count; /*07h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 timeout; /*12h */ + __le16 flags; /*10h */ + __le16 timeout; /*12h */ - u32 data_xfer_len; /*14h */ - u32 opcode; /*18h */ + __le32 data_xfer_len; /*14h */ + __le32 opcode; /*18h */ union { /*1Ch */ u8 b[12]; - u16 s[6]; - u32 w[3]; + __le16 s[6]; + __le32 w[3]; } mbox; union megasas_sgl sgl; /*28h */ @@ -1380,22 +1382,22 @@ struct megasas_abort_frame { u8 cmd_status; /*02h */ u8 reserved_1; /*03h */ - u32 reserved_2; /*04h */ + __le32 reserved_2; /*04h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 reserved_3; /*12h */ - u32 reserved_4; /*14h */ + __le16 flags; /*10h */ + __le16 reserved_3; /*12h */ + __le32 reserved_4; /*14h */ - u32 abort_context; /*18h */ - u32 pad_1; /*1Ch */ + __le32 abort_context; /*18h */ + __le32 pad_1; /*1Ch */ - u32 abort_mfi_phys_addr_lo; /*20h */ - u32 abort_mfi_phys_addr_hi; /*24h */ + __le32 abort_mfi_phys_addr_lo; /*20h */ + __le32 abort_mfi_phys_addr_hi; /*24h */ - u32 reserved_5[6]; /*28h */ + __le32 reserved_5[6]; /*28h */ } __attribute__ ((packed)); @@ -1409,14 +1411,14 @@ struct megasas_smp_frame { u8 reserved_2[3]; /*04h */ u8 sge_count; /*07h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 timeout; /*12h */ + __le16 flags; /*10h */ + __le16 timeout; /*12h */ - u32 data_xfer_len; /*14h */ - u64 sas_addr; /*18h */ + __le32 data_xfer_len; /*14h */ + __le64 sas_addr; /*18h */ union { struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */ @@ -1436,16 +1438,16 @@ struct megasas_stp_frame { u8 reserved_3[2]; /*05h */ u8 sge_count; /*07h */ - u32 context; /*08h */ - u32 pad_0; /*0Ch */ + __le32 context; /*08h */ + __le32 pad_0; /*0Ch */ - u16 flags; /*10h */ - u16 timeout; /*12h */ + __le16 flags; /*10h */ + __le16 timeout; /*12h */ - u32 data_xfer_len; /*14h */ + __le32 data_xfer_len; /*14h */ - u16 fis[10]; /*18h */ - u32 stp_flags; + __le16 fis[10]; /*18h */ + __le32 stp_flags; union { struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */ @@ -1489,18 +1491,18 @@ union megasas_evt_class_locale { } __attribute__ ((packed)); struct megasas_evt_log_info { - u32 newest_seq_num; - u32 oldest_seq_num; - u32 clear_seq_num; - u32 shutdown_seq_num; - u32 boot_seq_num; + __le32 newest_seq_num; + __le32 oldest_seq_num; + __le32 clear_seq_num; + __le32 shutdown_seq_num; + __le32 boot_seq_num; } __attribute__ ((packed)); struct megasas_progress { - u16 progress; - u16 elapsed_seconds; + __le16 progress; + __le16 elapsed_seconds; } __attribute__ ((packed)); @@ -1521,9 +1523,9 @@ struct megasas_evtarg_pd { struct megasas_evt_detail { - u32 seq_num; - u32 time_stamp; - u32 code; + __le32 seq_num; + __le32 time_stamp; + __le32 code; union megasas_evt_class_locale cl; u8 arg_type; u8 reserved1[15]; @@ -1542,18 +1544,18 @@ struct megasas_evt_detail { struct { struct megasas_evtarg_ld ld; - u64 count; + __le64 count; } __attribute__ ((packed)) ld_count; struct { - u64 lba; + __le64 lba; struct megasas_evtarg_ld ld; } __attribute__ ((packed)) ld_lba; struct { struct megasas_evtarg_ld ld; - u32 prevOwner; - u32 newOwner; + __le32 prevOwner; + 
__le32 newOwner; } __attribute__ ((packed)) ld_owner; struct { @@ -1610,7 +1612,7 @@ struct megasas_evt_detail { struct { u16 vendorId; - u16 deviceId; + __le16 deviceId; u16 subVendorId; u16 subDeviceId; } __attribute__ ((packed)) pci; @@ -1630,9 +1632,9 @@ struct megasas_evt_detail { } __attribute__ ((packed)) ecc; u8 b[96]; - u16 s[48]; - u32 w[24]; - u64 d[12]; + __le16 s[48]; + __le32 w[24]; + __le64 d[12]; } args; char description[128]; @@ -1649,12 +1651,22 @@ struct megasas_irq_context { u32 MSIxIndex; }; +struct MR_DRV_SYSTEM_INFO { + u8 infoVersion; + u8 systemIdLength; + u16 reserved0; + u8 systemId[64]; + u8 reserved[1980]; +}; + struct megasas_instance { - u32 *producer; + __le32 *producer; dma_addr_t producer_h; - u32 *consumer; + __le32 *consumer; dma_addr_t consumer_h; + struct MR_DRV_SYSTEM_INFO *system_info_buf; + dma_addr_t system_info_h; struct MR_LD_VF_AFFILIATION *vf_affiliation; dma_addr_t vf_affiliation_h; struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111; @@ -1662,7 +1674,7 @@ struct megasas_instance { struct MR_CTRL_HB_HOST_MEM *hb_host_mem; dma_addr_t hb_host_mem_h; - u32 *reply_queue; + __le32 *reply_queue; dma_addr_t reply_queue_h; u32 *crash_dump_buf; @@ -1681,7 +1693,7 @@ struct megasas_instance { spinlock_t crashdump_lock; struct megasas_register_set __iomem *reg_set; - u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; + u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; u8 ld_ids[MEGASAS_MAX_LD_IDS]; @@ -1769,6 +1781,7 @@ struct megasas_instance { u16 throttlequeuedepth; u8 mask_interrupts; u8 is_imr; + bool dev_handle; }; struct MR_LD_VF_MAP { u32 size; @@ -1864,9 +1877,13 @@ struct megasas_instance_template { #define MEGASAS_IS_LOGICAL(scp) \ (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 
0 : 1 -#define MEGASAS_DEV_INDEX(inst, scp) \ - ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ - scp->device->id +#define MEGASAS_DEV_INDEX(scp) \ + (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id) + +#define MEGASAS_PD_INDEX(scp) \ + ((scp->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id) struct megasas_cmd { @@ -1877,17 +1894,14 @@ struct megasas_cmd { u32 index; u8 sync_cmd; - u8 cmd_status; + u8 cmd_status_drv; u8 abort_aen; u8 retry_for_fw_reset; struct list_head list; struct scsi_cmnd *scmd; - - void *mpt_pthr_cmd_blocked; - atomic_t mfi_mpt_pthr; - u8 is_wait_event; + u8 flags; struct megasas_instance *instance; union { @@ -1963,10 +1977,10 @@ u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); -u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); +__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); -u16 get_updated_dev_handle(struct megasas_instance *instance, +__le16 get_updated_dev_handle(struct megasas_instance *instance, struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 890637fdd61e..71b884dae27c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -94,8 +94,8 @@ MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Defau MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); -MODULE_AUTHOR("megaraidlinux@lsi.com"); -MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); +MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com"); +MODULE_DESCRIPTION("Avago MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); @@ -215,7 +215,6 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance cmd = list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); - atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED); } else { printk(KERN_ERR "megasas: Command pool empty!\n"); } @@ -225,52 +224,41 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance } /** - * __megasas_return_cmd - Return a cmd to free command pool + * megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ inline void -__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - /* - * Don't go ahead and free the MFI frame, if corresponding - * MPT frame is not freed(valid for only fusion adapters). - * In case of MFI adapters, anyways for any allocated MFI - * frame will have cmd->mfi_mpt_mpthr set to MFI_MPT_DETACHED + unsigned long flags; + u32 blk_tags; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion = instance->ctrl_context; + + /* This flag is used only for fusion adapter. 
+ * Wait for Interrupt for Polled mode DCMD */ - if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED) + if (cmd->flags & DRV_DCMD_POLLED_MODE) return; + spin_lock_irqsave(&instance->mfi_pool_lock, flags); + + if (fusion) { + blk_tags = instance->max_scsi_cmds + cmd->index; + cmd_fusion = fusion->cmd_list[blk_tags]; + megasas_return_cmd_fusion(instance, cmd_fusion); + } cmd->scmd = NULL; cmd->frame_count = 0; - cmd->is_wait_event = 0; - cmd->mpt_pthr_cmd_blocked = NULL; - - if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && - (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && - (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && - (reset_devices)) + cmd->flags = 0; + if (!fusion && reset_devices) cmd->frame->hdr.cmd = MFI_CMD_INVALID; - - atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED); list_add(&cmd->list, (&instance->cmd_pool)->next); -} -/** - * megasas_return_cmd - Return a cmd to free command pool - * @instance: Adapter soft state - * @cmd: Command packet to be returned to free command pool - */ -inline void -megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) -{ - unsigned long flags; - - spin_lock_irqsave(&instance->mfi_pool_lock, flags); - __megasas_return_cmd(instance, cmd); spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); -} +} /** * The following functions are defined for xscale @@ -814,8 +802,8 @@ megasas_adp_reset_gen2(struct megasas_instance *instance, { u32 retry = 0 ; u32 HostDiag; - u32 *seq_offset = ®_set->seq_offset; - u32 *hostdiag_offset = ®_set->host_diag; + u32 __iomem *seq_offset = ®_set->seq_offset; + u32 __iomem *hostdiag_offset = ®_set->host_diag; if (instance->instancet == &megasas_instance_template_skinny) { seq_offset = ®_set->fusion_seq_offset; @@ -910,7 +898,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion; * @instance: Adapter soft state * @cmd: Command packet to be issued * - * For polling, MFI requires the cmd_status to be set to 0xFF before posting. + * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting. */ int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) @@ -952,20 +940,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, int timeout) { int ret = 0; - cmd->cmd_status = ENODATA; + cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; - cmd->is_wait_event = 1; instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, - cmd->cmd_status != ENODATA, timeout * HZ); + cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); if (!ret) return 1; } else wait_event(instance->int_cmd_wait_q, - cmd->cmd_status != ENODATA); + cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); - return 0; + return (cmd->cmd_status_drv == MFI_STAT_OK) ? 
+ 0 : 1; } /** @@ -998,7 +986,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, * Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; - abort_fr->cmd_status = 0xFF; + abort_fr->cmd_status = MFI_STAT_INVALID_STATUS; abort_fr->flags = cpu_to_le16(0); abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); abort_fr->abort_mfi_phys_addr_lo = @@ -1007,13 +995,13 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; - cmd->cmd_status = ENODATA; + cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, - cmd->cmd_status != ENODATA, timeout * HZ); + cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); if (!ret) { dev_err(&instance->pdev->dev, "Command timedout" "from %s\n", __func__); @@ -1021,7 +1009,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, } } else wait_event(instance->abort_cmd_wait_q, - cmd->cmd_status != ENODATA); + cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); cmd->sync_cmd = 0; @@ -1196,7 +1184,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_pthru_frame *pthru; is_logical = MEGASAS_IS_LOGICAL(scp); - device_id = MEGASAS_DEV_INDEX(instance, scp); + device_id = MEGASAS_DEV_INDEX(scp); pthru = (struct megasas_pthru_frame *)cmd->frame; if (scp->sc_data_direction == PCI_DMA_TODEVICE) @@ -1232,7 +1220,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, */ if (scp->device->type == TYPE_TAPE) { if ((scp->request->timeout / HZ) > 0xFFFF) - pthru->timeout = 0xFFFF; + pthru->timeout = cpu_to_le16(0xFFFF); else pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); } @@ -1294,7 +1282,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, u16 flags = 0; struct megasas_io_frame *ldio; - device_id = MEGASAS_DEV_INDEX(instance, scp); + device_id = MEGASAS_DEV_INDEX(scp); ldio = (struct megasas_io_frame *)cmd->frame; if (scp->sc_data_direction == PCI_DMA_TODEVICE) @@ -1698,7 +1686,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev) * @instance: Adapter soft state * */ -void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) +static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) { int i; struct megasas_cmd *cmd_mfi; @@ -1922,22 +1910,24 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance, memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_BOTH; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); - dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111; + dcmd->data_xfer_len = + cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111); if (initial) dcmd->sgl.sge32[0].phys_addr = - instance->vf_affiliation_111_h; + cpu_to_le32(instance->vf_affiliation_111_h); else - dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h; + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(new_affiliation_111_h); - dcmd->sgl.sge32[0].length = - sizeof(struct MR_LD_VF_AFFILIATION_111); + dcmd->sgl.sge32[0].length = cpu_to_le32( + sizeof(struct MR_LD_VF_AFFILIATION_111)); 
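/*
 * A note on the conversions in the hunk above, as a minimal sketch: this
 * series re-types MFI frame fields as __le16/__le32 and converts each
 * CPU-order value exactly once at the point of assignment, so sparse can
 * flag any bare store. Kernel headers are assumed; the example_* struct
 * and helper below are illustrative only, not part of the driver:
 */
struct example_dcmd_hdr {
	__le16 flags;
	__le32 data_xfer_len;
	__le32 opcode;
};

static void example_fill_dcmd_hdr(struct example_dcmd_hdr *hdr,
				  u32 opcode, u32 xfer_len)
{
	/* the firmware parses these fields as little-endian */
	hdr->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
	hdr->data_xfer_len = cpu_to_le32(xfer_len);
	hdr->opcode = cpu_to_le32(opcode);
}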
printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); @@ -1976,11 +1966,7 @@ out: new_affiliation_111_h); } - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return retval; } @@ -2037,22 +2023,24 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance, memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_BOTH; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION); - dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS; + dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS); if (initial) - dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h; + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(instance->vf_affiliation_h); else - dcmd->sgl.sge32[0].phys_addr = new_affiliation_h; + dcmd->sgl.sge32[0].phys_addr = + cpu_to_le32(new_affiliation_h); - dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * - sizeof(struct MR_LD_VF_AFFILIATION); + dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " "scsi%d\n", instance->host->host_no); @@ -2147,11 +2135,7 @@ out: (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION), new_affiliation, new_affiliation_h); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return retval; } @@ -2204,39 +2188,33 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance, memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); - dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM); + dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM)); dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_BOTH; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM); - dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC; - dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h; - dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM)); printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n", instance->host->host_no); - if (!megasas_issue_polled(instance, cmd)) { - retval = 0; - } else { - printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" - "_MEM_ALLOC DCMD timed out for scsi%d\n", - instance->host->host_no); - retval = 1; - goto out; - } - + if (instance->ctrl_context && !instance->mask_interrupts) + retval = megasas_issue_blocked_cmd(instance, cmd, + MEGASAS_ROUTINE_WAIT_TIME_VF); + else + retval = 
megasas_issue_polled(instance, cmd); - if (dcmd->cmd_status) { - printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" - "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n", - dcmd->cmd_status, - instance->host->host_no); + if (retval) { + dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST" + "_MEM_ALLOC DCMD %s for scsi%d\n", + (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ? + "timed out" : "failed", instance->host->host_no); retval = 1; - goto out; } out: @@ -2332,7 +2310,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) "reset queue\n", reset_cmd); - reset_cmd->cmd_status = ENODATA; + reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; instance->instancet->fire_cmd(instance, reset_cmd->frame_phys_addr, 0, instance->reg_set); @@ -2612,11 +2590,7 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) instance->aen_cmd = NULL; - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); if ((instance->unload == 0) && ((instance->issuepend_done == 1))) { @@ -2786,7 +2760,7 @@ struct device_attribute *megaraid_host_attrs[] = { static struct scsi_host_template megasas_template = { .module = THIS_MODULE, - .name = "LSI SAS based MegaRAID driver", + .name = "Avago SAS based MegaRAID driver", .proc_name = "megaraid_sas", .slave_configure = megasas_slave_configure, .slave_alloc = megasas_slave_alloc, @@ -2815,11 +2789,7 @@ static void megasas_complete_int_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - cmd->cmd_status = cmd->frame->io.cmd_status; - - if (cmd->cmd_status == ENODATA) { - cmd->cmd_status = 0; - } + cmd->cmd_status_drv = cmd->frame->io.cmd_status; wake_up(&instance->int_cmd_wait_q); } @@ -2838,7 +2808,7 @@ megasas_complete_abort(struct megasas_instance *instance, { if (cmd->sync_cmd) { cmd->sync_cmd = 0; - cmd->cmd_status = 0; + cmd->cmd_status_drv = 0; wake_up(&instance->abort_cmd_wait_q); } @@ -2978,8 +2948,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, "failed, status = 0x%x.\n", cmd->frame->hdr.cmd_status); else { - megasas_return_mfi_mpt_pthr(instance, - cmd, cmd->mpt_pthr_cmd_blocked); + megasas_return_cmd(instance, cmd); spin_unlock_irqrestore( instance->host->host_lock, flags); @@ -2987,8 +2956,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, } } else instance->map_id++; - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); + megasas_return_cmd(instance, cmd); /* * Set fast path IO to ZERO. 
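The cmd_status_drv rewrites running through these hunks all serve one synchronous-command pattern: the issuer stores the sentinel MFI_STAT_INVALID_STATUS, fires the frame, and sleeps until the completion path writes the real firmware status and wakes the wait queue. A condensed sketch of that pattern as the patch applies it (names follow the driver, but the body is illustrative, not a verbatim excerpt):

static int example_issue_blocked(struct megasas_instance *instance,
				 struct megasas_cmd *cmd, int timeout)
{
	/* sentinel meaning "no status yet"; the ISR overwrites it */
	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
	instance->instancet->issue_dcmd(instance, cmd);

	/* completion side does: cmd->cmd_status_drv = <fw status>;
	 * wake_up(&instance->int_cmd_wait_q); */
	if (timeout) {
		if (!wait_event_timeout(instance->int_cmd_wait_q,
				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS,
				timeout * HZ))
			return 1;	/* timed out */
	} else {
		wait_event(instance->int_cmd_wait_q,
			   cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
	}

	return (cmd->cmd_status_drv == MFI_STAT_OK) ? 0 : 1;
}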
@@ -3086,7 +3054,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance) printk(KERN_NOTICE "megasas: %p synchronous cmd" "on the internal reset queue," "issue it again.\n", cmd); - cmd->cmd_status = ENODATA; + cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr , 0, instance->reg_set); @@ -3766,7 +3734,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance) cmd = instance->cmd_list[i]; memset(cmd, 0, sizeof(struct megasas_cmd)); cmd->index = i; - atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED); cmd->scmd = NULL; cmd->instance = instance; @@ -3827,7 +3794,7 @@ megasas_get_pd_list(struct megasas_instance *instance) dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; dcmd->mbox.b[1] = 0; dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; @@ -3874,11 +3841,7 @@ megasas_get_pd_list(struct megasas_instance *instance) MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), ci, ci_h); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return ret; } @@ -3927,7 +3890,7 @@ megasas_get_ld_list(struct megasas_instance *instance) if (instance->supportmax256vd) dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; @@ -3965,11 +3928,7 @@ megasas_get_ld_list(struct megasas_instance *instance) ci, ci_h); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return ret; } @@ -4020,7 +3979,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) dcmd->mbox.b[2] = 1; dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; @@ -4050,11 +4009,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), ci, ci_h); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return ret; } @@ -4091,12 +4046,11 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance) instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } - dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n", - instance->fw_supported_vd_count, - instance->fw_supported_pd_count); - dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n", - instance->drv_supported_vd_count, - instance->drv_supported_pd_count); + + dev_info(&instance->pdev->dev, + "firmware type\t: %s\n", + instance->supportmax256vd ? 
"Extended VD(240 VD)firmware" : + "Legacy(64 VD) firmware"); old_map_sz = sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * @@ -4158,7 +4112,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance) memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; @@ -4181,16 +4135,17 @@ megasas_get_ctrl_info(struct megasas_instance *instance) le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); megasas_update_ext_vd_details(instance); + instance->is_imr = (ctrl_info->memory_size ? 0 : 1); + dev_info(&instance->pdev->dev, + "controller type\t: %s(%dMB)\n", + instance->is_imr ? "iMR" : "MR", + le16_to_cpu(ctrl_info->memory_size)); } pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), ci, ci_h); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return ret; } @@ -4229,7 +4184,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = crash_buf_state; dcmd->cmd = MFI_CMD_DCMD; - dcmd->cmd_status = 0xFF; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; dcmd->sge_count = 1; dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; @@ -4245,11 +4200,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, else ret = megasas_issue_polled(instance, cmd); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return ret; } @@ -4262,7 +4213,7 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance, static int megasas_issue_init_mfi(struct megasas_instance *instance) { - u32 context; + __le32 context; struct megasas_cmd *cmd; @@ -4300,7 +4251,7 @@ megasas_issue_init_mfi(struct megasas_instance *instance) initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); init_frame->cmd = MFI_CMD_INIT; - init_frame->cmd_status = 0xFF; + init_frame->cmd_status = MFI_STAT_INVALID_STATUS; init_frame->queue_info_new_phys_addr_lo = cpu_to_le32(lower_32_bits(initq_info_h)); init_frame->queue_info_new_phys_addr_hi = @@ -4354,6 +4305,21 @@ megasas_init_adapter_mfi(struct megasas_instance *instance) instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 0x10; /* + * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands + * are reserved for IOCTL + driver's internal DCMDs. + */ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + instance->max_scsi_cmds = (instance->max_fw_cmds - + MEGASAS_SKINNY_INT_CMDS); + sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); + } else { + instance->max_scsi_cmds = (instance->max_fw_cmds - + MEGASAS_INT_CMDS); + sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS)); + } + + /* * Create a pool of commands */ if (megasas_alloc_cmds(instance)) @@ -4414,6 +4380,107 @@ fail_alloc_cmds: return 1; } +/* + * megasas_setup_irqs_msix - register legacy interrupts. + * @instance: Adapter soft state + * + * Do not enable interrupt, only setup ISRs. + * + * Return 0 on success. 
+ */ +static int +megasas_setup_irqs_ioapic(struct megasas_instance *instance) +{ + struct pci_dev *pdev; + + pdev = instance->pdev; + instance->irq_context[0].instance = instance; + instance->irq_context[0].MSIxIndex = 0; + if (request_irq(pdev->irq, instance->instancet->service_isr, + IRQF_SHARED, "megasas", &instance->irq_context[0])) { + dev_err(&instance->pdev->dev, + "Failed to register IRQ from %s %d\n", + __func__, __LINE__); + return -1; + } + return 0; +} + +/** + * megasas_setup_irqs_msix - register MSI-x interrupts. + * @instance: Adapter soft state + * @is_probe: Driver probe check + * + * Do not enable interrupt, only setup ISRs. + * + * Return 0 on success. + */ +static int +megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) +{ + int i, j, cpu; + struct pci_dev *pdev; + + pdev = instance->pdev; + + /* Try MSI-x */ + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < instance->msix_vectors; i++) { + instance->irq_context[i].instance = instance; + instance->irq_context[i].MSIxIndex = i; + if (request_irq(instance->msixentry[i].vector, + instance->instancet->service_isr, 0, "megasas", + &instance->irq_context[i])) { + dev_err(&instance->pdev->dev, + "Failed to register IRQ for vector %d.\n", i); + for (j = 0; j < i; j++) { + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[j].vector, NULL); + free_irq(instance->msixentry[j].vector, + &instance->irq_context[j]); + } + /* Retry irq register for IO_APIC*/ + instance->msix_vectors = 0; + if (is_probe) + return megasas_setup_irqs_ioapic(instance); + else + return -1; + } + if (smp_affinity_enable) { + if (irq_set_affinity_hint(instance->msixentry[i].vector, + get_cpu_mask(cpu))) + dev_err(&instance->pdev->dev, + "Failed to set affinity hint" + " for cpu %d\n", cpu); + cpu = cpumask_next(cpu, cpu_online_mask); + } + } + return 0; +} + +/* + * megasas_destroy_irqs- unregister interrupts. + * @instance: Adapter soft state + * return: void + */ +static void +megasas_destroy_irqs(struct megasas_instance *instance) { + + int i; + + if (instance->msix_vectors) + for (i = 0; i < instance->msix_vectors; i++) { + if (smp_affinity_enable) + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); + free_irq(instance->msixentry[i].vector, + &instance->irq_context[i]); + } + else + free_irq(instance->pdev->irq, &instance->irq_context[0]); +} + /** * megasas_init_fw - Initializes the FW * @instance: Adapter soft state @@ -4499,7 +4566,7 @@ static int megasas_init_fw(struct megasas_instance *instance) * It is used for all MPT based Adapters. 
*/ instance->reply_post_host_index_addr[0] = - (u32 *)((u8 *)instance->reg_set + + (u32 __iomem *)((u8 __iomem *)instance->reg_set + MPI2_REPLY_POST_HOST_INDEX_OFFSET); /* Check if MSI-X is supported while in ready state */ @@ -4531,7 +4598,8 @@ static int megasas_init_fw(struct megasas_instance *instance) */ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { instance->reply_post_host_index_addr[loop] = - (u32 *)((u8 *)instance->reg_set + + (u32 __iomem *) + ((u8 __iomem *)instance->reg_set + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10)); } @@ -4551,14 +4619,19 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->msix_vectors = i; else instance->msix_vectors = 0; - - dev_info(&instance->pdev->dev, "[scsi%d]: FW supports" - "<%d> MSIX vector,Online CPUs: <%d>," - "Current MSIX <%d>\n", instance->host->host_no, - fw_msix_count, (unsigned int)num_online_cpus(), - instance->msix_vectors); } + dev_info(&instance->pdev->dev, + "firmware supports msix\t: (%d)", fw_msix_count); + dev_info(&instance->pdev->dev, + "current msix/online cpus\t: (%d/%d)\n", + instance->msix_vectors, (unsigned int)num_online_cpus()); + + if (instance->msix_vectors ? + megasas_setup_irqs_msix(instance, 1) : + megasas_setup_irqs_ioapic(instance)) + goto fail_setup_irqs; + instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); if (instance->ctrl_info == NULL) @@ -4574,6 +4647,11 @@ static int megasas_init_fw(struct megasas_instance *instance) if (instance->instancet->init_adapter(instance)) goto fail_init_adapter; + tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, + (unsigned long)instance); + + instance->instancet->enable_intr(instance); + printk(KERN_ERR "megasas: INIT adapter done\n"); /** for passthrough @@ -4584,7 +4662,7 @@ static int megasas_init_fw(struct megasas_instance *instance) (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); if (megasas_get_pd_list(instance) < 0) { printk(KERN_ERR "megasas: failed to get PD list\n"); - goto fail_init_adapter; + goto fail_get_pd_list; } memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); @@ -4610,17 +4688,6 @@ static int megasas_init_fw(struct megasas_instance *instance) tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); - /*Check whether controller is iMR or MR */ - if (ctrl_info->memory_size) { - instance->is_imr = 0; - dev_info(&instance->pdev->dev, "Controller type: MR," - "Memory size is: %dMB\n", - le16_to_cpu(ctrl_info->memory_size)); - } else { - instance->is_imr = 1; - dev_info(&instance->pdev->dev, - "Controller type: iMR\n"); - } instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; instance->mpio = ctrl_info->adapterOperations2.mpio; @@ -4628,9 +4695,6 @@ static int megasas_init_fw(struct megasas_instance *instance) ctrl_info->adapterOperations2.supportUnevenSpans; if (instance->UnevenSpanSupport) { struct fusion_context *fusion = instance->ctrl_context; - - dev_info(&instance->pdev->dev, "FW supports: " - "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport); if (MR_ValidateMapInfo(instance)) fusion->fast_path_io = 1; else @@ -4657,13 +4721,11 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->crash_dump_drv_support = (instance->crash_dump_fw_support && instance->crash_dump_buf); - if (instance->crash_dump_drv_support) { - dev_info(&instance->pdev->dev, "Firmware Crash dump " - "feature is supported\n"); + if (instance->crash_dump_drv_support) megasas_set_crash_dump_params(instance, MR_CRASH_BUF_TURN_OFF); - } else 
{ + else { if (instance->crash_dump_buf) pci_free_consistent(instance->pdev, CRASH_DMA_BUF_SIZE, @@ -4674,37 +4736,28 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->secure_jbod_support = ctrl_info->adapterOperations3.supportSecurityonJBOD; - if (instance->secure_jbod_support) - dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n"); + + dev_info(&instance->pdev->dev, + "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n", + le16_to_cpu(ctrl_info->pci.vendor_id), + le16_to_cpu(ctrl_info->pci.device_id), + le16_to_cpu(ctrl_info->pci.sub_vendor_id), + le16_to_cpu(ctrl_info->pci.sub_device_id)); + dev_info(&instance->pdev->dev, "unevenspan support : %s\n", + instance->UnevenSpanSupport ? "yes" : "no"); + dev_info(&instance->pdev->dev, "disable ocr : %s\n", + instance->disableOnlineCtrlReset ? "yes" : "no"); + dev_info(&instance->pdev->dev, "firmware crash dump : %s\n", + instance->crash_dump_drv_support ? "yes" : "no"); + dev_info(&instance->pdev->dev, "secure jbod : %s\n", + instance->secure_jbod_support ? "yes" : "no"); + + instance->max_sectors_per_req = instance->max_num_sge * PAGE_SIZE / 512; if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) instance->max_sectors_per_req = tmp_sectors; - /* - * 1. For fusion adapters, 3 commands for IOCTL and 5 commands - * for driver's internal DCMDs. - * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's - * internal DCMDs. - * 3. For rest of MFI adapters, 27 commands reserved for IOCTLs - * and 5 commands for drivers's internal DCMD. - */ - if (instance->ctrl_context) { - instance->max_scsi_cmds = instance->max_fw_cmds - - (MEGASAS_FUSION_INTERNAL_CMDS + - MEGASAS_FUSION_IOCTL_CMDS); - sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); - } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { - instance->max_scsi_cmds = instance->max_fw_cmds - - MEGASAS_SKINNY_INT_CMDS; - sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); - } else { - instance->max_scsi_cmds = instance->max_fw_cmds - - MEGASAS_INT_CMDS; - sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5)); - } - /* Check for valid throttlequeuedepth module parameter */ if (throttlequeuedepth && throttlequeuedepth <= instance->max_scsi_cmds) @@ -4713,12 +4766,6 @@ static int megasas_init_fw(struct megasas_instance *instance) instance->throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; - /* - * Setup tasklet for cmd completion - */ - - tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, - (unsigned long)instance); /* Launch SR-IOV heartbeat timer */ if (instance->requestorId) { @@ -4733,7 +4780,14 @@ static int megasas_init_fw(struct megasas_instance *instance) return 0; +fail_get_pd_list: + instance->instancet->disable_intr(instance); fail_init_adapter: + megasas_destroy_irqs(instance); +fail_setup_irqs: + if (instance->msix_vectors) + pci_disable_msix(instance->pdev); + instance->msix_vectors = 0; fail_ready_state: kfree(instance->ctrl_info); instance->ctrl_info = NULL; @@ -4747,7 +4801,7 @@ fail_ready_state: /** * megasas_release_mfi - Reverses the FW initialization - * @intance: Adapter soft state + * @instance: Adapter soft state */ static void megasas_release_mfi(struct megasas_instance *instance) { @@ -4822,21 +4876,17 @@ megasas_get_seq_num(struct megasas_instance *instance, /* * Copy the data back into callers buffer */ - eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); - eli->oldest_seq_num = 
le32_to_cpu(el_info->oldest_seq_num); - eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); - eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); - eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); + eli->newest_seq_num = el_info->newest_seq_num; + eli->oldest_seq_num = el_info->oldest_seq_num; + eli->clear_seq_num = el_info->clear_seq_num; + eli->shutdown_seq_num = el_info->shutdown_seq_num; + eli->boot_seq_num = el_info->boot_seq_num; } pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return 0; } @@ -4877,8 +4927,8 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, if (instance->aen_cmd) { - prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; - prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); + prev_aen.word = + le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]); /* * A class whose enum value is smaller is inclusive of all @@ -4990,7 +5040,7 @@ static int megasas_start_aen(struct megasas_instance *instance) class_locale.members.class = MR_EVT_CLASS_DEBUG; return megasas_register_aen(instance, - eli.newest_seq_num + 1, + le32_to_cpu(eli.newest_seq_num) + 1, class_locale.word); } @@ -5001,6 +5051,7 @@ static int megasas_io_attach(struct megasas_instance *instance) { struct Scsi_Host *host = instance->host; + u32 error; /* * Export parameters required by SCSI mid-layer */ @@ -5050,12 +5101,21 @@ static int megasas_io_attach(struct megasas_instance *instance) host->hostt->eh_device_reset_handler = NULL; host->hostt->eh_bus_reset_handler = NULL; } + error = scsi_init_shared_tag_map(host, host->can_queue); + if (error) { + dev_err(&instance->pdev->dev, + "Failed to init shared tag map from %s %d\n", + __func__, __LINE__); + return -ENODEV; + } /* * Notify the mid-layer about the new controller */ if (scsi_add_host(host, &instance->pdev->dev)) { - printk(KERN_DEBUG "megasas: scsi_add_host failed\n"); + dev_err(&instance->pdev->dev, + "Failed to add host from %s %d\n", + __func__, __LINE__); return -ENODEV; } @@ -5106,7 +5166,7 @@ fail_set_dma_mask: static int megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { - int rval, pos, i, j, cpu; + int rval, pos; struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; @@ -5129,16 +5189,6 @@ static int megasas_probe_one(struct pci_dev *pdev, } /* - * Announce PCI information - */ - printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", - pdev->vendor, pdev->device, pdev->subsystem_vendor, - pdev->subsystem_device); - - printk("bus %d:slot %d:func %d\n", - pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - - /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); @@ -5183,8 +5233,6 @@ static int megasas_probe_one(struct pci_dev *pdev, fusion = instance->ctrl_context; memset(fusion, 0, ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); - INIT_LIST_HEAD(&fusion->cmd_pool); - spin_lock_init(&fusion->mpt_pool_lock); } break; default: /* For all other supported controllers */ @@ -5207,6 +5255,13 @@ static int megasas_probe_one(struct pci_dev *pdev, break; } + instance->system_info_buf = pci_zalloc_consistent(pdev, + sizeof(struct MR_DRV_SYSTEM_INFO), +
&instance->system_info_h); + + if (!instance->system_info_buf) + dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n"); + /* Crash dump feature related initialisation*/ instance->drv_buf_index = 0; instance->drv_buf_alloc = 0; @@ -5315,55 +5370,6 @@ static int megasas_probe_one(struct pci_dev *pdev, } } -retry_irq_register: - /* - * Register IRQ - */ - if (instance->msix_vectors) { - cpu = cpumask_first(cpu_online_mask); - for (i = 0; i < instance->msix_vectors; i++) { - instance->irq_context[i].instance = instance; - instance->irq_context[i].MSIxIndex = i; - if (request_irq(instance->msixentry[i].vector, - instance->instancet->service_isr, 0, - "megasas", - &instance->irq_context[i])) { - printk(KERN_DEBUG "megasas: Failed to " - "register IRQ for vector %d.\n", i); - for (j = 0; j < i; j++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[j].vector, NULL); - free_irq( - instance->msixentry[j].vector, - &instance->irq_context[j]); - } - /* Retry irq register for IO_APIC */ - instance->msix_vectors = 0; - goto retry_irq_register; - } - if (smp_affinity_enable) { - if (irq_set_affinity_hint(instance->msixentry[i].vector, - get_cpu_mask(cpu))) - dev_err(&instance->pdev->dev, - "Error setting affinity hint " - "for cpu %d\n", cpu); - cpu = cpumask_next(cpu, cpu_online_mask); - } - } - } else { - instance->irq_context[0].instance = instance; - instance->irq_context[0].MSIxIndex = 0; - if (request_irq(pdev->irq, instance->instancet->service_isr, - IRQF_SHARED, "megasas", - &instance->irq_context[0])) { - printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); - goto fail_irq; - } - } - - instance->instancet->enable_intr(instance); - /* * Store instance in PCI softstate */ @@ -5410,17 +5416,8 @@ retry_irq_register: megasas_mgmt_info.max_index--; instance->instancet->disable_intr(instance); - if (instance->msix_vectors) - for (i = 0; i < instance->msix_vectors; i++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); - free_irq(instance->msixentry[i].vector, - &instance->irq_context[i]); - } - else - free_irq(instance->pdev->irq, &instance->irq_context[0]); -fail_irq: + megasas_destroy_irqs(instance); + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || @@ -5428,9 +5425,9 @@ fail_irq: megasas_release_fusion(instance); else megasas_release_mfi(instance); - fail_init_mfi: if (instance->msix_vectors) pci_disable_msix(instance->pdev); +fail_init_mfi: fail_alloc_dma_buf: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), @@ -5487,11 +5484,7 @@ static void megasas_flush_cache(struct megasas_instance *instance) dev_err(&instance->pdev->dev, "Command timedout" " from %s\n", __func__); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return; } @@ -5538,11 +5531,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, dev_err(&instance->pdev->dev, "Command timedout" "from %s\n", __func__); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return; } @@ -5558,7 +5547,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t 
state) { struct Scsi_Host *host; struct megasas_instance *instance; - int i; instance = pci_get_drvdata(pdev); host = instance->host; @@ -5583,16 +5571,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance); - if (instance->msix_vectors) - for (i = 0; i < instance->msix_vectors; i++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); - free_irq(instance->msixentry[i].vector, - &instance->irq_context[i]); - } - else - free_irq(instance->pdev->irq, &instance->irq_context[0]); + megasas_destroy_irqs(instance); + if (instance->msix_vectors) pci_disable_msix(instance->pdev); @@ -5611,7 +5591,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) static int megasas_resume(struct pci_dev *pdev) { - int rval, i, j, cpu; + int rval; struct Scsi_Host *host; struct megasas_instance *instance; @@ -5681,50 +5661,10 @@ megasas_resume(struct pci_dev *pdev) tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); - /* - * Register IRQ - */ - if (instance->msix_vectors) { - cpu = cpumask_first(cpu_online_mask); - for (i = 0 ; i < instance->msix_vectors; i++) { - instance->irq_context[i].instance = instance; - instance->irq_context[i].MSIxIndex = i; - if (request_irq(instance->msixentry[i].vector, - instance->instancet->service_isr, 0, - "megasas", - &instance->irq_context[i])) { - printk(KERN_DEBUG "megasas: Failed to " - "register IRQ for vector %d.\n", i); - for (j = 0; j < i; j++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[j].vector, NULL); - free_irq( - instance->msixentry[j].vector, - &instance->irq_context[j]); - } - goto fail_irq; - } - - if (smp_affinity_enable) { - if (irq_set_affinity_hint(instance->msixentry[i].vector, - get_cpu_mask(cpu))) - dev_err(&instance->pdev->dev, "Error " - "setting affinity hint for cpu " - "%d\n", cpu); - cpu = cpumask_next(cpu, cpu_online_mask); - } - } - } else { - instance->irq_context[0].instance = instance; - instance->irq_context[0].MSIxIndex = 0; - if (request_irq(pdev->irq, instance->instancet->service_isr, - IRQF_SHARED, "megasas", - &instance->irq_context[0])) { - printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); - goto fail_irq; - } - } + if (instance->msix_vectors ? 
+ megasas_setup_irqs_msix(instance, 0) : + megasas_setup_irqs_ioapic(instance)) + goto fail_init_mfi; /* Re-launch SR-IOV heartbeat timer */ if (instance->requestorId) { @@ -5733,8 +5673,10 @@ megasas_resume(struct pci_dev *pdev) &instance->sriov_heartbeat_timer, megasas_sriov_heartbeat_handler, MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); - else + else { instance->skip_heartbeat_timer_del = 1; + goto fail_init_mfi; + } } instance->instancet->enable_intr(instance); @@ -5748,7 +5690,6 @@ megasas_resume(struct pci_dev *pdev) return 0; -fail_irq: fail_init_mfi: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), @@ -5829,16 +5770,8 @@ static void megasas_detach_one(struct pci_dev *pdev) instance->instancet->disable_intr(instance); - if (instance->msix_vectors) - for (i = 0; i < instance->msix_vectors; i++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); - free_irq(instance->msixentry[i].vector, - &instance->irq_context[i]); - } - else - free_irq(instance->pdev->irq, &instance->irq_context[0]); + megasas_destroy_irqs(instance); + if (instance->msix_vectors) pci_disable_msix(instance->pdev); @@ -5899,6 +5832,10 @@ static void megasas_detach_one(struct pci_dev *pdev) pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE, instance->crash_dump_buf, instance->crash_dump_h); + if (instance->system_info_buf) + pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO), + instance->system_info_buf, instance->system_info_h); + scsi_host_put(host); pci_disable_device(pdev); @@ -5912,23 +5849,14 @@ static void megasas_detach_one(struct pci_dev *pdev) */ static void megasas_shutdown(struct pci_dev *pdev) { - int i; struct megasas_instance *instance = pci_get_drvdata(pdev); instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); instance->instancet->disable_intr(instance); - if (instance->msix_vectors) - for (i = 0; i < instance->msix_vectors; i++) { - if (smp_affinity_enable) - irq_set_affinity_hint( - instance->msixentry[i].vector, NULL); - free_irq(instance->msixentry[i].vector, - &instance->irq_context[i]); - } - else - free_irq(instance->pdev->irq, &instance->irq_context[0]); + megasas_destroy_irqs(instance); + if (instance->msix_vectors) pci_disable_msix(instance->pdev); } @@ -6211,11 +6139,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, kbuff_arr[i] = NULL; } - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return error; } @@ -6502,6 +6426,15 @@ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); static ssize_t +megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", + MEGASAS_RELDATE); +} + +static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL); + +static ssize_t megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_poll_for_event); @@ -6841,6 +6774,11 @@ static int __init megasas_init(void) goto err_dcf_attr_ver; rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + if (rval) + goto err_dcf_rel_date; + + rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); if (rval) 
goto err_dcf_support_poll_for_event; @@ -6863,6 +6801,9 @@ err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); err_dcf_support_poll_for_event: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_release_date); +err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: pci_unregister_driver(&megasas_pci_driver); @@ -6882,6 +6823,8 @@ static void __exit megasas_exit(void) &driver_attr_support_poll_for_event); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); pci_unregister_driver(&megasas_pci_driver); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 4f72287860ee..be57b18675a4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -66,7 +66,15 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding " #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) #define MR_LD_STATE_OPTIMAL 3 + +#ifdef FALSE +#undef FALSE +#endif #define FALSE 0 + +#ifdef TRUE +#undef TRUE +#endif #define TRUE 1 #define SPAN_DEBUG 0 @@ -142,7 +150,7 @@ u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); } -u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) +__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) { return map->raidMap.devHndlInfo[pd].curDevHdl; } @@ -735,7 +743,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, u8 retval = TRUE; u8 do_invader = 0; u64 *pdBlock = &io_info->pdBlock; - u16 *pDevHandle = &io_info->devHandle; + __le16 *pDevHandle = &io_info->devHandle; u32 logArm, rowMod, armQ, arm; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || @@ -769,7 +777,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, if (pd != MR_PD_INVALID) *pDevHandle = MR_PdDevHandleGet(pd, map); else { - *pDevHandle = MR_PD_INVALID; + *pDevHandle = cpu_to_le16(MR_PD_INVALID); if ((raid->level >= 5) && (!do_invader || (do_invader && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) @@ -817,7 +825,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u8 retval = TRUE; u8 do_invader = 0; u64 *pdBlock = &io_info->pdBlock; - u16 *pDevHandle = &io_info->devHandle; + __le16 *pDevHandle = &io_info->devHandle; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) @@ -864,7 +872,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, /* Get dev handle from Pd. */ *pDevHandle = MR_PdDevHandleGet(pd, map); else { - *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ + /* set dev handle as invalid. 
*/ + *pDevHandle = cpu_to_le16(MR_PD_INVALID); if ((raid->level >= 5) && (!do_invader || (do_invader && (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) @@ -1109,7 +1118,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, ref_in_start_stripe, io_info, pRAID_Context, map); /* If IO on an invalid Pd, then FP is not possible.*/ - if (io_info->devHandle == MR_PD_INVALID) + if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID)) io_info->fpOkForIo = FALSE; return retval; } else if (isRead) { @@ -1341,11 +1350,11 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance, return io_info->pd_after_lb; } -u16 get_updated_dev_handle(struct megasas_instance *instance, +__le16 get_updated_dev_handle(struct megasas_instance *instance, struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info) { u8 arm_pd; - u16 devHandle; + __le16 devHandle; struct fusion_context *fusion; struct MR_DRV_RAID_MAP_ALL *drv_map; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 5a0800d19970..46a0f8f4f677 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -53,10 +53,12 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_dbg.h> +#include <linux/dmi.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" + extern void megasas_free_cmds(struct megasas_instance *instance); extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance); @@ -156,28 +158,15 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) * megasas_get_cmd_fusion - Get a command from the free pool * @instance: Adapter soft state * - * Returns a free command from the pool + * Returns a blk_tag indexed mpt frame */ -struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance - *instance) +inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance + *instance, u32 blk_tag) { - unsigned long flags; - struct fusion_context *fusion = - (struct fusion_context *)instance->ctrl_context; - struct megasas_cmd_fusion *cmd = NULL; - - spin_lock_irqsave(&fusion->mpt_pool_lock, flags); - - if (!list_empty(&fusion->cmd_pool)) { - cmd = list_entry((&fusion->cmd_pool)->next, - struct megasas_cmd_fusion, list); - list_del_init(&cmd->list); - } else { - printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); - } + struct fusion_context *fusion; - spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); - return cmd; + fusion = instance->ctrl_context; + return fusion->cmd_list[blk_tag]; } /** @@ -188,47 +177,35 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance inline void megasas_return_cmd_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd) { - unsigned long flags; - struct fusion_context *fusion = - (struct fusion_context *)instance->ctrl_context; - - spin_lock_irqsave(&fusion->mpt_pool_lock, flags); - cmd->scmd = NULL; - cmd->sync_cmd_idx = (u32)ULONG_MAX; memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); - list_add(&cmd->list, (&fusion->cmd_pool)->next); - - spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); } /** - * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool - * @instance: Adapter soft state - * @cmd_mfi: MFI Command packet to be returned to free command pool - * @cmd_mpt: MPT Command packet to be returned to free command pool + * megasas_fire_cmd_fusion - Sends command to the FW */ -inline void megasas_return_mfi_mpt_pthr(struct 
megasas_instance *instance, - struct megasas_cmd *cmd_mfi, - struct megasas_cmd_fusion *cmd_fusion) +static void +megasas_fire_cmd_fusion(struct megasas_instance *instance, + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) { +#if defined(writeq) && defined(CONFIG_64BIT) + u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | + le32_to_cpu(req_desc->u.low)); + + writeq(req_data, &instance->reg_set->inbound_low_queue_port); +#else unsigned long flags; - /* - * TO DO: optimize this code and use only one lock instead of two - * locks being used currently- mpt_pool_lock is acquired - * inside mfi_pool_lock - */ - spin_lock_irqsave(&instance->mfi_pool_lock, flags); - megasas_return_cmd_fusion(instance, cmd_fusion); - if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED) - dev_err(&instance->pdev->dev, "Possible bug from %s %d\n", - __func__, __LINE__); - atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED); - __megasas_return_cmd(instance, cmd_mfi); - spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); + spin_lock_irqsave(&instance->hba_lock, flags); + writel(le32_to_cpu(req_desc->u.low), + &instance->reg_set->inbound_low_queue_port); + writel(le32_to_cpu(req_desc->u.high), + &instance->reg_set->inbound_high_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +#endif } + /** * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool * @instance: Adapter soft state @@ -326,7 +303,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance) kfree(fusion->cmd_list); fusion->cmd_list = NULL; - INIT_LIST_HEAD(&fusion->cmd_pool); } /** @@ -464,7 +440,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) reply_desc = fusion->reply_frames_desc; for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) - reply_desc->Words = ULLONG_MAX; + reply_desc->Words = cpu_to_le64(ULLONG_MAX); io_frames_sz = fusion->io_frames_alloc_sz; @@ -535,7 +511,9 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); cmd->index = i + 1; cmd->scmd = NULL; - cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */ + cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ? + (i - instance->max_scsi_cmds) : + (u32)ULONG_MAX; /* Set to Invalid */ cmd->instance = instance; cmd->io_request = (struct MPI2_RAID_SCSI_IO_REQUEST *) @@ -543,8 +521,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; - - list_add_tail(&cmd->list, &fusion->cmd_pool); } /* @@ -605,14 +581,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, msleep(20); } - if (frame_hdr->cmd_status == 0xff) { - if (fusion) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); + if (frame_hdr->cmd_status == 0xff) return -ETIME; - } - return 0; + return (frame_hdr->cmd_status == MFI_STAT_OK) ? 
+ 0 : 1; } /** @@ -633,6 +606,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; int i; struct megasas_header *frame_hdr; + const char *sys_info; fusion = instance->ctrl_context; @@ -673,7 +647,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; - frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); + frame_hdr->flags = cpu_to_le16( + le16_to_cpu(frame_hdr->flags) | + MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; @@ -695,6 +671,16 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) /* Convert capability to LE32 */ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); + sys_info = dmi_get_system_info(DMI_PRODUCT_UUID); + if (instance->system_info_buf && sys_info) { + memcpy(instance->system_info_buf->systemId, sys_info, + strlen(sys_info) > 64 ? 64 : strlen(sys_info)); + instance->system_info_buf->systemIdLength = + strlen(sys_info) > 64 ? 64 : strlen(sys_info); + init_frame->system_info_lo = instance->system_info_h; + init_frame->system_info_hi = 0; + } + init_frame->queue_info_new_phys_addr_hi = cpu_to_le32(upper_32_bits(ioc_init_handle)); init_frame->queue_info_new_phys_addr_lo = @@ -719,8 +705,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - instance->instancet->fire_cmd(instance, req_desc.u.low, - req_desc.u.high, instance->reg_set); + megasas_fire_cmd_fusion(instance, &req_desc); wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); @@ -820,11 +805,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance) else ret = megasas_issue_polled(instance, cmd); - if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) - megasas_return_mfi_mpt_pthr(instance, cmd, - cmd->mpt_pthr_cmd_blocked); - else - megasas_return_cmd(instance, cmd); + megasas_return_cmd(instance, cmd); return ret; } @@ -1061,6 +1042,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->last_reply_idx[i] = 0; /* + * For fusion adapters, 3 commands for IOCTL and 5 commands + * for driver's internal DCMDs. 
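
megasas_fire_cmd_fusion(), introduced a little further up, posts the 64-bit request descriptor with a single writeq when the build provides one, and otherwise writes the low and high 32-bit halves under hba_lock so two CPUs cannot interleave their halves in the doorbell pair. A userspace model of the two paths (plain stores stand in for writeq/writel, and the lock is only noted in a comment):

    #include <stdint.h>
    #include <stdio.h>

    /* Two adjacent 32-bit "registers", modelling the inbound low/high
     * queue ports. */
    static volatile union {
        uint64_t q;
        uint32_t d[2];
    } doorbell;

    static void fire(uint64_t desc, int have_writeq)
    {
        if (have_writeq) {
            doorbell.q = desc;  /* writeq analogue: one indivisible store */
        } else {
            /* writel analogue: two stores; the driver brackets these with
             * spin_lock_irqsave(&instance->hba_lock, ...) so no other CPU
             * can slip its own halves in between. */
            doorbell.d[0] = (uint32_t)desc;
            doorbell.d[1] = (uint32_t)(desc >> 32);
        }
    }

    int main(void)
    {
        fire(0x1122334455667788ULL, 0);
        printf("low=0x%08x high=0x%08x\n",
               (unsigned)doorbell.d[0], (unsigned)doorbell.d[1]);
        return 0;
    }
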
+ */ + instance->max_scsi_cmds = instance->max_fw_cmds - + (MEGASAS_FUSION_INTERNAL_CMDS + + MEGASAS_FUSION_IOCTL_CMDS); + sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); + + /* * Allocate memory for descriptors * Create a pool of commands */ @@ -1131,34 +1121,6 @@ fail_alloc_mfi_cmds: } /** - * megasas_fire_cmd_fusion - Sends command to the FW - * @frame_phys_addr : Physical address of cmd - * @frame_count : Number of frames for the command - * @regs : MFI register set - */ -void -megasas_fire_cmd_fusion(struct megasas_instance *instance, - dma_addr_t req_desc_lo, - u32 req_desc_hi, - struct megasas_register_set __iomem *regs) -{ -#if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) | - le32_to_cpu(req_desc_lo)); - - writeq(req_data, &(regs)->inbound_low_queue_port); -#else - unsigned long flags; - - spin_lock_irqsave(&instance->hba_lock, flags); - - writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); - writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); - spin_unlock_irqrestore(&instance->hba_lock, flags); -#endif -} - -/** * map_cmd_status - Maps FW cmd status to OS cmd status * @cmd : Pointer to cmd * @status : status of cmd returned by FW @@ -1497,7 +1459,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct MR_DRV_RAID_MAP_ALL *local_map_ptr; u8 *raidLUN; - device_id = MEGASAS_DEV_INDEX(instance, scp); + device_id = MEGASAS_DEV_INDEX(scp); fusion = instance->ctrl_context; @@ -1621,6 +1583,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->pd_r1_lb = io_info.pd_after_lb; } else scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; + + if ((raidLUN[0] == 1) && + (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 2)) { + instance->dev_handle = !(instance->dev_handle); + io_info.devHandle = + local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle]; + } + cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; /* populate the LUN field */ @@ -1650,121 +1620,68 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } /** - * megasas_build_dcdb_fusion - Prepares IOs to devices + * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared * - * Prepares the io_request frame for non-io cmds + * Prepares the io_request frame for non-rw io cmds for vd. 
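
The megasas_build_ldio_fusion() hunk above adds path alternation for dual-handle drives: when the LUN qualifies and the target PD reports more than two valid handles, instance->dev_handle is flipped so successive fast-path IOs leave on alternating paths. The toggle in isolation (the two handle values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned short handle[2] = { 0x0011, 0x0022 }; /* hypothetical paths */
        unsigned char cur = 0;      /* instance->dev_handle analogue */
        int io;

        for (io = 0; io < 4; io++) {
            cur = !cur;             /* flip before each eligible IO */
            printf("io %d -> handle 0x%04x\n", io, handle[cur]);
        }
        return 0;
    }
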
*/ -static void -megasas_build_dcdb_fusion(struct megasas_instance *instance, - struct scsi_cmnd *scmd, - struct megasas_cmd_fusion *cmd) +static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd) { u32 device_id; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; u16 pd_index = 0; - u16 os_timeout_value; - u16 timeout_limit; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; struct fusion_context *fusion = instance->ctrl_context; u8 span, physArm; - u16 devHandle; + __le16 devHandle; u32 ld, arRef, pd; struct MR_LD_RAID *raid; struct RAID_CONTEXT *pRAID_Context; + u8 fp_possible = 1; io_request = cmd->io_request; - device_id = MEGASAS_DEV_INDEX(instance, scmd); - pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) - +scmd->device->id; + device_id = MEGASAS_DEV_INDEX(scmd); + pd_index = MEGASAS_PD_INDEX(scmd); local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; - io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + /* get RAID_Context pointer */ + pRAID_Context = &io_request->RaidContext; + /* Check with FW team */ + pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + pRAID_Context->regLockRowLBA = 0; + pRAID_Context->regLockLength = 0; - if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && - instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { - if (fusion->fast_path_io) - io_request->DevHandle = - local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; - io_request->RaidContext.RAIDFlags = - MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD - << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; - cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; - cmd->request_desc->SCSIIO.MSIxIndex = - instance->msix_vectors ? - raw_smp_processor_id() % - instance->msix_vectors : - 0; - os_timeout_value = scmd->request->timeout / HZ; - - if (instance->secure_jbod_support && - (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) { - /* system pd firmware path */ - io_request->Function = - MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << - MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - io_request->RaidContext.timeoutValue = - cpu_to_le16(os_timeout_value); - } else { - /* system pd Fast Path */ - io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; - io_request->RaidContext.regLockFlags = 0; - io_request->RaidContext.regLockRowLBA = 0; - io_request->RaidContext.regLockLength = 0; - timeout_limit = (scmd->device->type == TYPE_DISK) ? - 255 : 0xFFFF; - io_request->RaidContext.timeoutValue = - cpu_to_le16((os_timeout_value > timeout_limit) ? - timeout_limit : os_timeout_value); - if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) - io_request->IoFlags |= - cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); - - cmd->request_desc->SCSIIO.RequestFlags = - (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << - MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - } - } else { - if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) - goto NonFastPath; - - /* - * For older firmware, Driver should not access ldTgtIdToLd - * beyond index 127 and for Extended VD firmware, ldTgtIdToLd - * should not go beyond 255. 
- */ - - if ((!fusion->fast_path_io) || - (device_id >= instance->fw_supported_vd_count)) - goto NonFastPath; + if (fusion->fast_path_io && ( + device_id < instance->fw_supported_vd_count)) { ld = MR_TargetIdToLdGet(device_id, local_map_ptr); - if (ld >= instance->fw_supported_vd_count) - goto NonFastPath; + fp_possible = 0; raid = MR_LdRaidGet(ld, local_map_ptr); - - /* check if this LD is FP capable */ if (!(raid->capability.fpNonRWCapable)) - /* not FP capable, send as non-FP */ - goto NonFastPath; + fp_possible = 0; + } else + fp_possible = 0; - /* get RAID_Context pointer */ - pRAID_Context = &io_request->RaidContext; + if (!fp_possible) { + io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; + io_request->DevHandle = cpu_to_le16(device_id); + io_request->LUN[1] = scmd->device->lun; + pRAID_Context->timeoutValue = + cpu_to_le16 (scmd->request->timeout / HZ); + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + } else { /* set RAID context values */ - pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; - pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); - pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); - pRAID_Context->regLockRowLBA = 0; - pRAID_Context->regLockLength = 0; - pRAID_Context->configSeqNum = raid->seqNum; + pRAID_Context->configSeqNum = raid->seqNum; + pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; + pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); /* get the DevHandle for the PD (since this is fpNonRWCapable, this is a single disk RAID0) */ @@ -1776,7 +1693,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, /* build request descriptor */ cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << - MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); cmd->request_desc->SCSIIO.DevHandle = devHandle; /* populate the LUN field */ @@ -1785,18 +1702,87 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, /* build the raidScsiIO structure */ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; io_request->DevHandle = devHandle; + } +} - return; +/** + * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared + * @fp_possible: parameter to detect fast path or firmware path io. 
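
megasas_build_ld_nonrw_fusion() above replaces the old goto-based checks with one fp_possible flag: the fast path is attempted only when the map supports fast-path IO, the target id is within the firmware's VD count, and the LD advertises fpNonRWCapable; any other case falls through to the firmware path with the LD-IO function code. The gate reduced to a predicate (field packaging is illustrative, names follow the hunk):

    #include <stdio.h>

    struct ld_caps { int fpNonRWCapable; };

    static int nonrw_fp_possible(int fast_path_io, unsigned int device_id,
                                 unsigned int fw_supported_vd_count,
                                 const struct ld_caps *raid)
    {
        if (!fast_path_io || device_id >= fw_supported_vd_count)
            return 0;
        return raid->fpNonRWCapable; /* FP-capable single-disk RAID0 only */
    }

    int main(void)
    {
        struct ld_caps caps = { .fpNonRWCapable = 1 };

        printf("%d\n", nonrw_fp_possible(1, 3, 64, &caps));   /* 1: fast path */
        printf("%d\n", nonrw_fp_possible(1, 200, 64, &caps)); /* 0: fw path   */
        return 0;
    }
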
+ * + * Prepares the io_request frame for rw/non-rw io cmds for syspds + */ +static void +megasas_build_syspd_fusion(struct megasas_instance *instance, + struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible) +{ + u32 device_id; + struct MPI2_RAID_SCSI_IO_REQUEST *io_request; + u16 pd_index = 0; + u16 os_timeout_value; + u16 timeout_limit; + struct MR_DRV_RAID_MAP_ALL *local_map_ptr; + struct RAID_CONTEXT *pRAID_Context; + struct fusion_context *fusion = instance->ctrl_context; + + device_id = MEGASAS_DEV_INDEX(scmd); + pd_index = MEGASAS_PD_INDEX(scmd); + os_timeout_value = scmd->request->timeout / HZ; -NonFastPath: + io_request = cmd->io_request; + /* get RAID_Context pointer */ + pRAID_Context = &io_request->RaidContext; + io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + io_request->LUN[1] = scmd->device->lun; + pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD + << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; + + pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + pRAID_Context->configSeqNum = 0; + local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; + io_request->DevHandle = + local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; + + cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; + cmd->request_desc->SCSIIO.MSIxIndex = + instance->msix_vectors ? + (raw_smp_processor_id() % instance->msix_vectors) : 0; + + + if (!fp_possible) { + /* system pd firmware path */ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = cpu_to_le16(device_id); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << - MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value); + } else { + /* system pd Fast Path */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + pRAID_Context->regLockFlags = 0; + pRAID_Context->regLockRowLBA = 0; + pRAID_Context->regLockLength = 0; + timeout_limit = (scmd->device->type == TYPE_DISK) ? + 255 : 0xFFFF; + pRAID_Context->timeoutValue = + cpu_to_le16((os_timeout_value > timeout_limit) ? 
+ timeout_limit : os_timeout_value); + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { + cmd->request_desc->SCSIIO.RequestFlags |= + (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + pRAID_Context->Type = MPI2_TYPE_CUDA; + pRAID_Context->nseg = 0x1; + io_request->IoFlags |= + cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); + } + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } - io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); - int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN); } /** @@ -1813,11 +1799,10 @@ megasas_build_io_fusion(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd_fusion *cmd) { - u32 device_id, sge_count; + u32 sge_count; + u8 cmd_type; struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; - device_id = MEGASAS_DEV_INDEX(instance, scp); - /* Zero out some fields so they don't get reused */ memset(io_request->LUN, 0x0, 8); io_request->CDB.EEDP32.PrimaryReferenceTag = 0; @@ -1837,10 +1822,24 @@ megasas_build_io_fusion(struct megasas_instance *instance, */ io_request->IoFlags = cpu_to_le16(scp->cmd_len); - if (megasas_cmd_type(scp) == READ_WRITE_LDIO) + switch (cmd_type = megasas_cmd_type(scp)) { + case READ_WRITE_LDIO: megasas_build_ldio_fusion(instance, scp, cmd); - else - megasas_build_dcdb_fusion(instance, scp, cmd); + break; + case NON_READ_WRITE_LDIO: + megasas_build_ld_nonrw_fusion(instance, scp, cmd); + break; + case READ_WRITE_SYSPDIO: + case NON_READ_WRITE_SYSPDIO: + if (instance->secure_jbod_support && + (cmd_type == NON_READ_WRITE_SYSPDIO)) + megasas_build_syspd_fusion(instance, scp, cmd, 0); + else + megasas_build_syspd_fusion(instance, scp, cmd, 1); + break; + default: + break; + } /* * Construct SGL @@ -1915,9 +1914,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, fusion = instance->ctrl_context; - cmd = megasas_get_cmd_fusion(instance); - if (!cmd) - return SCSI_MLQUEUE_HOST_BUSY; + cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); index = cmd->index; @@ -1948,9 +1945,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, */ atomic_inc(&instance->fw_outstanding); - instance->instancet->fire_cmd(instance, - req_desc->u.low, req_desc->u.high, - instance->reg_set); + megasas_fire_cmd_fusion(instance, req_desc); return 0; } @@ -1975,6 +1970,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) union desc_value d_val; struct LD_LOAD_BALANCE_INFO *lbinfo; int threshold_reply_count = 0; + struct scsi_cmnd *scmd_local = NULL; fusion = instance->ctrl_context; @@ -1998,7 +1994,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) num_completed = 0; - while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { + while (d_val.u.low != cpu_to_le32(UINT_MAX) && + d_val.u.high != cpu_to_le32(UINT_MAX)) { smid = le16_to_cpu(reply_desc->SMID); cmd_fusion = fusion->cmd_list[smid - 1]; @@ -2010,14 +2007,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) if (cmd_fusion->scmd) cmd_fusion->scmd->SCp.ptr = NULL; + scmd_local = cmd_fusion->scmd; status = scsi_io_req->RaidContext.status; extStatus = scsi_io_req->RaidContext.exStatus; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ /* Update load balancing info */ - device_id = MEGASAS_DEV_INDEX(instance, - 
cmd_fusion->scmd); + device_id = MEGASAS_DEV_INDEX(scmd_local); lbinfo = &fusion->load_balance_info[device_id]; if (cmd_fusion->scmd->SCp.Status & MEGASAS_LOAD_BALANCE_FLAG) { @@ -2035,29 +2032,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ /* Map the FW Cmd Status */ map_cmd_status(cmd_fusion, status, extStatus); - scsi_dma_unmap(cmd_fusion->scmd); - cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); scsi_io_req->RaidContext.status = 0; scsi_io_req->RaidContext.exStatus = 0; megasas_return_cmd_fusion(instance, cmd_fusion); + scsi_dma_unmap(scmd_local); + scmd_local->scsi_done(scmd_local); atomic_dec(&instance->fw_outstanding); break; case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; - if (!cmd_mfi->mpt_pthr_cmd_blocked) { - if (megasas_dbg_lvl == 5) - dev_info(&instance->pdev->dev, - "freeing mfi/mpt pass-through " - "from %s %d\n", - __func__, __LINE__); - megasas_return_mfi_mpt_pthr(instance, cmd_mfi, - cmd_fusion); - } - - megasas_complete_cmd(instance, cmd_mfi, DID_OK); - cmd_fusion->flags = 0; + /* Poll mode. Dummy free. + * In case of Interrupt mode, caller has reverse check. + */ + if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { + cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; + megasas_return_cmd(instance, cmd_mfi); + } else + megasas_complete_cmd(instance, cmd_mfi, DID_OK); break; } @@ -2066,7 +2059,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) fusion->reply_q_depth) fusion->last_reply_idx[MSIxIndex] = 0; - desc->Words = ULLONG_MAX; + desc->Words = cpu_to_le64(ULLONG_MAX); num_completed++; threshold_reply_count++; @@ -2217,27 +2210,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd; struct fusion_context *fusion; struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; - u32 opcode; - cmd = megasas_get_cmd_fusion(instance); - if (!cmd) - return 1; + fusion = instance->ctrl_context; + + cmd = megasas_get_cmd_fusion(instance, + instance->max_scsi_cmds + mfi_cmd->index); /* Save the smid. 
To be used for returning the cmd */ mfi_cmd->context.smid = cmd->index; - cmd->sync_cmd_idx = mfi_cmd->index; - - /* Set this only for Blocked commands */ - opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode); - if ((opcode == MR_DCMD_LD_MAP_GET_INFO) - && (mfi_cmd->frame->dcmd.mbox.b[1] == 1)) - mfi_cmd->is_wait_event = 1; - - if (opcode == MR_DCMD_CTRL_EVENT_WAIT) - mfi_cmd->is_wait_event = 1; - - if (mfi_cmd->is_wait_event) - mfi_cmd->mpt_pthr_cmd_blocked = cmd; /* * For cmds where the flag is set, store the flag and check * on completion. For cmds with this flag, don't queue * them to the FW */ if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) - cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + mfi_cmd->flags |= DRV_DCMD_POLLED_MODE; - fusion = instance->ctrl_context; io_req = cmd->io_request; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || @@ -2327,14 +2306,12 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); return; } - atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED); - instance->instancet->fire_cmd(instance, req_desc->u.low, - req_desc->u.high, instance->reg_set); + megasas_fire_cmd_fusion(instance, req_desc); } /** * megasas_release_fusion - Reverses the FW initialization - * @intance: Adapter soft state + * @instance: Adapter soft state */ void megasas_release_fusion(struct megasas_instance *instance) @@ -2508,7 +2485,42 @@ void megasas_reset_reply_desc(struct megasas_instance *instance) fusion->last_reply_idx[i] = 0; reply_desc = fusion->reply_frames_desc; for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++) - reply_desc->Words = ULLONG_MAX; + reply_desc->Words = cpu_to_le64(ULLONG_MAX); +} + +/* + * megasas_refire_mgmt_cmd - Re-fire management commands + * @instance: Controller's soft instance +*/ +void megasas_refire_mgmt_cmd(struct megasas_instance *instance) +{ + int j; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + u16 smid; + + fusion = instance->ctrl_context; + + /* Re-fire management commands. + * Do not traverse complete MPT frame pool. Start from max_scsi_cmds.
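
build_mpt_mfi_pass_thru() now derives an MFI command's MPT frame index as max_scsi_cmds + mfi_cmd->index, and megasas_refire_mgmt_cmd() above exploits exactly that layout: after a reset only the tail of the frame pool, [max_scsi_cmds, max_fw_cmds), can hold management commands, so nothing else needs to be walked. A sketch of the partitioning (sizes are examples):

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_fw_cmds = 1008, reserved = 8;
        unsigned int max_scsi_cmds = max_fw_cmds - reserved;
        unsigned int i;

        /* Tags [0, max_scsi_cmds) are block-layer tags for SCSI IO; each
         * internal MFI command i owns the fixed tail slot printed below. */
        for (i = 0; i < reserved; i++)
            printf("mfi cmd %u -> mpt frame %u\n", i, max_scsi_cmds + i);
        return 0;
    }
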
+ */ + for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) { + cmd_fusion = fusion->cmd_list[j]; + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + smid = le16_to_cpu(cmd_mfi->context.smid); + + if (!smid) + continue; + req_desc = megasas_get_request_descriptor + (instance, smid - 1); + if (req_desc && (cmd_mfi->frame->dcmd.opcode != + cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO))) + megasas_fire_cmd_fusion(instance, req_desc); + else + megasas_return_cmd(instance, cmd_mfi); + } } /* Check for a second path that is currently UP */ @@ -2538,14 +2550,13 @@ out: /* Core fusion reset function */ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) { - int retval = SUCCESS, i, j, retry = 0, convert = 0; + int retval = SUCCESS, i, retry = 0, convert = 0; struct megasas_instance *instance; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; - struct megasas_cmd *cmd_mfi; - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; u32 host_diag, abs_state, status_reg, reset_adapter; u32 io_timeout_in_crash_mode = 0; + struct scsi_cmnd *scmd_local = NULL; instance = (struct megasas_instance *)shost->hostdata; fusion = instance->ctrl_context; @@ -2613,15 +2624,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) iotimeout = 0; /* Now return commands back to the OS */ - for (i = 0 ; i < instance->max_fw_cmds; i++) { + for (i = 0 ; i < instance->max_scsi_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; + scmd_local = cmd_fusion->scmd; if (cmd_fusion->scmd) { - scsi_dma_unmap(cmd_fusion->scmd); - cmd_fusion->scmd->result = + scmd_local->result = megasas_check_mpio_paths(instance, - cmd_fusion->scmd); - cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); + scmd_local); megasas_return_cmd_fusion(instance, cmd_fusion); + scsi_dma_unmap(scmd_local); + scmd_local->scsi_done(scmd_local); atomic_dec(&instance->fw_outstanding); } } @@ -2790,44 +2802,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) continue; } - /* Re-fire management commands */ - for (j = 0 ; j < instance->max_fw_cmds; j++) { - cmd_fusion = fusion->cmd_list[j]; - if (cmd_fusion->sync_cmd_idx != - (u32)ULONG_MAX) { - cmd_mfi = - instance-> - cmd_list[cmd_fusion->sync_cmd_idx]; - if (cmd_mfi->frame->dcmd.opcode == - cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) { - megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion); - } else { - req_desc = - megasas_get_request_descriptor( - instance, - cmd_mfi->context.smid - -1); - if (!req_desc) { - printk(KERN_WARNING - "req_desc NULL" - " for scsi%d\n", - instance->host->host_no); - /* Return leaked MPT - frame */ - megasas_return_cmd_fusion(instance, cmd_fusion); - } else { - instance->instancet-> - fire_cmd(instance, - req_desc-> - u.low, - req_desc-> - u.high, - instance-> - reg_set); - } - } - } - } + megasas_refire_mgmt_cmd(instance); if (megasas_get_ctrl_info(instance)) { dev_info(&instance->pdev->dev, @@ -2978,7 +2953,6 @@ void megasas_fusion_ocr_wq(struct work_struct *work) } struct megasas_instance_template megasas_instance_template_fusion = { - .fire_cmd = megasas_fire_cmd_fusion, .enable_intr = megasas_enable_intr_fusion, .disable_intr = megasas_disable_intr_fusion, .clear_intr = megasas_clear_intr_fusion, diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 56e6db2d5874..ced6dc0cf8e8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -104,18 +104,18 @@ struct RAID_CONTEXT { u8 nseg:4; #endif u8 resvd0; - u16 
timeoutValue; + __le16 timeoutValue; u8 regLockFlags; u8 resvd1; - u16 VirtualDiskTgtId; - u64 regLockRowLBA; - u32 regLockLength; - u16 nextLMId; + __le16 VirtualDiskTgtId; + __le64 regLockRowLBA; + __le32 regLockLength; + __le16 nextLMId; u8 exStatus; u8 status; u8 RAIDFlags; u8 numSGE; - u16 configSeqNum; + __le16 configSeqNum; u8 spanArm; u8 resvd2[3]; }; @@ -182,61 +182,61 @@ enum REGION_TYPE { #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) struct MPI25_IEEE_SGE_CHAIN64 { - u64 Address; - u32 Length; - u16 Reserved1; + __le64 Address; + __le32 Length; + __le16 Reserved1; u8 NextChainOffset; u8 Flags; }; struct MPI2_SGE_SIMPLE_UNION { - u32 FlagsLength; + __le32 FlagsLength; union { - u32 Address32; - u64 Address64; + __le32 Address32; + __le64 Address64; } u; }; struct MPI2_SCSI_IO_CDB_EEDP32 { u8 CDB[20]; /* 0x00 */ - u32 PrimaryReferenceTag; /* 0x14 */ - u16 PrimaryApplicationTag; /* 0x18 */ - u16 PrimaryApplicationTagMask; /* 0x1A */ - u32 TransferLength; /* 0x1C */ + __be32 PrimaryReferenceTag; /* 0x14 */ + __be16 PrimaryApplicationTag; /* 0x18 */ + __be16 PrimaryApplicationTagMask; /* 0x1A */ + __le32 TransferLength; /* 0x1C */ }; struct MPI2_SGE_CHAIN_UNION { - u16 Length; + __le16 Length; u8 NextChainOffset; u8 Flags; union { - u32 Address32; - u64 Address64; + __le32 Address32; + __le64 Address64; } u; }; struct MPI2_IEEE_SGE_SIMPLE32 { - u32 Address; - u32 FlagsLength; + __le32 Address; + __le32 FlagsLength; }; struct MPI2_IEEE_SGE_CHAIN32 { - u32 Address; - u32 FlagsLength; + __le32 Address; + __le32 FlagsLength; }; struct MPI2_IEEE_SGE_SIMPLE64 { - u64 Address; - u32 Length; - u16 Reserved1; + __le64 Address; + __le32 Length; + __le16 Reserved1; u8 Reserved2; u8 Flags; }; struct MPI2_IEEE_SGE_CHAIN64 { - u64 Address; - u32 Length; - u16 Reserved1; + __le64 Address; + __le32 Length; + __le16 Reserved1; u8 Reserved2; u8 Flags; }; @@ -269,34 +269,34 @@ union MPI2_SCSI_IO_CDB_UNION { * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST */ struct MPI2_RAID_SCSI_IO_REQUEST { - u16 DevHandle; /* 0x00 */ + __le16 DevHandle; /* 0x00 */ u8 ChainOffset; /* 0x02 */ u8 Function; /* 0x03 */ - u16 Reserved1; /* 0x04 */ + __le16 Reserved1; /* 0x04 */ u8 Reserved2; /* 0x06 */ u8 MsgFlags; /* 0x07 */ u8 VP_ID; /* 0x08 */ u8 VF_ID; /* 0x09 */ - u16 Reserved3; /* 0x0A */ - u32 SenseBufferLowAddress; /* 0x0C */ - u16 SGLFlags; /* 0x10 */ + __le16 Reserved3; /* 0x0A */ + __le32 SenseBufferLowAddress; /* 0x0C */ + __le16 SGLFlags; /* 0x10 */ u8 SenseBufferLength; /* 0x12 */ u8 Reserved4; /* 0x13 */ u8 SGLOffset0; /* 0x14 */ u8 SGLOffset1; /* 0x15 */ u8 SGLOffset2; /* 0x16 */ u8 SGLOffset3; /* 0x17 */ - u32 SkipCount; /* 0x18 */ - u32 DataLength; /* 0x1C */ - u32 BidirectionalDataLength; /* 0x20 */ - u16 IoFlags; /* 0x24 */ - u16 EEDPFlags; /* 0x26 */ - u32 EEDPBlockSize; /* 0x28 */ - u32 SecondaryReferenceTag; /* 0x2C */ - u16 SecondaryApplicationTag; /* 0x30 */ - u16 ApplicationTagTranslationMask; /* 0x32 */ + __le32 SkipCount; /* 0x18 */ + __le32 DataLength; /* 0x1C */ + __le32 BidirectionalDataLength; /* 0x20 */ + __le16 IoFlags; /* 0x24 */ + __le16 EEDPFlags; /* 0x26 */ + __le32 EEDPBlockSize; /* 0x28 */ + __le32 SecondaryReferenceTag; /* 0x2C */ + __le16 SecondaryApplicationTag; /* 0x30 */ + __le16 ApplicationTagTranslationMask; /* 0x32 */ u8 LUN[8]; /* 0x34 */ - u32 Control; /* 0x3C */ + __le32 Control; /* 0x3C */ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ struct RAID_CONTEXT RaidContext; /* 0x60 */ union MPI2_SGE_IO_UNION SGL; /* 0x80 */ @@ -315,45 +315,45 @@ struct 
MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { struct MPI2_DEFAULT_REQUEST_DESCRIPTOR { u8 RequestFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u16 LMID; /* 0x04 */ - u16 DescriptorTypeDependent; /* 0x06 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 DescriptorTypeDependent; /* 0x06 */ }; /* High Priority Request Descriptor */ struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { u8 RequestFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u16 LMID; /* 0x04 */ - u16 Reserved1; /* 0x06 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 Reserved1; /* 0x06 */ }; /* SCSI IO Request Descriptor */ struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR { u8 RequestFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u16 LMID; /* 0x04 */ - u16 DevHandle; /* 0x06 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 DevHandle; /* 0x06 */ }; /* SCSI Target Request Descriptor */ struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { u8 RequestFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u16 LMID; /* 0x04 */ - u16 IoIndex; /* 0x06 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 IoIndex; /* 0x06 */ }; /* RAID Accelerator Request Descriptor */ struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { u8 RequestFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u16 LMID; /* 0x04 */ - u16 Reserved; /* 0x06 */ + __le16 SMID; /* 0x02 */ + __le16 LMID; /* 0x04 */ + __le16 Reserved; /* 0x06 */ }; /* union of Request Descriptors */ @@ -366,10 +366,10 @@ union MEGASAS_REQUEST_DESCRIPTOR_UNION { struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo; union { struct { - u32 low; - u32 high; + __le32 low; + __le32 high; } u; - u64 Words; + __le64 Words; }; }; @@ -377,35 +377,35 @@ union MEGASAS_REQUEST_DESCRIPTOR_UNION { struct MPI2_DEFAULT_REPLY_DESCRIPTOR { u8 ReplyFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 DescriptorTypeDependent1; /* 0x02 */ - u32 DescriptorTypeDependent2; /* 0x04 */ + __le16 DescriptorTypeDependent1; /* 0x02 */ + __le32 DescriptorTypeDependent2; /* 0x04 */ }; /* Address Reply Descriptor */ struct MPI2_ADDRESS_REPLY_DESCRIPTOR { u8 ReplyFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u32 ReplyFrameAddress; /* 0x04 */ + __le16 SMID; /* 0x02 */ + __le32 ReplyFrameAddress; /* 0x04 */ }; /* SCSI IO Success Reply Descriptor */ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { u8 ReplyFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u16 TaskTag; /* 0x04 */ - u16 Reserved1; /* 0x06 */ + __le16 SMID; /* 0x02 */ + __le16 TaskTag; /* 0x04 */ + __le16 Reserved1; /* 0x06 */ }; /* TargetAssist Success Reply Descriptor */ struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { u8 ReplyFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ + __le16 SMID; /* 0x02 */ u8 SequenceNumber; /* 0x04 */ u8 Reserved1; /* 0x05 */ - u16 IoIndex; /* 0x06 */ + __le16 IoIndex; /* 0x06 */ }; /* Target Command Buffer Reply Descriptor */ @@ -414,16 +414,16 @@ struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { u8 MSIxIndex; /* 0x01 */ u8 VP_ID; /* 0x02 */ u8 Flags; /* 0x03 */ - u16 InitiatorDevHandle; /* 0x04 */ - u16 IoIndex; /* 0x06 */ + __le16 InitiatorDevHandle; /* 0x04 */ + __le16 IoIndex; /* 0x06 */ }; /* RAID Accelerator Success Reply Descriptor */ struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { u8 ReplyFlags; /* 0x00 */ u8 MSIxIndex; /* 0x01 */ - u16 SMID; /* 0x02 */ - u32 Reserved; /* 0x04 */ + __le16 SMID; /* 0x02 */ + __le32 Reserved; /* 0x04 */ }; /* union of Reply 
Descriptors */ @@ -435,7 +435,7 @@ union MPI2_REPLY_DESCRIPTORS_UNION { struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; - u64 Words; + __le64 Words; }; /* IOCInit Request message */ @@ -444,28 +444,28 @@ struct MPI2_IOC_INIT_REQUEST { u8 Reserved1; /* 0x01 */ u8 ChainOffset; /* 0x02 */ u8 Function; /* 0x03 */ - u16 Reserved2; /* 0x04 */ + __le16 Reserved2; /* 0x04 */ u8 Reserved3; /* 0x06 */ u8 MsgFlags; /* 0x07 */ u8 VP_ID; /* 0x08 */ u8 VF_ID; /* 0x09 */ - u16 Reserved4; /* 0x0A */ - u16 MsgVersion; /* 0x0C */ - u16 HeaderVersion; /* 0x0E */ + __le16 Reserved4; /* 0x0A */ + __le16 MsgVersion; /* 0x0C */ + __le16 HeaderVersion; /* 0x0E */ u32 Reserved5; /* 0x10 */ - u16 Reserved6; /* 0x14 */ + __le16 Reserved6; /* 0x14 */ u8 Reserved7; /* 0x16 */ u8 HostMSIxVectors; /* 0x17 */ - u16 Reserved8; /* 0x18 */ - u16 SystemRequestFrameSize; /* 0x1A */ - u16 ReplyDescriptorPostQueueDepth; /* 0x1C */ - u16 ReplyFreeQueueDepth; /* 0x1E */ - u32 SenseBufferAddressHigh; /* 0x20 */ - u32 SystemReplyAddressHigh; /* 0x24 */ - u64 SystemRequestFrameBaseAddress; /* 0x28 */ - u64 ReplyDescriptorPostQueueAddress;/* 0x30 */ - u64 ReplyFreeQueueAddress; /* 0x38 */ - u64 TimeStamp; /* 0x40 */ + __le16 Reserved8; /* 0x18 */ + __le16 SystemRequestFrameSize; /* 0x1A */ + __le16 ReplyDescriptorPostQueueDepth; /* 0x1C */ + __le16 ReplyFreeQueueDepth; /* 0x1E */ + __le32 SenseBufferAddressHigh; /* 0x20 */ + __le32 SystemReplyAddressHigh; /* 0x24 */ + __le64 SystemRequestFrameBaseAddress; /* 0x28 */ + __le64 ReplyDescriptorPostQueueAddress;/* 0x30 */ + __le64 ReplyFreeQueueAddress; /* 0x38 */ + __le64 TimeStamp; /* 0x40 */ }; /* mrpriv defines */ @@ -491,41 +491,41 @@ struct MPI2_IOC_INIT_REQUEST { #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 struct MR_DEV_HANDLE_INFO { - u16 curDevHdl; + __le16 curDevHdl; u8 validHandles; u8 reserved; - u16 devHandle[2]; + __le16 devHandle[2]; }; struct MR_ARRAY_INFO { - u16 pd[MAX_RAIDMAP_ROW_SIZE]; + __le16 pd[MAX_RAIDMAP_ROW_SIZE]; }; struct MR_QUAD_ELEMENT { - u64 logStart; - u64 logEnd; - u64 offsetInSpan; - u32 diff; - u32 reserved1; + __le64 logStart; + __le64 logEnd; + __le64 offsetInSpan; + __le32 diff; + __le32 reserved1; }; struct MR_SPAN_INFO { - u32 noElements; - u32 reserved1; + __le32 noElements; + __le32 reserved1; struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; }; struct MR_LD_SPAN { - u64 startBlk; - u64 numBlks; - u16 arrayRef; + __le64 startBlk; + __le64 numBlks; + __le16 arrayRef; u8 spanRowSize; u8 spanRowDataSize; u8 reserved[4]; }; struct MR_SPAN_BLOCK_INFO { - u64 num_rows; + __le64 num_rows; struct MR_LD_SPAN span; struct MR_SPAN_INFO block_span_info; }; @@ -558,8 +558,8 @@ struct MR_LD_RAID { u32 reserved4:7; #endif } capability; - u32 reserved6; - u64 size; + __le32 reserved6; + __le64 size; u8 spanDepth; u8 level; u8 stripeShift; @@ -568,12 +568,12 @@ struct MR_LD_RAID { u8 writeMode; u8 PRL; u8 SRL; - u16 targetId; + __le16 targetId; u8 ldState; u8 regTypeReqOnWrite; u8 modFactor; u8 regTypeReqOnRead; - u16 seqNum; + __le16 seqNum; struct { u32 ldSyncRequired:1; @@ -592,20 +592,20 @@ struct MR_LD_SPAN_MAP { }; struct MR_FW_RAID_MAP { - u32 totalSize; + __le32 totalSize; union { struct { - u32 maxLd; - u32 maxSpanDepth; - u32 maxRowSize; - u32 maxPdCount; - u32 maxArrays; + __le32 maxLd; + __le32 maxSpanDepth; + __le32 maxRowSize; + __le32 maxPdCount; + __le32 maxArrays; } validationInfo; - u32 version[5]; + __le32 version[5]; }; - u32 ldCount; 
- u32 Reserved1; + __le32 ldCount; + __le32 Reserved1; u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+ MAX_RAIDMAP_VIEWS]; u8 fpPdIoTimeoutSec; @@ -620,7 +620,7 @@ struct IO_REQUEST_INFO { u32 numBlocks; u16 ldTgtId; u8 isRead; - u16 devHandle; + __le16 devHandle; u64 pdBlock; u8 fpOkForIo; u8 IoforUnevenSpan; @@ -634,7 +634,7 @@ struct IO_REQUEST_INFO { struct MR_LD_TARGET_SYNC { u8 targetId; u8 reserved; - u16 seqNum; + __le16 seqNum; }; #define IEEE_SGE_FLAGS_ADDR_MASK (0x03) @@ -679,7 +679,6 @@ struct megasas_cmd_fusion { */ u32 sync_cmd_idx; u32 index; - u8 flags; u8 pd_r1_lb; }; @@ -720,27 +719,27 @@ struct MR_DRV_RAID_MAP { * This feild will be manupulated by driver for ext raid map, * else pick the value from firmware raid map. */ - u32 totalSize; + __le32 totalSize; union { struct { - u32 maxLd; - u32 maxSpanDepth; - u32 maxRowSize; - u32 maxPdCount; - u32 maxArrays; + __le32 maxLd; + __le32 maxSpanDepth; + __le32 maxRowSize; + __le32 maxPdCount; + __le32 maxArrays; } validationInfo; - u32 version[5]; + __le32 version[5]; }; /* timeout value used by driver in FP IOs*/ u8 fpPdIoTimeoutSec; u8 reserved2[7]; - u16 ldCount; - u16 arCount; - u16 spanCount; - u16 reserve3; + __le16 ldCount; + __le16 arCount; + __le16 spanCount; + __le16 reserve3; struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; @@ -779,10 +778,10 @@ struct MR_FW_RAID_MAP_EXT { u8 fpPdIoTimeoutSec; u8 reserved2[7]; - u16 ldCount; - u16 arCount; - u16 spanCount; - u16 reserve3; + __le16 ldCount; + __le16 arCount; + __le16 spanCount; + __le16 reserve3; struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT]; @@ -792,10 +791,6 @@ struct MR_FW_RAID_MAP_EXT { struct fusion_context { struct megasas_cmd_fusion **cmd_list; - struct list_head cmd_pool; - - spinlock_t mpt_pool_lock; - dma_addr_t req_frames_desc_phys; u8 *req_frames_desc; @@ -839,10 +834,10 @@ struct fusion_context { }; union desc_value { - u64 word; + __le64 word; struct { - u32 low; - u32 high; + __le32 low; + __le32 high; } u; }; diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 53030b0e8015..d40d734aa53a 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -56,7 +56,6 @@ static struct scsi_host_template mvs_sht = { .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .can_queue = 1, - .cmd_per_lun = 1, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index c6077cefbeca..53c84771f0e8 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -274,7 +274,6 @@ static struct scsi_host_template nsp32_template = { .can_queue = 1, .sg_tablesize = NSP32_SG_SIZE, .max_sectors = 128, - .cmd_per_lun = 1, .this_id = NSP32_HOST_SCSIID, .use_clustering = DISABLE_CLUSTERING, .eh_abort_handler = nsp32_eh_abort, diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 1b6c8833a304..5fb6eefc6541 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -86,7 +86,6 @@ static struct scsi_host_template nsp_driver_template = { .can_queue = 1, .this_id = NSP_INITIATOR_ID, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c index bcaf89fe0c9e..c670dc704c74 100644 --- a/drivers/scsi/pcmcia/qlogic_stub.c +++ b/drivers/scsi/pcmcia/qlogic_stub.c @@ -72,7 
+72,6 @@ static struct scsi_host_template qlogicfas_driver_template = { .can_queue = 1, .this_id = -1, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index 155f9573021f..20011c8afbb5 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -680,7 +680,6 @@ static struct scsi_host_template sym53c500_driver_template = { .can_queue = 1, .this_id = 7, .sg_tablesize = 32, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = SYM53C500_shost_attrs }; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 65555916d3b8..a132f2664d2f 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -78,7 +78,6 @@ static struct scsi_host_template pm8001_sht = { .change_queue_depth = sas_change_queue_depth, .bios_param = sas_bios_param, .can_queue = 1, - .cmd_per_lun = 1, .this_id = -1, .sg_tablesize = SG_ALL, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 1db8b26063b4..ee00e27ba396 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -974,7 +974,6 @@ static struct scsi_host_template ppa_template = { .bios_param = ppa_biosparam, .this_id = -1, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, .can_queue = 1, .slave_alloc = ppa_adjust_queue, diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 5298def33733..4924424d20fe 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c @@ -347,7 +347,6 @@ static struct scsi_host_template ps3rom_host_template = { .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .emulated = 1, /* only sg driver uses this */ .max_sectors = PS3ROM_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index c68a66e8cfc1..5d0ec42a9317 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -4217,7 +4217,6 @@ static struct scsi_host_template qla1280_driver_template = { .can_queue = 0xfffff, .this_id = -1, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 285cb204f300..664013115c9d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -708,7 +708,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00d4, "Unable to initialize ISP84XX.\n"); - qla84xx_put_chip(vha); + qla84xx_put_chip(vha); } } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a1ab25fca874..36fbd4c7af8f 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2797,10 +2797,10 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) handle = req->current_outstanding_cmd; for (index = 1; index < req->num_outstanding_cmds; index++) { handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; } if (index == req->num_outstanding_cmds) { diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6dc14cd782b2..5559d5e75bbf 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1580,7 +1580,7 @@ 
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) ql_log(ql_log_warn, fcport->vha, 0x503c, "Async-%s error - hdl=%x response(%x).\n", type, sp->handle, sts->data[3]); - iocb->u.tmf.data = QLA_FUNCTION_FAILED; + iocb->u.tmf.data = QLA_FUNCTION_FAILED; } } @@ -1979,7 +1979,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, rval = EXT_STATUS_ERR; break; } - bsg_job->reply->reply_payload_rcv_len = 0; + bsg_job->reply->reply_payload_rcv_len = 0; done: /* Return the vendor specific reply to API */ diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 7d2b18f2675c..1620b0ec977b 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1843,7 +1843,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha) ptab_desc = qla82xx_get_table_desc(unirom, QLA82XX_URI_DIR_SECT_PRODUCT_TBL); - if (!ptab_desc) + if (!ptab_desc) return -1; entries = cpu_to_le32(ptab_desc->num_entries); diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index ed4d6b6b53e3..000c57e4d033 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -397,11 +397,11 @@ qla8044_idc_lock(struct qla_hw_data *ha) * has the lock, wait for 2secs * and retry */ - ql_dbg(ql_dbg_p3p, vha, 0xb08a, - "%s: IDC lock Recovery by %d " - "failed, Retrying timeout\n", __func__, - ha->portnum); - timeout = 0; + ql_dbg(ql_dbg_p3p, vha, 0xb08a, + "%s: IDC lock Recovery by %d " + "failed, Retrying timeout\n", __func__, + ha->portnum); + timeout = 0; } } msleep(QLA8044_DRV_LOCK_MSLEEP); @@ -3141,8 +3141,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, goto error; addr7 = addr2 - (4 * stride1); - data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, - mask, addr7); + data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7); if (data == -1) goto error; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 7462dd70b150..a28815b8276f 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4418,7 +4418,10 @@ retry_lock2: void qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id) { - uint16_t options = (requester_id << 15) | BIT_7, retry; +#if 0 + uint16_t options = (requester_id << 15) | BIT_7; +#endif + uint16_t retry; uint32_t data; struct qla_hw_data *ha = base_vha->hw; @@ -4454,6 +4457,7 @@ retry_unlock: return; +#if 0 /* XXX: IDC-unlock implementation using access-control mbx */ retry = 0; retry_unlock2: @@ -4469,6 +4473,7 @@ retry_unlock2: } return; +#endif } int diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index fe8a8d157e22..4a484d60be0d 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3712,6 +3712,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) { +#if 1 + /* + * FIXME: Reject non zero SRR relative offset until we can test + * this code properly. + */ + pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); + return -1; +#else struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; size_t first_offset = 0, rem_offset = offset, tmp = 0; int i, sg_srr_cnt, bufflen = 0; @@ -3721,13 +3729,6 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) "cmd->sg_cnt: %u, direction: %d\n", cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - /* - * FIXME: Reject non zero SRR relative offset until we can test - * this code properly. 
- */ - pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); - return -1; - if (!cmd->sg || !cmd->sg_cnt) { ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, "Missing cmd->sg or zero cmd->sg_cnt in" @@ -3810,6 +3811,7 @@ static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) BUG(); return 0; +#endif } static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c index 556c1525f881..5d4f8e67fb25 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -828,7 +828,7 @@ void qla4_83xx_read_reset_template(struct scsi_qla_host *ha) ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff, tmplt_hdr_def_size); if (ret_val != QLA_SUCCESS) { - ql4_printk(KERN_ERR, ha, "%s: Failed to read reset tempelate\n", + ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n", __func__); goto exit_read_template_error; } diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c index 9f92cbf96477..415ee5eb3fc7 100644 --- a/drivers/scsi/qla4xxx/ql4_bsg.c +++ b/drivers/scsi/qla4xxx/ql4_bsg.c @@ -571,7 +571,7 @@ static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha, if ((config & ENABLE_INTERNAL_LOOPBACK) || (config & ENABLE_EXTERNAL_LOOPBACK)) { - ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid requiest\n", + ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n", __func__); goto exit_pre_loopback_config; } diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c index a22bb1b40ce2..61cac87fb86f 100644 --- a/drivers/scsi/qlogicfas.c +++ b/drivers/scsi/qlogicfas.c @@ -193,7 +193,6 @@ static struct scsi_host_template qlogicfas_driver_template = { .can_queue = 1, .this_id = -1, .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, .use_clustering = DISABLE_CLUSTERING, }; diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index fe122700cad8..676385ff28ef 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1287,7 +1287,6 @@ static struct scsi_host_template qpti_template = { .can_queue = QLOGICPTI_REQ_QUEUE_LEN, .this_id = 7, .sg_tablesize = QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), - .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3833bf59fb66..207d6a7a1bd0 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -98,52 +98,6 @@ EXPORT_SYMBOL(scsi_sd_probe_domain); ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); EXPORT_SYMBOL(scsi_sd_pm_domain); -/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. - * You may not alter any existing entry (although adding new ones is - * encouraged once assigned by ANSI/INCITS T10 - */ -static const char *const scsi_device_types[] = { - "Direct-Access ", - "Sequential-Access", - "Printer ", - "Processor ", - "WORM ", - "CD-ROM ", - "Scanner ", - "Optical Device ", - "Medium Changer ", - "Communications ", - "ASC IT8 ", - "ASC IT8 ", - "RAID ", - "Enclosure ", - "Direct-Access-RBC", - "Optical card ", - "Bridge controller", - "Object storage ", - "Automation/Drive ", - "Security Manager ", - "Direct-Access-ZBC", -}; - -/** - * scsi_device_type - Return 17 char string indicating device type. 
- * @type: type number to look up - */ - -const char * scsi_device_type(unsigned type) -{ - if (type == 0x1e) - return "Well-known LUN "; - if (type == 0x1f) - return "No Device "; - if (type >= ARRAY_SIZE(scsi_device_types)) - return "Unknown "; - return scsi_device_types[type]; -} - -EXPORT_SYMBOL(scsi_device_type); - struct scsi_host_cmd_pool { struct kmem_cache *cmd_slab; struct kmem_cache *sense_slab; diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c new file mode 100644 index 000000000000..2ff092252b76 --- /dev/null +++ b/drivers/scsi/scsi_common.c @@ -0,0 +1,178 @@ +/* + * SCSI functions used by both the initiator and the target code. + */ + +#include <linux/bug.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <scsi/scsi_common.h> + +/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. + * You may not alter any existing entry (although adding new ones is + * encouraged once assigned by ANSI/INCITS T10 + */ +static const char *const scsi_device_types[] = { + "Direct-Access ", + "Sequential-Access", + "Printer ", + "Processor ", + "WORM ", + "CD-ROM ", + "Scanner ", + "Optical Device ", + "Medium Changer ", + "Communications ", + "ASC IT8 ", + "ASC IT8 ", + "RAID ", + "Enclosure ", + "Direct-Access-RBC", + "Optical card ", + "Bridge controller", + "Object storage ", + "Automation/Drive ", + "Security Manager ", + "Direct-Access-ZBC", +}; + +/** + * scsi_device_type - Return 17 char string indicating device type. + * @type: type number to look up + */ +const char *scsi_device_type(unsigned type) +{ + if (type == 0x1e) + return "Well-known LUN "; + if (type == 0x1f) + return "No Device "; + if (type >= ARRAY_SIZE(scsi_device_types)) + return "Unknown "; + return scsi_device_types[type]; +} +EXPORT_SYMBOL(scsi_device_type); + +/** + * scsilun_to_int - convert a scsi_lun to an int + * @scsilun: struct scsi_lun to be converted. + * + * Description: + * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered + * integer, and return the result. The caller must check for + * truncation before using this function. + * + * Notes: + * For a description of the LUN format, post SCSI-3 see the SCSI + * Architecture Model, for SCSI-3 see the SCSI Controller Commands. + * + * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function + * returns the integer: 0x0b03d204 + * + * This encoding will return a standard integer LUN for LUNs smaller + * than 256, which typically use a single level LUN structure with + * addressing method 0. + */ +u64 scsilun_to_int(struct scsi_lun *scsilun) +{ + int i; + u64 lun; + + lun = 0; + for (i = 0; i < sizeof(lun); i += 2) + lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) | + ((u64)scsilun->scsi_lun[i + 1] << (i * 8))); + return lun; +} +EXPORT_SYMBOL(scsilun_to_int); + +/** + * int_to_scsilun - reverts an int into a scsi_lun + * @lun: integer to be reverted + * @scsilun: struct scsi_lun to be set. + * + * Description: + * Reverts the functionality of the scsilun_to_int, which packed + * an 8-byte lun value into an int. This routine unpacks the int + * back into the lun value. 
+ * + * Notes: + * Given an integer : 0x0b03d204, this function returns a + * struct scsi_lun of: d2 04 0b 03 00 00 00 00 + * + */ +void int_to_scsilun(u64 lun, struct scsi_lun *scsilun) +{ + int i; + + memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun)); + + for (i = 0; i < sizeof(lun); i += 2) { + scsilun->scsi_lun[i] = (lun >> 8) & 0xFF; + scsilun->scsi_lun[i+1] = lun & 0xFF; + lun = lun >> 16; + } +} +EXPORT_SYMBOL(int_to_scsilun); + +/** + * scsi_normalize_sense - normalize main elements from either fixed or + * descriptor sense data format into a common format. + * + * @sense_buffer: byte array containing sense data returned by device + * @sb_len: number of valid bytes in sense_buffer + * @sshdr: pointer to instance of structure that common + * elements are written to. + * + * Notes: + * The "main elements" from sense data are: response_code, sense_key, + * asc, ascq and additional_length (only for descriptor format). + * + * Typically this function can be called after a device has + * responded to a SCSI command with the CHECK_CONDITION status. + * + * Return value: + * true if valid sense data information found, else false; + */ +bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, + struct scsi_sense_hdr *sshdr) +{ + if (!sense_buffer || !sb_len) + return false; + + memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); + + sshdr->response_code = (sense_buffer[0] & 0x7f); + + if (!scsi_sense_valid(sshdr)) + return false; + + if (sshdr->response_code >= 0x72) { + /* + * descriptor format + */ + if (sb_len > 1) + sshdr->sense_key = (sense_buffer[1] & 0xf); + if (sb_len > 2) + sshdr->asc = sense_buffer[2]; + if (sb_len > 3) + sshdr->ascq = sense_buffer[3]; + if (sb_len > 7) + sshdr->additional_length = sense_buffer[7]; + } else { + /* + * fixed format + */ + if (sb_len > 2) + sshdr->sense_key = (sense_buffer[2] & 0xf); + if (sb_len > 7) { + sb_len = (sb_len < (sense_buffer[7] + 8)) ? + sb_len : (sense_buffer[7] + 8); + if (sb_len > 12) + sshdr->asc = sense_buffer[12]; + if (sb_len > 13) + sshdr->ascq = sense_buffer[13]; + } + } + + return true; +} +EXPORT_SYMBOL(scsi_normalize_sense); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index c95a4e943fc6..106884a5444e 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -2399,70 +2399,6 @@ out_put_autopm_host: } EXPORT_SYMBOL(scsi_ioctl_reset); -/** - * scsi_normalize_sense - normalize main elements from either fixed or - * descriptor sense data format into a common format. - * - * @sense_buffer: byte array containing sense data returned by device - * @sb_len: number of valid bytes in sense_buffer - * @sshdr: pointer to instance of structure that common - * elements are written to. - * - * Notes: - * The "main elements" from sense data are: response_code, sense_key, - * asc, ascq and additional_length (only for descriptor format). - * - * Typically this function can be called after a device has - * responded to a SCSI command with the CHECK_CONDITION status. 
- * - * Return value: - * true if valid sense data information found, else false; - */ -bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, - struct scsi_sense_hdr *sshdr) -{ - if (!sense_buffer || !sb_len) - return false; - - memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); - - sshdr->response_code = (sense_buffer[0] & 0x7f); - - if (!scsi_sense_valid(sshdr)) - return false; - - if (sshdr->response_code >= 0x72) { - /* - * descriptor format - */ - if (sb_len > 1) - sshdr->sense_key = (sense_buffer[1] & 0xf); - if (sb_len > 2) - sshdr->asc = sense_buffer[2]; - if (sb_len > 3) - sshdr->ascq = sense_buffer[3]; - if (sb_len > 7) - sshdr->additional_length = sense_buffer[7]; - } else { - /* - * fixed format - */ - if (sb_len > 2) - sshdr->sense_key = (sense_buffer[2] & 0xf); - if (sb_len > 7) { - sb_len = (sb_len < (sense_buffer[7] + 8)) ? - sb_len : (sense_buffer[7] + 8); - if (sb_len > 12) - sshdr->asc = sense_buffer[12]; - if (sb_len > 13) - sshdr->ascq = sense_buffer[13]; - } - } - - return true; -} -EXPORT_SYMBOL(scsi_normalize_sense); - bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, struct scsi_sense_hdr *sshdr) { diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 6efab1c455e1..f9f3f8203d42 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -280,7 +280,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->host->cmd_per_lun, shost->bqt, shost->hostt->tag_alloc_policy); } - scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun); + scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ? + sdev->host->cmd_per_lun : 1); scsi_sysfs_device_initialize(sdev); @@ -1269,68 +1270,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, } /** - * scsilun_to_int - convert a scsi_lun to an int - * @scsilun: struct scsi_lun to be converted. - * - * Description: - * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered - * integer, and return the result. The caller must check for - * truncation before using this function. - * - * Notes: - * For a description of the LUN format, post SCSI-3 see the SCSI - * Architecture Model, for SCSI-3 see the SCSI Controller Commands. - * - * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function - * returns the integer: 0x0b03d204 - * - * This encoding will return a standard integer LUN for LUNs smaller - * than 256, which typically use a single level LUN structure with - * addressing method 0. - **/ -u64 scsilun_to_int(struct scsi_lun *scsilun) -{ - int i; - u64 lun; - - lun = 0; - for (i = 0; i < sizeof(lun); i += 2) - lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) | - ((u64)scsilun->scsi_lun[i + 1] << (i * 8))); - return lun; -} -EXPORT_SYMBOL(scsilun_to_int); - -/** - * int_to_scsilun - reverts an int into a scsi_lun - * @lun: integer to be reverted - * @scsilun: struct scsi_lun to be set. - * - * Description: - * Reverts the functionality of the scsilun_to_int, which packed - * an 8-byte lun value into an int. This routine unpacks the int - * back into the lun value. 
- * - * Notes: - * Given an integer : 0x0b03d204, this function returns a - * struct scsi_lun of: d2 04 0b 03 00 00 00 00 - * - **/ -void int_to_scsilun(u64 lun, struct scsi_lun *scsilun) -{ - int i; - - memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun)); - - for (i = 0; i < sizeof(lun); i += 2) { - scsilun->scsi_lun[i] = (lun >> 8) & 0xFF; - scsilun->scsi_lun[i+1] = lun & 0xFF; - lun = lun >> 16; - } -} -EXPORT_SYMBOL(int_to_scsilun); - -/** * scsi_report_lun_scan - Scan using SCSI REPORT LUN results * @starget: which target * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 67d43e35693d..55647aae065c 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -204,6 +204,8 @@ iscsi_create_endpoint(int dd_size) iscsi_match_epid); if (!dev) break; + else + put_device(dev); } if (id == ISCSI_MAX_EPID) { printk(KERN_ERR "Too many connections. Max supported %u\n", diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index ae45bd99baed..a85292b1d09d 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -61,6 +61,11 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) return dev_to_shost(r->dev.parent); } +static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) +{ + return transport_class_to_srp_rport(&shost->shost_gendev); +} + /** * srp_tmo_valid() - check timeout combination validity * @reconnect_delay: Reconnect delay in seconds. @@ -396,6 +401,36 @@ static void srp_reconnect_work(struct work_struct *work) } } +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + * @shost: SCSI host for which to count the number of scsi_request_fn() callers. + * + * To do: add support for scsi-mq in this function. + */ +static int scsi_request_fn_active(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + struct request_queue *q; + int request_fn_active = 0; + + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + + spin_lock_irq(q->queue_lock); + request_fn_active += q->request_fn_active; + spin_unlock_irq(q->queue_lock); + } + + return request_fn_active; +} + +/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */ +static void srp_wait_for_queuecommand(struct Scsi_Host *shost) +{ + while (scsi_request_fn_active(shost)) + msleep(20); +} + static void __rport_fail_io_fast(struct srp_rport *rport) { struct Scsi_Host *shost = rport_to_shost(rport); @@ -409,8 +444,10 @@ static void __rport_fail_io_fast(struct srp_rport *rport) /* Involve the LLD if possible to terminate all I/O on the rport. */ i = to_srp_internal(shost->transportt); - if (i->f->terminate_rport_io) + if (i->f->terminate_rport_io) { + srp_wait_for_queuecommand(shost); i->f->terminate_rport_io(rport); + } } /** @@ -504,27 +541,6 @@ void srp_start_tl_fail_timers(struct srp_rport *rport) EXPORT_SYMBOL(srp_start_tl_fail_timers); /** - * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() - * @shost: SCSI host for which to count the number of scsi_request_fn() callers. 
- */ -static int scsi_request_fn_active(struct Scsi_Host *shost) -{ - struct scsi_device *sdev; - struct request_queue *q; - int request_fn_active = 0; - - shost_for_each_device(sdev, shost) { - q = sdev->request_queue; - - spin_lock_irq(q->queue_lock); - request_fn_active += q->request_fn_active; - spin_unlock_irq(q->queue_lock); - } - - return request_fn_active; -} - -/** * srp_reconnect_rport() - reconnect to an SRP target port * @rport: SRP target port. * @@ -559,8 +575,7 @@ int srp_reconnect_rport(struct srp_rport *rport) if (res) goto out; scsi_target_block(&shost->shost_gendev); - while (scsi_request_fn_active(shost)) - msleep(20); + srp_wait_for_queuecommand(shost); res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; pr_debug("%s (state %d): transport.reconnect() returned %d\n", dev_name(&shost->shost_gendev), rport->state, res); @@ -618,9 +633,11 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; struct srp_internal *i = to_srp_internal(shost->transportt); + struct srp_rport *rport = shost_to_rport(shost); pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); - return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? + return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && + i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7f9d65fe4fd9..3b2fcb4fada0 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2988,7 +2988,8 @@ static int sd_probe(struct device *dev) sdkp->dev.class = &sd_disk_class; dev_set_name(&sdkp->dev, "%s", dev_name(dev)); - if (device_add(&sdkp->dev)) + error = device_add(&sdkp->dev); + if (error) goto out_free_index; get_device(dev); diff --git a/drivers/scsi/snic/Makefile b/drivers/scsi/snic/Makefile new file mode 100644 index 000000000000..ef7c0dd47f40 --- /dev/null +++ b/drivers/scsi/snic/Makefile @@ -0,0 +1,17 @@ +obj-$(CONFIG_SCSI_SNIC) += snic.o + +snic-y := \ + snic_attrs.o \ + snic_main.o \ + snic_res.o \ + snic_isr.o \ + snic_ctl.o \ + snic_io.o \ + snic_scsi.o \ + snic_disc.o \ + vnic_cq.o \ + vnic_intr.o \ + vnic_dev.o \ + vnic_wq.o + +snic-$(CONFIG_SCSI_SNIC_DEBUG_FS) += snic_debugfs.o snic_trc.o diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h new file mode 100644 index 000000000000..a5290562c1fa --- /dev/null +++ b/drivers/scsi/snic/cq_desc.h @@ -0,0 +1,77 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _CQ_DESC_H_ +#define _CQ_DESC_H_ + +/* + * Completion queue descriptor types + */ +enum cq_desc_types { + CQ_DESC_TYPE_WQ_ENET = 0, + CQ_DESC_TYPE_DESC_COPY = 1, + CQ_DESC_TYPE_WQ_EXCH = 2, + CQ_DESC_TYPE_RQ_ENET = 3, + CQ_DESC_TYPE_RQ_FCP = 4, +}; + +/* Completion queue descriptor: 16B + * + * All completion queues have this basic layout. The + * type_specific area is unique for each completion + * queue type. + */ +struct cq_desc { + __le16 completed_index; + __le16 q_number; + u8 type_specific[11]; + u8 type_color; +}; + +#define CQ_DESC_TYPE_BITS 4 +#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) +#define CQ_DESC_COLOR_MASK 1 +#define CQ_DESC_COLOR_SHIFT 7 +#define CQ_DESC_Q_NUM_BITS 10 +#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) +#define CQ_DESC_COMP_NDX_BITS 12 +#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) + +static inline void cq_desc_dec(const struct cq_desc *desc_arg, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +#endif /* _CQ_DESC_H_ */ diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h new file mode 100644 index 000000000000..0a1be2ed0288 --- /dev/null +++ b/drivers/scsi/snic/cq_enet_desc.h @@ -0,0 +1,38 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _CQ_ENET_DESC_H_ +#define _CQ_ENET_DESC_H_ + +#include "cq_desc.h" + +/* Ethernet completion queue descriptor: 16B */ +struct cq_enet_wq_desc { + __le16 completed_index; + __le16 q_number; + u8 reserved[11]; + u8 type_color; +}; + +static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, + u8 *type, u8 *color, u16 *q_number, u16 *completed_index) +{ + cq_desc_dec((struct cq_desc *)desc, type, + color, q_number, completed_index); +} + +#endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h new file mode 100644 index 000000000000..d7f5ba6ba84c --- /dev/null +++ b/drivers/scsi/snic/snic.h @@ -0,0 +1,414 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _SNIC_H_ +#define _SNIC_H_ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/mempool.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> + +#include "snic_disc.h" +#include "snic_io.h" +#include "snic_res.h" +#include "snic_trc.h" +#include "snic_stats.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "vnic_snic.h" + +#define SNIC_DRV_NAME "snic" +#define SNIC_DRV_DESCRIPTION "Cisco SCSI NIC Driver" +#define SNIC_DRV_VERSION "0.0.1.18" +#define PFX SNIC_DRV_NAME ":" +#define DFX SNIC_DRV_NAME "%d: " + +#define DESC_CLEAN_LOW_WATERMARK 8 +#define SNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ +#define SNIC_MAX_IO_REQ 50 /* scsi_cmnd tag map entries */ +#define SNIC_MIN_IO_REQ 8 /* Min IO throttle count */ +#define SNIC_IO_LOCKS 64 /* IO locks: power of 2 */ +#define SNIC_DFLT_QUEUE_DEPTH 32 /* Default Queue Depth */ +#define SNIC_MAX_QUEUE_DEPTH 64 /* Max Queue Depth */ +#define SNIC_DFLT_CMD_TIMEOUT 90 /* Extended tmo for FW */ + +/* + * Tag bits used for special requests. 
+ */ +#define SNIC_TAG_ABORT BIT(30) /* Tag indicating abort */ +#define SNIC_TAG_DEV_RST BIT(29) /* Tag for device reset */ +#define SNIC_TAG_IOCTL_DEV_RST BIT(28) /* Tag for User Device Reset */ +#define SNIC_TAG_MASK (BIT(24) - 1) /* Mask for lookup */ +#define SNIC_NO_TAG -1 + +/* + * Command flags to identify the type of command and for other future use + */ +#define SNIC_NO_FLAGS 0 +#define SNIC_IO_INITIALIZED BIT(0) +#define SNIC_IO_ISSUED BIT(1) +#define SNIC_IO_DONE BIT(2) +#define SNIC_IO_REQ_NULL BIT(3) +#define SNIC_IO_ABTS_PENDING BIT(4) +#define SNIC_IO_ABORTED BIT(5) +#define SNIC_IO_ABTS_ISSUED BIT(6) +#define SNIC_IO_TERM_ISSUED BIT(7) +#define SNIC_IO_ABTS_TIMEDOUT BIT(8) +#define SNIC_IO_ABTS_TERM_DONE BIT(9) +#define SNIC_IO_ABTS_TERM_REQ_NULL BIT(10) +#define SNIC_IO_ABTS_TERM_TIMEDOUT BIT(11) +#define SNIC_IO_INTERNAL_TERM_PENDING BIT(12) +#define SNIC_IO_INTERNAL_TERM_ISSUED BIT(13) +#define SNIC_DEVICE_RESET BIT(14) +#define SNIC_DEV_RST_ISSUED BIT(15) +#define SNIC_DEV_RST_TIMEDOUT BIT(16) +#define SNIC_DEV_RST_ABTS_ISSUED BIT(17) +#define SNIC_DEV_RST_TERM_ISSUED BIT(18) +#define SNIC_DEV_RST_DONE BIT(19) +#define SNIC_DEV_RST_REQ_NULL BIT(20) +#define SNIC_DEV_RST_ABTS_DONE BIT(21) +#define SNIC_DEV_RST_TERM_DONE BIT(22) +#define SNIC_DEV_RST_ABTS_PENDING BIT(23) +#define SNIC_DEV_RST_PENDING BIT(24) +#define SNIC_DEV_RST_NOTSUP BIT(25) +#define SNIC_SCSI_CLEANUP BIT(26) +#define SNIC_HOST_RESET_ISSUED BIT(27) + +#define SNIC_ABTS_TIMEOUT 30000 /* msec */ +#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */ +#define SNIC_HOST_RESET_TIMEOUT 30000 /* msec */ + + +/* + * These are protected by the hashed req_lock. + */ +#define CMD_SP(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->rqi) +#define CMD_STATE(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->state) +#define CMD_ABTS_STATUS(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->abts_status) +#define CMD_LR_STATUS(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->lr_status) +#define CMD_FLAGS(Cmnd) \ + (((struct snic_internal_io_state *)scsi_cmd_priv(Cmnd))->flags) + +#define SNIC_INVALID_CODE 0x100 /* Hdr Status val unused by firmware */ + +#define SNIC_MAX_TARGET 256 +#define SNIC_FLAGS_NONE (0) + +/* snic module params */ +extern unsigned int snic_max_qdepth; + +/* snic debugging */ +extern unsigned int snic_log_level; + +#define SNIC_MAIN_LOGGING 0x1 +#define SNIC_SCSI_LOGGING 0x2 +#define SNIC_ISR_LOGGING 0x8 +#define SNIC_DESC_LOGGING 0x10 + +#define SNIC_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(snic_log_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define SNIC_MAIN_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_MAIN_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ## args);) + +#define SNIC_SCSI_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ##args);) + +#define SNIC_DISC_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_SCSI_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ##args);) + +#define SNIC_ISR_DBG(host, fmt, args...) \ + SNIC_CHECK_LOGGING(SNIC_ISR_LOGGING, \ + shost_printk(KERN_INFO, host, fmt, ##args);) + +#define SNIC_HOST_ERR(host, fmt, args...) \ + shost_printk(KERN_ERR, host, fmt, ##args) + +#define SNIC_HOST_INFO(host, fmt, args...) \ + shost_printk(KERN_INFO, host, fmt, ##args) + +#define SNIC_INFO(fmt, args...) \ + pr_info(PFX fmt, ## args) + +#define SNIC_DBG(fmt, args...) 
\ + pr_info(PFX fmt, ## args) + +#define SNIC_ERR(fmt, args...) \ + pr_err(PFX fmt, ## args) + +#ifdef DEBUG +#define SNIC_BUG_ON(EXPR) \ + ({ \ + if (EXPR) { \ + SNIC_ERR("SNIC BUG(%s)\n", #EXPR); \ + BUG_ON(EXPR); \ + } \ + }) +#else +#define SNIC_BUG_ON(EXPR) \ + ({ \ + if (EXPR) { \ + SNIC_ERR("SNIC BUG(%s) at %s : %d\n", \ + #EXPR, __func__, __LINE__); \ + WARN_ON_ONCE(EXPR); \ + } \ + }) +#endif + +/* Soft assert */ +#define SNIC_ASSERT_NOT_IMPL(EXPR) \ + ({ \ + if (EXPR) {\ + SNIC_INFO("Functionality not impl'ed at %s:%d\n", \ + __func__, __LINE__); \ + WARN_ON_ONCE(EXPR); \ + } \ + }) + + +extern const char *snic_state_str[]; + +enum snic_intx_intr_index { + SNIC_INTX_WQ_RQ_COPYWQ, + SNIC_INTX_ERR, + SNIC_INTX_NOTIFY, + SNIC_INTX_INTR_MAX, +}; + +enum snic_msix_intr_index { + SNIC_MSIX_WQ, + SNIC_MSIX_IO_CMPL, + SNIC_MSIX_ERR_NOTIFY, + SNIC_MSIX_INTR_MAX, +}; + +struct snic_msix_entry { + int requested; + char devname[IFNAMSIZ]; + irqreturn_t (*isr)(int, void *); + void *devid; +}; + +enum snic_state { + SNIC_INIT = 0, + SNIC_ERROR, + SNIC_ONLINE, + SNIC_OFFLINE, + SNIC_FWRESET, +}; + +#define SNIC_WQ_MAX 1 +#define SNIC_CQ_IO_CMPL_MAX 1 +#define SNIC_CQ_MAX (SNIC_WQ_MAX + SNIC_CQ_IO_CMPL_MAX) + +/* firmware version information */ +struct snic_fw_info { + u32 fw_ver; + u32 hid; /* u16 hid | u16 vnic id */ + u32 max_concur_ios; /* max concurrent ios */ + u32 max_sgs_per_cmd; /* max sgls per IO */ + u32 max_io_sz; /* max io size supported */ + u32 hba_cap; /* hba capabilities */ + u32 max_tgts; /* max tgts supported */ + u16 io_tmo; /* FW Extended timeout */ + struct completion *wait; /* protected by snic lock*/ +}; + +/* + * snic_work item : defined to process asynchronous events + */ +struct snic_work { + struct work_struct work; + u16 ev_id; + u64 *ev_data; +}; + +/* + * snic structure to represent SCSI vNIC + */ +struct snic { + /* snic specific members */ + struct list_head list; + char name[IFNAMSIZ]; + atomic_t state; + spinlock_t snic_lock; + struct completion *remove_wait; + bool in_remove; + bool stop_link_events; /* stop processing link events */ + + /* discovery related */ + struct snic_disc disc; + + /* Scsi Host info */ + struct Scsi_Host *shost; + + /* vnic related structures */ + struct vnic_dev_bar bar0; + + struct vnic_stats *stats; + unsigned long stats_time; + unsigned long stats_reset_time; + + struct vnic_dev *vdev; + + /* hw resource info */ + unsigned int wq_count; + unsigned int cq_count; + unsigned int intr_count; + unsigned int err_intr_offset; + + int link_status; /* retrieved from svnic_dev_link_status() */ + u32 link_down_cnt; + + /* pci related */ + struct pci_dev *pdev; + struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX]; + struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX]; + + /* io related info */ + mempool_t *req_pool[SNIC_REQ_MAX_CACHES]; /* (??) 
*/ + ____cacheline_aligned spinlock_t io_req_lock[SNIC_IO_LOCKS]; + + /* Maintain snic specific commands, cmds with no tag in spl_cmd_list */ + ____cacheline_aligned spinlock_t spl_cmd_lock; + struct list_head spl_cmd_list; + + unsigned int max_tag_id; + atomic_t ios_inflight; /* io in flight counter */ + + struct vnic_snic_config config; + + struct work_struct link_work; + + /* firmware information */ + struct snic_fw_info fwinfo; + + /* Work for processing Target related work */ + struct work_struct tgt_work; + + /* Work for processing Discovery */ + struct work_struct disc_work; + + /* stats related */ + unsigned int reset_stats; + atomic64_t io_cmpl_skip; + struct snic_stats s_stats; /* Per SNIC driver stats */ + + /* platform specific */ +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + struct dentry *stats_host; /* Per snic debugfs root */ + struct dentry *stats_file; /* Per snic debugfs file */ + struct dentry *reset_stats_file;/* Per snic reset stats file */ +#endif + + /* completion queue cache line section */ + ____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX]; + + /* work queue cache line section */ + ____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX]; + spinlock_t wq_lock[SNIC_WQ_MAX]; + + /* interrupt resource cache line section */ + ____cacheline_aligned struct vnic_intr intr[SNIC_MSIX_INTR_MAX]; +}; /* end of snic structure */ + +/* + * SNIC Driver's Global Data + */ +struct snic_global { + struct list_head snic_list; + spinlock_t snic_list_lock; + + struct kmem_cache *req_cache[SNIC_REQ_MAX_CACHES]; + + struct workqueue_struct *event_q; + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* debugfs related global data */ + struct dentry *trc_root; + struct dentry *stats_root; + + struct snic_trc trc ____cacheline_aligned; +#endif +}; + +extern struct snic_global *snic_glob; + +int snic_glob_init(void); +void snic_glob_cleanup(void); + +extern struct workqueue_struct *snic_event_queue; +extern struct device_attribute *snic_attrs[]; + +int snic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); +int snic_abort_cmd(struct scsi_cmnd *); +int snic_device_reset(struct scsi_cmnd *); +int snic_host_reset(struct scsi_cmnd *); +int snic_reset(struct Scsi_Host *, struct scsi_cmnd *); +void snic_shutdown_scsi_cleanup(struct snic *); + + +int snic_request_intr(struct snic *); +void snic_free_intr(struct snic *); +int snic_set_intr_mode(struct snic *); +void snic_clear_intr_mode(struct snic *); + +int snic_fwcq_cmpl_handler(struct snic *, int); +int snic_wq_cmpl_handler(struct snic *, int); +void snic_free_wq_buf(struct vnic_wq *, struct vnic_wq_buf *); + + +void snic_log_q_error(struct snic *); +void snic_handle_link_event(struct snic *); +void snic_handle_link(struct work_struct *); + +int snic_queue_exch_ver_req(struct snic *); +int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *); + +int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len); + +void snic_handle_untagged_req(struct snic *, struct snic_req_info *); +void snic_release_untagged_req(struct snic *, struct snic_req_info *); +void snic_free_all_untagged_reqs(struct snic *); +int snic_get_conf(struct snic *); +void snic_set_state(struct snic *, enum snic_state); +int snic_get_state(struct snic *); +const char *snic_state_to_str(unsigned int); +void snic_hex_dump(char *, char *, int); +void snic_print_desc(const char *fn, char *os_buf, int len); +const char *show_opcode_name(int val); +#endif /* _SNIC_H */ diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c new file mode 100644 index 
000000000000..32d5d556b6f8 --- /dev/null +++ b/drivers/scsi/snic/snic_attrs.c @@ -0,0 +1,77 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/string.h> +#include <linux/device.h> + +#include "snic.h" + +static ssize_t +snic_show_sym_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct snic *snic = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", snic->name); +} + +static ssize_t +snic_show_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct snic *snic = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", + snic_state_str[snic_get_state(snic)]); +} + +static ssize_t +snic_show_drv_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", SNIC_DRV_VERSION); +} + +static ssize_t +snic_show_link_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct snic *snic = shost_priv(class_to_shost(dev)); + + if (snic->config.xpt_type == SNIC_DAS) + snic->link_status = svnic_dev_link_status(snic->vdev); + + return snprintf(buf, PAGE_SIZE, "%s\n", + (snic->link_status) ? "Link Up" : "Link Down"); +} + +static DEVICE_ATTR(snic_sym_name, S_IRUGO, snic_show_sym_name, NULL); +static DEVICE_ATTR(snic_state, S_IRUGO, snic_show_state, NULL); +static DEVICE_ATTR(drv_version, S_IRUGO, snic_show_drv_version, NULL); +static DEVICE_ATTR(link_state, S_IRUGO, snic_show_link_state, NULL); + +struct device_attribute *snic_attrs[] = { + &dev_attr_snic_sym_name, + &dev_attr_snic_state, + &dev_attr_drv_version, + &dev_attr_link_state, + NULL, +}; diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c new file mode 100644 index 000000000000..aebe75320ed3 --- /dev/null +++ b/drivers/scsi/snic/snic_ctl.c @@ -0,0 +1,279 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/mempool.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/ctype.h>
+
+#include "snic_io.h"
+#include "snic.h"
+#include "cq_enet_desc.h"
+#include "snic_fwint.h"
+
+/*
+ * snic_handle_link : Handles link flaps.
+ */
+void
+snic_handle_link(struct work_struct *work)
+{
+	struct snic *snic = container_of(work, struct snic, link_work);
+
+	if (snic->config.xpt_type != SNIC_DAS) {
+		SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
+		SNIC_ASSERT_NOT_IMPL(1);
+
+		return;
+	}
+
+	snic->link_status = svnic_dev_link_status(snic->vdev);
+	snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
+	SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
+		       ((snic->link_status) ? "Up" : "Down"));
+}
+
+
+/*
+ * snic_ver_enc : Encodes a version string to an int.
+ * The version string is formatted like a netmask string.
+ */
+static int
+snic_ver_enc(const char *s)
+{
+	int v[4] = {0};
+	int i = 0, x = 0;
+	char c;
+	const char *p = s;
+
+	/* validate version string */
+	if ((strlen(s) > 15) || (strlen(s) < 7))
+		goto end;
+
+	while ((c = *p++)) {
+		if (c == '.') {
+			i++;
+			continue;
+		}
+
+		/* at most four sub-versions; anything past v[3] is invalid */
+		if (i > 3 || !isdigit(c))
+			goto end;
+
+		v[i] = v[i] * 10 + (c - '0');
+	}
+
+	/* validate sub version numbers */
+	for (i = 3; i >= 0; i--)
+		if (v[i] > 0xff)
+			goto end;
+
+	x |= (v[0] << 24) | v[1] << 16 | v[2] << 8 | v[3];
+
+end:
+	if (x == 0) {
+		SNIC_ERR("Invalid version string [%s].\n", s);
+
+		return -1;
+	}
+
+	return x;
+} /* end of snic_ver_enc */
+
+/*
+ * snic_queue_exch_ver_req :
+ *
+ * Queues an Exchange Version request to communicate host information;
+ * in return, it gets firmware version details
+ */
+int
+snic_queue_exch_ver_req(struct snic *snic)
+{
+	struct snic_req_info *rqi = NULL;
+	struct snic_host_req *req = NULL;
+	u32 ver = 0;
+	int ret = 0;
+
+	SNIC_HOST_INFO(snic->shost, "Exch Ver Req Preparing...\n");
+
+	rqi = snic_req_init(snic, 0);
+	if (!rqi) {
+		ret = -ENOMEM;
+		SNIC_HOST_ERR(snic->shost,
+			      "Queuing Exch Ver Req failed, err = %d\n",
+			      ret);
+
+		goto error;
+	}
+
+	req = rqi_to_req(rqi);
+
+	/* Initialize snic_host_req */
+	snic_io_hdr_enc(&req->hdr, SNIC_REQ_EXCH_VER, 0, SCSI_NO_TAG,
+			snic->config.hid, 0, (ulong)rqi);
+	ver = snic_ver_enc(SNIC_DRV_VERSION);
+	req->u.exch_ver.drvr_ver = cpu_to_le32(ver);
+	req->u.exch_ver.os_type = cpu_to_le32(SNIC_OS_LINUX);
+
+	snic_handle_untagged_req(snic, rqi);
+
+	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
+	if (ret) {
+		snic_release_untagged_req(snic, rqi);
+		SNIC_HOST_ERR(snic->shost,
+			      "Queuing Exch Ver Req failed, err = %d\n",
+			      ret);
+		goto error;
+	}
+
+	SNIC_HOST_INFO(snic->shost, "Exch Ver Req is issued. ret = %d\n", ret);
+
+error:
+	return ret;
+} /* end of snic_queue_exch_ver_req */
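Aside on the encoding above: snic_ver_enc() packs the four dotted sub-versions of the string MSB-first into a 32-bit value, so the driver's own SNIC_DRV_VERSION "0.0.1.18" encodes to 0x00000112. A small standalone sketch of just the packing step, under the assumption that the string has already been parsed into four byte-sized components (pack_ver is an illustrative name, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Illustrative reimplementation of the packing step in snic_ver_enc():
 * each sub-version must fit in one byte, major version in the MSB. */
static uint32_t pack_ver(const uint8_t v[4])
{
	return ((uint32_t)v[0] << 24) | ((uint32_t)v[1] << 16) |
	       ((uint32_t)v[2] << 8) | (uint32_t)v[3];
}

int main(void)
{
	uint8_t drv[4] = { 0, 0, 1, 18 };	/* "0.0.1.18" */

	printf("0x%08x\n", pack_ver(drv));	/* prints 0x00000112 */
	return 0;
}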
ret = %d\n", ret); + +error: + return ret; +} /* end of snic_queue_exch_ver_req */ + +/* + * snic_io_exch_ver_cmpl_handler + */ +int +snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + struct snic_req_info *rqi = NULL; + struct snic_exch_ver_rsp *exv_cmpl = &fwreq->u.exch_ver_cmpl; + u8 typ, hdr_stat; + u32 cmnd_id, hid, max_sgs; + ulong ctx = 0; + unsigned long flags; + int ret = 0; + + SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n"); + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_BUG_ON(snic->config.hid != hid); + rqi = (struct snic_req_info *) ctx; + + if (hdr_stat) { + SNIC_HOST_ERR(snic->shost, + "Exch Ver Completed w/ err status %d\n", + hdr_stat); + + goto exch_cmpl_end; + } + + spin_lock_irqsave(&snic->snic_lock, flags); + snic->fwinfo.fw_ver = le32_to_cpu(exv_cmpl->version); + snic->fwinfo.hid = le32_to_cpu(exv_cmpl->hid); + snic->fwinfo.max_concur_ios = le32_to_cpu(exv_cmpl->max_concur_ios); + snic->fwinfo.max_sgs_per_cmd = le32_to_cpu(exv_cmpl->max_sgs_per_cmd); + snic->fwinfo.max_io_sz = le32_to_cpu(exv_cmpl->max_io_sz); + snic->fwinfo.max_tgts = le32_to_cpu(exv_cmpl->max_tgts); + snic->fwinfo.io_tmo = le16_to_cpu(exv_cmpl->io_timeout); + + SNIC_HOST_INFO(snic->shost, + "vers %u hid %u max_concur_ios %u max_sgs_per_cmd %u max_io_sz %u max_tgts %u fw tmo %u\n", + snic->fwinfo.fw_ver, + snic->fwinfo.hid, + snic->fwinfo.max_concur_ios, + snic->fwinfo.max_sgs_per_cmd, + snic->fwinfo.max_io_sz, + snic->fwinfo.max_tgts, + snic->fwinfo.io_tmo); + + SNIC_HOST_INFO(snic->shost, + "HBA Capabilities = 0x%x\n", + le32_to_cpu(exv_cmpl->hba_cap)); + + /* Updating SGList size */ + max_sgs = snic->fwinfo.max_sgs_per_cmd; + if (max_sgs && max_sgs < SNIC_MAX_SG_DESC_CNT) { + snic->shost->sg_tablesize = max_sgs; + SNIC_HOST_INFO(snic->shost, "Max SGs set to %d\n", + snic->shost->sg_tablesize); + } else if (max_sgs > snic->shost->sg_tablesize) { + SNIC_HOST_INFO(snic->shost, + "Target type %d Supports Larger Max SGList %d than driver's Max SG List %d.\n", + snic->config.xpt_type, max_sgs, + snic->shost->sg_tablesize); + } + + if (snic->shost->can_queue > snic->fwinfo.max_concur_ios) + snic->shost->can_queue = snic->fwinfo.max_concur_ios; + + snic->shost->max_sectors = snic->fwinfo.max_io_sz >> 9; + if (snic->fwinfo.wait) + complete(snic->fwinfo.wait); + + spin_unlock_irqrestore(&snic->snic_lock, flags); + +exch_cmpl_end: + snic_release_untagged_req(snic, rqi); + + SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat); + + return ret; +} /* end of snic_io_exch_ver_cmpl_handler */ + +/* + * snic_get_conf + * + * Synchronous call, and Retrieves snic params. + */ +int +snic_get_conf(struct snic *snic) +{ + DECLARE_COMPLETION_ONSTACK(wait); + unsigned long flags; + int ret; + int nr_retries = 3; + + SNIC_HOST_INFO(snic->shost, "Retrieving snic params.\n"); + spin_lock_irqsave(&snic->snic_lock, flags); + memset(&snic->fwinfo, 0, sizeof(snic->fwinfo)); + snic->fwinfo.wait = &wait; + spin_unlock_irqrestore(&snic->snic_lock, flags); + + /* Additional delay to handle HW Resource initialization. */ + msleep(50); + + /* + * Exch ver req can be ignored by FW, if HW Resource initialization + * is in progress, Hence retry. + */ + do { + ret = snic_queue_exch_ver_req(snic); + if (ret) + return ret; + + wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)); + spin_lock_irqsave(&snic->snic_lock, flags); + ret = (snic->fwinfo.fw_ver != 0) ? 
0 : -ETIMEDOUT; + if (ret) + SNIC_HOST_ERR(snic->shost, + "Failed to retrieve snic params.\n"); + + /* Unset fwinfo.wait, on success or on last retry */ + if (ret == 0 || nr_retries == 1) + snic->fwinfo.wait = NULL; + + spin_unlock_irqrestore(&snic->snic_lock, flags); + } while (ret && --nr_retries); + + return ret; +} /* end of snic_get_conf */ diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c new file mode 100644 index 000000000000..1686f0196251 --- /dev/null +++ b/drivers/scsi/snic/snic_debugfs.c @@ -0,0 +1,560 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/debugfs.h> + +#include "snic.h" + +/* + * snic_debugfs_init - Initialize debugfs for snic debug logging + * + * Description: + * When debugfs is configured this routine sets up the snic debugfs + * filesystem. If not already created, this routine will create the + * snic directory and the statistics directory for trace buffer and + * stats logging + */ + +int +snic_debugfs_init(void) +{ + int rc = -1; + struct dentry *de = NULL; + + de = debugfs_create_dir("snic", NULL); + if (!de) { + SNIC_DBG("Cannot create debugfs root\n"); + + return rc; + } + snic_glob->trc_root = de; + + de = debugfs_create_dir("statistics", snic_glob->trc_root); + if (!de) { + SNIC_DBG("Cannot create Statistics directory\n"); + + return rc; + } + snic_glob->stats_root = de; + + rc = 0; + + return rc; +} /* end of snic_debugfs_init */ + +/* + * snic_debugfs_term - Tear down debugfs infrastructure + * + * Description: + * When debugfs is configured this routine removes debugfs file system + * elements that are specific to snic + */ +void +snic_debugfs_term(void) +{ + debugfs_remove(snic_glob->stats_root); + snic_glob->stats_root = NULL; + + debugfs_remove(snic_glob->trc_root); + snic_glob->trc_root = NULL; +} + +/* + * snic_reset_stats_open - Open the reset_stats file + */ +static int +snic_reset_stats_open(struct inode *inode, struct file *filp) +{ + SNIC_BUG_ON(!inode->i_private); + filp->private_data = inode->i_private; + + return 0; +} + +/* + * snic_reset_stats_read - Read a reset_stats debugfs file + * @filp: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @cnt: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads the value of the reset_stats variable + * and stores it into the local @buf. It will start reading the file at + * @ppos and copy up to @cnt of data to @ubuf from @buf. + * + * Returns: + * This function returns the amount of data that was read.
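 *
 * Usage sketch (illustrative, assuming debugfs is mounted at the
 * conventional /sys/kernel/debug path and host0 is the snic host):
 *
 *   cat /sys/kernel/debug/snic/statistics/host0/reset_stats
 *   echo 1 > /sys/kernel/debug/snic/statistics/host0/reset_stats
 *
 * Writing a non-zero value clears the cumulative driver statistics;
 * see snic_reset_stats_write() below.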
+ */ +static ssize_t +snic_reset_stats_read(struct file *filp, + char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct snic *snic = (struct snic *) filp->private_data; + char buf[64]; + int len; + + len = sprintf(buf, "%u\n", snic->reset_stats); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +} + +/* + * snic_reset_stats_write - Write to reset_stats debugfs file + * @filp: The file pointer to write from + * @ubuf: The buffer to copy the data from. + * @cnt: The number of bytes to write. + * @ppos: The position in the file to start writing to. + * + * Description: + * This routine writes data from user buffer @ubuf to buffer @buf and + * resets cumulative stats of snic. + * + * Returns: + * This function returns the amount of data that was written. + */ +static ssize_t +snic_reset_stats_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct snic *snic = (struct snic *) filp->private_data; + struct snic_stats *stats = &snic->s_stats; + u64 *io_stats_p = (u64 *) &stats->io; + u64 *fw_stats_p = (u64 *) &stats->fw; + char buf[64]; + unsigned long val; + int ret; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = '\0'; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + snic->reset_stats = val; + + if (snic->reset_stats) { + /* Skip variable is used to avoid descrepancies to Num IOs + * and IO Completions stats. Skip incrementing No IO Compls + * for pending active IOs after reset_stats + */ + atomic64_set(&snic->io_cmpl_skip, + atomic64_read(&stats->io.active)); + memset(&stats->abts, 0, sizeof(struct snic_abort_stats)); + memset(&stats->reset, 0, sizeof(struct snic_reset_stats)); + memset(&stats->misc, 0, sizeof(struct snic_misc_stats)); + memset(io_stats_p+1, + 0, + sizeof(struct snic_io_stats) - sizeof(u64)); + memset(fw_stats_p+1, + 0, + sizeof(struct snic_fw_stats) - sizeof(u64)); + } + + (*ppos)++; + + SNIC_HOST_INFO(snic->shost, "Reset Op: Driver statistics.\n"); + + return cnt; +} + +static int +snic_reset_stats_release(struct inode *inode, struct file *filp) +{ + filp->private_data = NULL; + + return 0; +} + +/* + * snic_stats_show - Formats and prints per host specific driver stats. 
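 *
 * The stats file itself is wired up through the seq_file single_open()
 * helper (see snic_stats_open() below), so a single read of the file
 * renders the whole report via the seq_printf() calls that follow.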
+ */ +static int +snic_stats_show(struct seq_file *sfp, void *data) +{ + struct snic *snic = (struct snic *) sfp->private; + struct snic_stats *stats = &snic->s_stats; + struct timespec last_isr_tms, last_ack_tms; + u64 maxio_tm; + int i; + + /* Dump IO Stats */ + seq_printf(sfp, + "------------------------------------------\n" + "\t\t IO Statistics\n" + "------------------------------------------\n"); + + maxio_tm = (u64) atomic64_read(&stats->io.max_time); + seq_printf(sfp, + "Active IOs : %lld\n" + "Max Active IOs : %lld\n" + "Total IOs : %lld\n" + "IOs Completed : %lld\n" + "IOs Failed : %lld\n" + "IOs Not Found : %lld\n" + "Memory Alloc Failures : %lld\n" + "REQs Null : %lld\n" + "SCSI Cmd Pointers Null : %lld\n" + "Max SGL for any IO : %lld\n" + "Max IO Size : %lld Sectors\n" + "Max Queuing Time : %lld\n" + "Max Completion Time : %lld\n" + "Max IO Process Time(FW) : %lld (%u msec)\n", + (u64) atomic64_read(&stats->io.active), + (u64) atomic64_read(&stats->io.max_active), + (u64) atomic64_read(&stats->io.num_ios), + (u64) atomic64_read(&stats->io.compl), + (u64) atomic64_read(&stats->io.fail), + (u64) atomic64_read(&stats->io.io_not_found), + (u64) atomic64_read(&stats->io.alloc_fail), + (u64) atomic64_read(&stats->io.req_null), + (u64) atomic64_read(&stats->io.sc_null), + (u64) atomic64_read(&stats->io.max_sgl), + (u64) atomic64_read(&stats->io.max_io_sz), + (u64) atomic64_read(&stats->io.max_qtime), + (u64) atomic64_read(&stats->io.max_cmpl_time), + maxio_tm, + jiffies_to_msecs(maxio_tm)); + + seq_puts(sfp, "\nSGL Counters\n"); + + for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) { + seq_printf(sfp, + "%10lld ", + (u64) atomic64_read(&stats->io.sgl_cnt[i])); + + if ((i + 1) % 8 == 0) + seq_puts(sfp, "\n"); + } + + /* Dump Abort Stats */ + seq_printf(sfp, + "\n-------------------------------------------\n" + "\t\t Abort Statistics\n" + "---------------------------------------------\n"); + + seq_printf(sfp, + "Aborts : %lld\n" + "Aborts Fail : %lld\n" + "Aborts Driver Timeout : %lld\n" + "Abort FW Timeout : %lld\n" + "Abort IO NOT Found : %lld\n", + (u64) atomic64_read(&stats->abts.num), + (u64) atomic64_read(&stats->abts.fail), + (u64) atomic64_read(&stats->abts.drv_tmo), + (u64) atomic64_read(&stats->abts.fw_tmo), + (u64) atomic64_read(&stats->abts.io_not_found)); + + /* Dump Reset Stats */ + seq_printf(sfp, + "\n-------------------------------------------\n" + "\t\t Reset Statistics\n" + "---------------------------------------------\n"); + + seq_printf(sfp, + "HBA Resets : %lld\n" + "HBA Reset Cmpls : %lld\n" + "HBA Reset Fail : %lld\n", + (u64) atomic64_read(&stats->reset.hba_resets), + (u64) atomic64_read(&stats->reset.hba_reset_cmpl), + (u64) atomic64_read(&stats->reset.hba_reset_fail)); + + /* Dump Firmware Stats */ + seq_printf(sfp, + "\n-------------------------------------------\n" + "\t\t Firmware Statistics\n" + "---------------------------------------------\n"); + + seq_printf(sfp, + "Active FW Requests : %lld\n" + "Max FW Requests : %lld\n" + "FW Out Of Resource Errs : %lld\n" + "FW IO Errors : %lld\n" + "FW SCSI Errors : %lld\n", + (u64) atomic64_read(&stats->fw.actv_reqs), + (u64) atomic64_read(&stats->fw.max_actv_reqs), + (u64) atomic64_read(&stats->fw.out_of_res), + (u64) atomic64_read(&stats->fw.io_errs), + (u64) atomic64_read(&stats->fw.scsi_errs)); + + + /* Dump Miscellenous Stats */ + seq_printf(sfp, + "\n---------------------------------------------\n" + "\t\t Other Statistics\n" + "\n---------------------------------------------\n"); + + 
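	/*
	 * The last ISR/ack timestamps are kept as raw jiffies; convert
	 * them to a sec.nsec pair so that both the raw value and a
	 * human-readable form can be printed below.
	 */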
jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms); + jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms); + + seq_printf(sfp, + "Last ISR Time : %llu (%8lu.%8lu)\n" + "Last Ack Time : %llu (%8lu.%8lu)\n" + "ISRs : %llu\n" + "Max CQ Entries : %lld\n" + "Data Count Mismatch : %lld\n" + "IOs w/ Timeout Status : %lld\n" + "IOs w/ Aborted Status : %lld\n" + "IOs w/ SGL Invalid Stat : %lld\n" + "WQ Desc Alloc Fail : %lld\n" + "Queue Full : %lld\n" + "Target Not Ready : %lld\n", + (u64) stats->misc.last_isr_time, + last_isr_tms.tv_sec, last_isr_tms.tv_nsec, + (u64)stats->misc.last_ack_time, + last_ack_tms.tv_sec, last_ack_tms.tv_nsec, + (u64) atomic64_read(&stats->misc.isr_cnt), + (u64) atomic64_read(&stats->misc.max_cq_ents), + (u64) atomic64_read(&stats->misc.data_cnt_mismat), + (u64) atomic64_read(&stats->misc.io_tmo), + (u64) atomic64_read(&stats->misc.io_aborted), + (u64) atomic64_read(&stats->misc.sgl_inval), + (u64) atomic64_read(&stats->misc.wq_alloc_fail), + (u64) atomic64_read(&stats->misc.qfull), + (u64) atomic64_read(&stats->misc.tgt_not_rdy)); + + return 0; +} + +/* + * snic_stats_open - Open the stats file for specific host + * + * Description: + * This routine opens a debugfs file stats of specific host + */ +static int +snic_stats_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, snic_stats_show, inode->i_private); +} + +static const struct file_operations snic_stats_fops = { + .owner = THIS_MODULE, + .open = snic_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations snic_reset_stats_fops = { + .owner = THIS_MODULE, + .open = snic_reset_stats_open, + .read = snic_reset_stats_read, + .write = snic_reset_stats_write, + .release = snic_reset_stats_release, +}; + +/* + * snic_stats_init - Initialize stats struct and create stats file + * per snic + * + * Description: + * When debugfs is cofigured this routine sets up the stats file per snic + * It will create file stats and reset_stats under statistics/host# directory + * to log per snic stats + */ +int +snic_stats_debugfs_init(struct snic *snic) +{ + int rc = -1; + char name[16]; + struct dentry *de = NULL; + + snprintf(name, sizeof(name), "host%d", snic->shost->host_no); + if (!snic_glob->stats_root) { + SNIC_DBG("snic_stats root doesn't exist\n"); + + return rc; + } + + de = debugfs_create_dir(name, snic_glob->stats_root); + if (!de) { + SNIC_DBG("Cannot create host directory\n"); + + return rc; + } + snic->stats_host = de; + + de = debugfs_create_file("stats", + S_IFREG|S_IRUGO, + snic->stats_host, + snic, + &snic_stats_fops); + if (!de) { + SNIC_DBG("Cannot create host's stats file\n"); + + return rc; + } + snic->stats_file = de; + + de = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + snic->stats_host, + snic, + &snic_reset_stats_fops); + + if (!de) { + SNIC_DBG("Cannot create host's reset_stats file\n"); + + return rc; + } + snic->reset_stats_file = de; + rc = 0; + + return rc; +} /* end of snic_stats_debugfs_init */ + +/* + * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats + * + * Description: + * When Debufs is configured this routine removes debugfs file system + * elements that are specific to to snic stats + */ +void +snic_stats_debugfs_remove(struct snic *snic) +{ + debugfs_remove(snic->stats_file); + snic->stats_file = NULL; + + debugfs_remove(snic->reset_stats_file); + snic->reset_stats_file = NULL; + + debugfs_remove(snic->stats_host); + snic->stats_host = NULL; 
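	/*
	 * Note: the stats and reset_stats files are removed before their
	 * parent host directory, and the dentry pointers are cleared so
	 * a repeated teardown stays harmless (debugfs_remove(NULL) is a
	 * no-op).
	 */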
+} + +/* Trace Facility related API */ +static void * +snic_trc_seq_start(struct seq_file *sfp, loff_t *pos) +{ + return &snic_glob->trc; +} + +static void * +snic_trc_seq_next(struct seq_file *sfp, void *data, loff_t *pos) +{ + return NULL; +} + +static void +snic_trc_seq_stop(struct seq_file *sfp, void *data) +{ +} + +#define SNIC_TRC_PBLEN 256 +static int +snic_trc_seq_show(struct seq_file *sfp, void *data) +{ + char buf[SNIC_TRC_PBLEN]; + + if (snic_get_trc_data(buf, SNIC_TRC_PBLEN) > 0) + seq_printf(sfp, "%s\n", buf); + + return 0; +} + +static const struct seq_operations snic_trc_seq_ops = { + .start = snic_trc_seq_start, + .next = snic_trc_seq_next, + .stop = snic_trc_seq_stop, + .show = snic_trc_seq_show, +}; + +static int +snic_trc_open(struct inode *inode, struct file *filp) +{ + return seq_open(filp, &snic_trc_seq_ops); +} + +static const struct file_operations snic_trc_fops = { + .owner = THIS_MODULE, + .open = snic_trc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* + * snic_trc_debugfs_init : creates trace/tracing_enable files for trace + * under debugfs + */ +int +snic_trc_debugfs_init(void) +{ + struct dentry *de = NULL; + int ret = -1; + + if (!snic_glob->trc_root) { + SNIC_ERR("Debugfs root directory for snic doesn't exist.\n"); + + return ret; + } + + de = debugfs_create_bool("tracing_enable", + S_IFREG | S_IRUGO | S_IWUSR, + snic_glob->trc_root, + &snic_glob->trc.enable); + + if (!de) { + SNIC_ERR("Can't create trace_enable file.\n"); + + return ret; + } + snic_glob->trc.trc_enable = de; + + de = debugfs_create_file("trace", + S_IFREG | S_IRUGO | S_IWUSR, + snic_glob->trc_root, + NULL, + &snic_trc_fops); + + if (!de) { + SNIC_ERR("Cann't create trace file.\n"); + + return ret; + } + snic_glob->trc.trc_file = de; + ret = 0; + + return ret; +} /* end of snic_trc_debugfs_init */ + +/* + * snic_trc_debugfs_term : cleans up the files created for trace under debugfs + */ +void +snic_trc_debugfs_term(void) +{ + debugfs_remove(snic_glob->trc.trc_file); + snic_glob->trc.trc_file = NULL; + + debugfs_remove(snic_glob->trc.trc_enable); + snic_glob->trc.trc_enable = NULL; +} diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c new file mode 100644 index 000000000000..5f6321759ad9 --- /dev/null +++ b/drivers/scsi/snic/snic_disc.c @@ -0,0 +1,551 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/mempool.h> + +#include <scsi/scsi_tcq.h> + +#include "snic_disc.h" +#include "snic.h" +#include "snic_io.h" + + +/* snic target types */ +static const char * const snic_tgt_type_str[] = { + [SNIC_TGT_DAS] = "DAS", + [SNIC_TGT_SAN] = "SAN", +}; + +static inline const char * +snic_tgt_type_to_str(int typ) +{ + return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ? 
+ snic_tgt_type_str[typ] : "Unknown"); +} + +static const char * const snic_tgt_state_str[] = { + [SNIC_TGT_STAT_INIT] = "INIT", + [SNIC_TGT_STAT_ONLINE] = "ONLINE", + [SNIC_TGT_STAT_OFFLINE] = "OFFLINE", + [SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS", +}; + +const char * +snic_tgt_state_to_str(int state) +{ + return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ? + snic_tgt_state_str[state] : "UNKNOWN"); +} + +/* + * Initiate report_tgt req desc + */ +static void +snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len, + dma_addr_t rsp_buf_pa, ulong ctx) +{ + struct snic_sg_desc *sgd = NULL; + + + snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid, + 1, ctx); + + req->u.rpt_tgts.sg_cnt = cpu_to_le16(1); + sgd = req_to_sgl(req); + sgd[0].addr = cpu_to_le64(rsp_buf_pa); + sgd[0].len = cpu_to_le32(len); + sgd[0]._resvd = 0; + req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd); +} + +/* + * snic_queue_report_tgt_req: Queues report target request. + */ +static int +snic_queue_report_tgt_req(struct snic *snic) +{ + struct snic_req_info *rqi = NULL; + u32 ntgts, buf_len = 0; + u8 *buf = NULL; + dma_addr_t pa = 0; + int ret = 0; + + rqi = snic_req_init(snic, 1); + if (!rqi) { + ret = -ENOMEM; + goto error; + } + + if (snic->fwinfo.max_tgts) + ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id); + else + ntgts = snic->shost->max_id; + + /* Allocate Response Buffer */ + SNIC_BUG_ON(ntgts == 0); + buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN; + + buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA); + if (!buf) { + snic_req_free(snic, rqi); + SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n"); + + ret = -ENOMEM; + goto error; + } + + SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0); + + pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(snic->pdev, pa)) { + kfree(buf); + snic_req_free(snic, rqi); + SNIC_HOST_ERR(snic->shost, + "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n", + buf); + ret = -EINVAL; + + goto error; + } + + + SNIC_BUG_ON(pa == 0); + rqi->sge_va = (ulong) buf; + + snic_report_tgt_init(rqi->req, + snic->config.hid, + buf, + buf_len, + pa, + (ulong)rqi); + + snic_handle_untagged_req(snic, rqi); + + ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); + if (ret) { + pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE); + kfree(buf); + rqi->sge_va = 0; + snic_release_untagged_req(snic, rqi); + SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n"); + + goto error; + } + + SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n"); + + return ret; + +error: + SNIC_HOST_ERR(snic->shost, + "Queuing Report Targets Failed, err = %d\n", + ret); + return ret; +} /* end of snic_queue_report_tgt_req */ + +/* call into SML */ +static void +snic_scsi_scan_tgt(struct work_struct *work) +{ + struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work); + struct Scsi_Host *shost = dev_to_shost(&tgt->dev); + unsigned long flags; + + SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id); + scsi_scan_target(&tgt->dev, + tgt->channel, + tgt->scsi_tgt_id, + SCAN_WILD_CARD, + 1); + + spin_lock_irqsave(shost->host_lock, flags); + tgt->flags &= ~SNIC_TGT_SCAN_PENDING; + spin_unlock_irqrestore(shost->host_lock, flags); +} /* end of snic_scsi_scan_tgt */ + +/* + * snic_tgt_lookup : + */ +static struct snic_tgt * +snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid) +{ + struct list_head *cur, *nxt; + struct snic_tgt *tgt = NULL; + + 
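	/*
	 * Linear walk of the per-host target list; tgt_id arrives
	 * little-endian from firmware, so each node is compared against
	 * the byte-swapped value. The list is small (bounded by the
	 * reported target count), so a simple scan suffices.
	 */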
list_for_each_safe(cur, nxt, &snic->disc.tgt_list) { + tgt = list_entry(cur, struct snic_tgt, list); + if (tgt->id == le32_to_cpu(tgtid->tgt_id)) + return tgt; + tgt = NULL; + } + + return tgt; +} /* end of snic_tgt_lookup */ + +/* + * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object + */ +void +snic_tgt_dev_release(struct device *dev) +{ + struct snic_tgt *tgt = dev_to_tgt(dev); + + SNIC_HOST_INFO(snic_tgt_to_shost(tgt), + "Target Device ID %d (%s) Permanently Deleted.\n", + tgt->id, + dev_name(dev)); + + SNIC_BUG_ON(!list_empty(&tgt->list)); + kfree(tgt); +} + +/* + * snic_tgt_del : work function to delete snic_tgt + */ +static void +snic_tgt_del(struct work_struct *work) +{ + struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work); + struct Scsi_Host *shost = snic_tgt_to_shost(tgt); + + if (tgt->flags & SNIC_TGT_SCAN_PENDING) + scsi_flush_work(shost); + + /* Block IOs on child devices, stops new IOs */ + scsi_target_block(&tgt->dev); + + /* Cleanup IOs */ + snic_tgt_scsi_abort_io(tgt); + + /* Unblock IOs now, to flush if there are any. */ + scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE); + + /* Delete SCSI Target and sdevs */ + scsi_remove_target(&tgt->dev); /* ?? */ + device_del(&tgt->dev); + put_device(&tgt->dev); +} /* end of snic_tgt_del */ + +/* snic_tgt_create: checks for existence of snic_tgt, if it doesn't + * it creates one. + */ +static struct snic_tgt * +snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) +{ + struct snic_tgt *tgt = NULL; + unsigned long flags; + int ret; + + tgt = snic_tgt_lookup(snic, tgtid); + if (tgt) { + /* update the information if required */ + return tgt; + } + + tgt = kzalloc(sizeof(*tgt), GFP_KERNEL); + if (!tgt) { + SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n"); + ret = -ENOMEM; + + return tgt; + } + + INIT_LIST_HEAD(&tgt->list); + tgt->id = le32_to_cpu(tgtid->tgt_id); + tgt->channel = 0; + + SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN); + tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type); + + /* + * Plugging into SML Device Tree + */ + tgt->tdata.disc_id = 0; + tgt->state = SNIC_TGT_STAT_INIT; + device_initialize(&tgt->dev); + tgt->dev.parent = get_device(&snic->shost->shost_gendev); + tgt->dev.release = snic_tgt_dev_release; + INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt); + INIT_WORK(&tgt->del_work, snic_tgt_del); + switch (tgt->tdata.typ) { + case SNIC_TGT_DAS: + dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d", + snic->shost->host_no, tgt->channel, tgt->id); + break; + + case SNIC_TGT_SAN: + dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d", + snic->shost->host_no, tgt->channel, tgt->id); + break; + + default: + SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n"); + dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d", + snic->shost->host_no, tgt->channel, tgt->id); + break; + } + + spin_lock_irqsave(snic->shost->host_lock, flags); + list_add_tail(&tgt->list, &snic->disc.tgt_list); + tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++; + tgt->state = SNIC_TGT_STAT_ONLINE; + spin_unlock_irqrestore(snic->shost->host_lock, flags); + + SNIC_HOST_INFO(snic->shost, + "Tgt %d, type = %s detected. 
Adding..\n", + tgt->id, snic_tgt_type_to_str(tgt->tdata.typ)); + + ret = device_add(&tgt->dev); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "Snic Tgt: device_add, with err = %d\n", + ret); + + put_device(&snic->shost->shost_gendev); + kfree(tgt); + tgt = NULL; + + return tgt; + } + + SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev)); + + scsi_queue_work(snic->shost, &tgt->scan_work); + + return tgt; +} /* end of snic_tgt_create */ + +/* Handler for discovery */ +void +snic_handle_tgt_disc(struct work_struct *work) +{ + struct snic *snic = container_of(work, struct snic, tgt_work); + struct snic_tgt_id *tgtid = NULL; + struct snic_tgt *tgt = NULL; + unsigned long flags; + int i; + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->in_remove) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + kfree(snic->disc.rtgt_info); + + return; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + mutex_lock(&snic->disc.mutex); + /* Discover triggered during disc in progress */ + if (snic->disc.req_cnt) { + snic->disc.state = SNIC_DISC_DONE; + snic->disc.req_cnt = 0; + mutex_unlock(&snic->disc.mutex); + kfree(snic->disc.rtgt_info); + snic->disc.rtgt_info = NULL; + + SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n"); + /* Start Discovery Again */ + snic_disc_start(snic); + + return; + } + + tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info; + + SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL); + + for (i = 0; i < snic->disc.rtgt_cnt; i++) { + tgt = snic_tgt_create(snic, &tgtid[i]); + if (!tgt) { + int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid); + + SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n"); + snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz); + break; + } + } + + snic->disc.rtgt_info = NULL; + snic->disc.state = SNIC_DISC_DONE; + mutex_unlock(&snic->disc.mutex); + + SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n"); + + kfree(tgtid); +} /* end of snic_handle_tgt_disc */ + + +int +snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + + u8 typ, cmpl_stat; + u32 cmnd_id, hid, tgt_cnt = 0; + ulong ctx; + struct snic_req_info *rqi = NULL; + struct snic_tgt_id *tgtid; + int i, ret = 0; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx); + rqi = (struct snic_req_info *) ctx; + tgtid = (struct snic_tgt_id *) rqi->sge_va; + + tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt); + if (tgt_cnt == 0) { + SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n"); + ret = 1; + + goto end; + } + + /* printing list of targets here */ + SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt); + + SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts); + + for (i = 0; i < tgt_cnt; i++) + SNIC_HOST_INFO(snic->shost, + "Tgt id = 0x%x\n", + le32_to_cpu(tgtid[i].tgt_id)); + + /* + * Queue work for further processing, + * Response Buffer Memory is freed after creating targets + */ + snic->disc.rtgt_cnt = tgt_cnt; + snic->disc.rtgt_info = (u8 *) tgtid; + queue_work(snic_glob->event_q, &snic->tgt_work); + ret = 0; + +end: + /* Unmap Response Buffer */ + snic_pci_unmap_rsp_buf(snic, rqi); + if (ret) + kfree(tgtid); + + rqi->sge_va = 0; + snic_release_untagged_req(snic, rqi); + + return ret; +} /* end of snic_report_tgt_cmpl_handler */ + +/* Discovery init fn */ +void +snic_disc_init(struct snic_disc *disc) +{ + INIT_LIST_HEAD(&disc->tgt_list); + mutex_init(&disc->mutex); + disc->disc_id = 0; + disc->nxt_tgt_id = 0; + disc->state = SNIC_DISC_INIT; + disc->req_cnt = 0; + disc->rtgt_cnt = 0; + disc->rtgt_info = 
NULL; + disc->cb = NULL; +} /* end of snic_disc_init */ + +/* Discovery, uninit fn */ +void +snic_disc_term(struct snic *snic) +{ + struct snic_disc *disc = &snic->disc; + + mutex_lock(&disc->mutex); + if (disc->req_cnt) { + disc->req_cnt = 0; + SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n"); + } + mutex_unlock(&disc->mutex); +} + +/* + * snic_disc_start: Discovery Start ... + */ +int +snic_disc_start(struct snic *snic) +{ + struct snic_disc *disc = &snic->disc; + int ret = 0; + + SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n"); + + mutex_lock(&disc->mutex); + if (disc->state == SNIC_DISC_PENDING) { + disc->req_cnt++; + mutex_unlock(&disc->mutex); + + return ret; + } + disc->state = SNIC_DISC_PENDING; + mutex_unlock(&disc->mutex); + + ret = snic_queue_report_tgt_req(snic); + if (ret) + SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret); + + return ret; +} /* end of snic_disc_start */ + +/* + * snic_disc_work : + */ +void +snic_handle_disc(struct work_struct *work) +{ + struct snic *snic = container_of(work, struct snic, disc_work); + int ret = 0; + + SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n"); + + ret = snic_disc_start(snic); + if (ret) + goto disc_err; + +disc_err: + SNIC_HOST_ERR(snic->shost, + "disc_work: Discovery Failed w/ err = %d\n", + ret); +} /* end of snic_disc_work */ + +/* + * snic_tgt_del_all : cleanup all snic targets + * Called on unbinding the interface + */ +void +snic_tgt_del_all(struct snic *snic) +{ + struct snic_tgt *tgt = NULL; + struct list_head *cur, *nxt; + unsigned long flags; + + mutex_lock(&snic->disc.mutex); + spin_lock_irqsave(snic->shost->host_lock, flags); + + list_for_each_safe(cur, nxt, &snic->disc.tgt_list) { + tgt = list_entry(cur, struct snic_tgt, list); + tgt->state = SNIC_TGT_STAT_DEL; + list_del_init(&tgt->list); + SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id); + queue_work(snic_glob->event_q, &tgt->del_work); + tgt = NULL; + } + spin_unlock_irqrestore(snic->shost->host_lock, flags); + + scsi_flush_work(snic->shost); + mutex_unlock(&snic->disc.mutex); +} /* end of snic_tgt_del_all */ diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h new file mode 100644 index 000000000000..97fa3f5c5bb4 --- /dev/null +++ b/drivers/scsi/snic/snic_disc.h @@ -0,0 +1,124 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __SNIC_DISC_H +#define __SNIC_DISC_H + +#include "snic_fwint.h" + +enum snic_disc_state { + SNIC_DISC_NONE, + SNIC_DISC_INIT, + SNIC_DISC_PENDING, + SNIC_DISC_DONE +}; + +struct snic; +struct snic_disc { + struct list_head tgt_list; + enum snic_disc_state state; + struct mutex mutex; + u16 disc_id; + u8 req_cnt; + u32 nxt_tgt_id; + u32 rtgt_cnt; + u8 *rtgt_info; + struct delayed_work disc_timeout; + void (*cb)(struct snic *); +}; + +#define SNIC_TGT_NAM_LEN 16 + +enum snic_tgt_state { + SNIC_TGT_STAT_NONE, + SNIC_TGT_STAT_INIT, + SNIC_TGT_STAT_ONLINE, /* Target is Online */ + SNIC_TGT_STAT_OFFLINE, /* Target is Offline */ + SNIC_TGT_STAT_DEL, +}; + +struct snic_tgt_priv { + struct list_head list; + enum snic_tgt_type typ; + u16 disc_id; + char *name[SNIC_TGT_NAM_LEN]; + + union { + /*DAS Target specific info */ + /*SAN Target specific info */ + u8 dummmy; + } u; +}; + +/* snic tgt flags */ +#define SNIC_TGT_SCAN_PENDING 0x01 + +struct snic_tgt { + struct list_head list; + u16 id; + u16 channel; + u32 flags; + u32 scsi_tgt_id; + enum snic_tgt_state state; + struct device dev; + struct work_struct scan_work; + struct work_struct del_work; + struct snic_tgt_priv tdata; +}; + + +struct snic_fw_req; + +void snic_disc_init(struct snic_disc *); +int snic_disc_start(struct snic *); +void snic_disc_term(struct snic *); +int snic_report_tgt_cmpl_handler(struct snic *, struct snic_fw_req *); +int snic_tgtinfo_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq); +void snic_process_report_tgts_rsp(struct work_struct *); +void snic_handle_tgt_disc(struct work_struct *); +void snic_handle_disc(struct work_struct *); +void snic_tgt_dev_release(struct device *); +void snic_tgt_del_all(struct snic *); + +#define dev_to_tgt(d) \ + container_of(d, struct snic_tgt, dev) + +static inline int +is_snic_target(struct device *dev) +{ + return dev->release == snic_tgt_dev_release; +} + +#define starget_to_tgt(st) \ + (is_snic_target(((struct scsi_target *) st)->dev.parent) ? \ + dev_to_tgt(st->dev.parent) : NULL) + +#define snic_tgt_to_shost(t) \ + dev_to_shost(t->dev.parent) + +static inline int +snic_tgt_chkready(struct snic_tgt *tgt) +{ + if (tgt->state == SNIC_TGT_STAT_ONLINE) + return 0; + else + return DID_NO_CONNECT << 16; +} + +const char *snic_tgt_state_to_str(int); +int snic_tgt_scsi_abort_io(struct snic_tgt *); +#endif /* end of __SNIC_DISC_H */ diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h new file mode 100644 index 000000000000..2cfaf2dc915f --- /dev/null +++ b/drivers/scsi/snic/snic_fwint.h @@ -0,0 +1,525 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __SNIC_FWINT_H +#define __SNIC_FWINT_H + +#define SNIC_CDB_LEN 32 /* SCSI CDB size 32, can be used for 16 bytes */ +#define LUN_ADDR_LEN 8 + +/* + * Command entry type + */ +enum snic_io_type { + /* + * Initiator request types + */ + SNIC_REQ_REPORT_TGTS = 0x2, /* Report Targets */ + SNIC_REQ_ICMND, /* Initiator command for SCSI IO */ + SNIC_REQ_ITMF, /* Initiator command for Task Mgmt */ + SNIC_REQ_HBA_RESET, /* SNIC Reset */ + SNIC_REQ_EXCH_VER, /* Exchange Version Information */ + SNIC_REQ_TGT_INFO, /* Backend/Target Information */ + SNIC_REQ_BOOT_LUNS, + + /* + * Response type + */ + SNIC_RSP_REPORT_TGTS_CMPL = 0x12,/* Report Targets Completion */ + SNIC_RSP_ICMND_CMPL, /* SCSI IO Completion */ + SNIC_RSP_ITMF_CMPL, /* Task Management Completion */ + SNIC_RSP_HBA_RESET_CMPL, /* SNIC Reset Completion */ + SNIC_RSP_EXCH_VER_CMPL, /* Exchange Version Completion*/ + SNIC_RSP_BOOT_LUNS_CMPL, + + /* + * Misc Request types + */ + SNIC_MSG_ACK = 0x80, /* Ack: snic_notify_msg */ + SNIC_MSG_ASYNC_EVNOTIFY, /* Asynchronous Event Notification */ +}; /* end of enum snic_io_type */ + + +/* + * Header status codes from firmware + */ +enum snic_io_status { + SNIC_STAT_IO_SUCCESS = 0, /* request was successful */ + + /* + * If a request to the fw is rejected, the original request header + * will be returned with the status set to one of the following: + */ + SNIC_STAT_INVALID_HDR, /* header contains invalid data */ + SNIC_STAT_OUT_OF_RES, /* out of resources to complete request */ + SNIC_STAT_INVALID_PARM, /* some parameter in request is not valid */ + SNIC_STAT_REQ_NOT_SUP, /* req type is not supported */ + SNIC_STAT_IO_NOT_FOUND, /* requested IO was not found */ + + /* + * Once a request is processed, the fw will usually return + * a cmpl message type. In cases where errors occurred, + * the header status would be filled in with one of the following: + */ + SNIC_STAT_ABORTED, /* req was aborted */ + SNIC_STAT_TIMEOUT, /* req was timed out */ + SNIC_STAT_SGL_INVALID, /* req was aborted due to sgl error */ + SNIC_STAT_DATA_CNT_MISMATCH, /*recv/sent more/less data than expec */ + SNIC_STAT_FW_ERR, /* req was terminated due to fw error */ + SNIC_STAT_ITMF_REJECT, /* itmf req was rejected by target */ + SNIC_STAT_ITMF_FAIL, /* itmf req was failed */ + SNIC_STAT_ITMF_INCORRECT_LUN, /* itmf req has incorrect LUN id*/ + SNIC_STAT_CMND_REJECT, /* req was invalid and rejected */ + SNIC_STAT_DEV_OFFLINE, /* req sent to offline device */ + SNIC_STAT_NO_BOOTLUN, + SNIC_STAT_SCSI_ERR, /* SCSI error returned by Target. 
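 * In this case the SAM status and any sense data carried in the
 * icmnd completion are expected to be valid.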
*/ + SNIC_STAT_NOT_READY, /* sNIC Subsystem is not ready */ + SNIC_STAT_FATAL_ERROR, /* sNIC is in unrecoverable state */ +}; /* end of enum snic_io_status */ + +/* + * snic_io_hdr : host <--> firmware + * + * any message that is queued to firmware should carry the + * following request header + */ +struct snic_io_hdr { + __le32 hid; + __le32 cmnd_id; /* tag here */ + ulong init_ctx; /* initiator context */ + u8 type; /* request/response type */ + u8 status; /* header status entry */ + u8 protocol; /* Protocol specific, may be needed for RoCE*/ + u8 flags; + __le16 sg_cnt; + u16 resvd; +}; + +/* auxiliary function for encoding the snic_io_hdr */ +static inline void +snic_io_hdr_enc(struct snic_io_hdr *hdr, u8 typ, u8 status, u32 id, u32 hid, + u16 sg_cnt, ulong ctx) +{ + hdr->type = typ; + hdr->status = status; + hdr->protocol = 0; + hdr->hid = cpu_to_le32(hid); + hdr->cmnd_id = cpu_to_le32(id); + hdr->sg_cnt = cpu_to_le16(sg_cnt); + hdr->init_ctx = ctx; + hdr->flags = 0; +} + +/* auxiliary function for decoding the snic_io_hdr */ +static inline void +snic_io_hdr_dec(struct snic_io_hdr *hdr, u8 *typ, u8 *stat, u32 *cmnd_id, + u32 *hid, ulong *ctx) +{ + *typ = hdr->type; + *stat = hdr->status; + *hid = le32_to_cpu(hdr->hid); + *cmnd_id = le32_to_cpu(hdr->cmnd_id); + *ctx = hdr->init_ctx; +} + +/* + * snic_exch_ver_req : host -> firmware + * + * Used for sending host information to firmware, and requesting the + * fw version + */ +struct snic_exch_ver_req { + __le32 drvr_ver; /* for debugging, when fw dump captured */ + __le32 os_type; /* for OS specific features */ +}; + +/* + * os_type flags + * Bit 0-7 : OS information + * Bit 8-31: Feature/Capability Information + */ +#define SNIC_OS_LINUX 0x1 +#define SNIC_OS_WIN 0x2 +#define SNIC_OS_ESX 0x3 + +/* + * HBA Capabilities + * Bit 1: Reserved. + * Bit 2: Dynamic Discovery of LUNs. + * Bit 3: Async event notifications on tgt online/offline events. + * Bit 4: IO timeout support in FW. + * Bit 5-31: Reserved.
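 *
 * Worked example: firmware that supports dynamic LUN discovery and
 * FW-side IO timeouts, but not async event notification, would report
 *
 *   hba_cap == (SNIC_HBA_CAP_DDL | SNIC_HBA_CAP_TMO) == 0x0a
 *
 * matching the flag values defined below.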
+ */ +#define SNIC_HBA_CAP_DDL 0x02 /* Supports Dynamic Discovery of LUNs */ +#define SNIC_HBA_CAP_AEN 0x04 /* Supports Async Event Noitifcation */ +#define SNIC_HBA_CAP_TMO 0x08 /* Supports IO timeout in FW */ + +/* + * snic_exch_ver_rsp : firmware -> host + * + * Used by firmware to send response to version request + */ +struct snic_exch_ver_rsp { + __le32 version; + __le32 hid; + __le32 max_concur_ios; /* max concurrent ios */ + __le32 max_sgs_per_cmd; /* max sgls per IO */ + __le32 max_io_sz; /* max io size supported */ + __le32 hba_cap; /* hba capabilities */ + __le32 max_tgts; /* max tgts supported */ + __le16 io_timeout; /* FW extended timeout */ + u16 rsvd; +}; + + +/* + * snic_report_tgts : host -> firmware request + * + * Used by the host to request list of targets + */ +struct snic_report_tgts { + __le16 sg_cnt; + __le16 flags; /* specific flags from fw */ + u8 _resvd[4]; + __le64 sg_addr; /* Points to SGL */ + __le64 sense_addr; +}; + +enum snic_type { + SNIC_NONE = 0x0, + SNIC_DAS, + SNIC_SAN, +}; + + +/* Report Target Response */ +enum snic_tgt_type { + SNIC_TGT_NONE = 0x0, + SNIC_TGT_DAS, /* DAS Target */ + SNIC_TGT_SAN, /* SAN Target */ +}; + +/* target id format */ +struct snic_tgt_id { + __le32 tgt_id; /* target id */ + __le16 tgt_type; /* tgt type */ + __le16 vnic_id; /* corresponding vnic id */ +}; + +/* + * snic_report_tgts_cmpl : firmware -> host response + * + * Used by firmware to send response to Report Targets request + */ +struct snic_report_tgts_cmpl { + __le32 tgt_cnt; /* Number of Targets accessible */ + u32 _resvd; +}; + +/* + * Command flags + * + * Bit 0: Read flags + * Bit 1: Write flag + * Bit 2: ESGL - sg/esg array contains extended sg + * ESGE - is a host buffer contains sg elements + * Bit 3-4: Task Attributes + * 00b - simple + * 01b - head of queue + * 10b - ordered + * Bit 5-7: Priority - future use + * Bit 8-15: Reserved + */ + +#define SNIC_ICMND_WR 0x01 /* write command */ +#define SNIC_ICMND_RD 0x02 /* read command */ +#define SNIC_ICMND_ESGL 0x04 /* SGE/ESGE array contains valid data*/ + +/* + * Priority/Task Attribute settings + */ +#define SNIC_ICMND_TSK_SHIFT 2 /* task attr starts at bit 2 */ +#define SNIC_ICMND_TSK_MASK(x) ((x>>SNIC_ICMND_TSK_SHIFT) & ~(0xffff)) +#define SNIC_ICMND_TSK_SIMPLE 0 /* simple task attr */ +#define SNIC_ICMND_TSK_HEAD_OF_QUEUE 1 /* head of qeuue task attr */ +#define SNIC_ICMND_TSK_ORDERED 2 /* ordered task attr */ + +#define SNIC_ICMND_PRI_SHIFT 5 /* prio val starts at bit 5 */ + +/* + * snic_icmnd : host-> firmware request + * + * used for sending out an initiator SCSI 16/32-byte command + */ +struct snic_icmnd { + __le16 sg_cnt; /* Number of SG Elements */ + __le16 flags; /* flags */ + __le32 sense_len; /* Sense buffer length */ + __le64 tgt_id; /* Destination Target ID */ + __le64 lun_id; /* Destination LUN ID */ + u8 cdb_len; + u8 _resvd; + __le16 time_out; /* ms time for Res allocations fw to handle io*/ + __le32 data_len; /* Total number of bytes to be transferred */ + u8 cdb[SNIC_CDB_LEN]; + __le64 sg_addr; /* Points to SG List */ + __le64 sense_addr; /* Sense buffer address */ +}; + + +/* Response flags */ +/* Bit 0: Under run + * Bit 1: Over Run + * Bit 2-7: Reserved + */ +#define SNIC_ICMND_CMPL_UNDR_RUN 0x01 /* resid under and valid */ +#define SNIC_ICMND_CMPL_OVER_RUN 0x02 /* resid over and valid */ + +/* + * snic_icmnd_cmpl: firmware -> host response + * + * Used for sending the host a response to an icmnd (initiator command) + */ +struct snic_icmnd_cmpl { + u8 scsi_status; /* value as per SAM */ + 
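	/*
	 * flags carries the under/over-run bits
	 * (SNIC_ICMND_CMPL_UNDR_RUN, SNIC_ICMND_CMPL_OVER_RUN defined
	 * above); resid is meaningful only when one of them is set.
	 */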
u8 flags; + __le16 sense_len; /* Sense Length */ + __le32 resid; /* Residue : # bytes under or over run */ +}; + +/* + * snic_itmf: host->firmware request + * + * used for requesting the firmware to abort a request and/or send out + * a task management function + * + * the req_id field is valid in case of abort task and clear task + */ +struct snic_itmf { + u8 tm_type; /* SCSI Task Management request */ + u8 resvd; + __le16 flags; /* flags */ + __le32 req_id; /* Command id of snic req to be aborted */ + __le64 tgt_id; /* Target ID */ + __le64 lun_id; /* Destination LUN ID */ + __le16 timeout; /* in sec */ +}; + +/* + * Task Management Request + */ +enum snic_itmf_tm_type { + SNIC_ITMF_ABTS_TASK = 0x01, /* Abort Task */ + SNIC_ITMF_ABTS_TASK_SET, /* Abort Task Set */ + SNIC_ITMF_CLR_TASK, /* Clear Task */ + SNIC_ITMF_CLR_TASKSET, /* Clear Task Set */ + SNIC_ITMF_LUN_RESET, /* Lun Reset */ + SNIC_ITMF_ABTS_TASK_TERM, /* Supported for SAN Targets */ +}; + +/* + * snic_itmf_cmpl: firmware -> host resposne + * + * used for sending the host a response for a itmf request + */ +struct snic_itmf_cmpl { + __le32 nterminated; /* # IOs terminated as a result of tmf */ + u8 flags; /* flags */ + u8 _resvd[3]; +}; + +/* + * itmfl_cmpl flags + * Bit 0 : 1 - Num terminated field valid + * Bit 1 - 7 : Reserved + */ +#define SNIC_NUM_TERM_VALID 0x01 /* Number of IOs terminated */ + +/* + * snic_hba_reset: host -> firmware request + * + * used for requesting firmware to reset snic + */ +struct snic_hba_reset { + __le16 flags; /* flags */ + u8 _resvd[6]; +}; + +/* + * snic_hba_reset_cmpl: firmware -> host response + * + * Used by firmware to respond to the host's hba reset request + */ +struct snic_hba_reset_cmpl { + u8 flags; /* flags : more info needs to be added*/ + u8 _resvd[7]; +}; + +/* + * snic_notify_msg: firmware -> host response + * + * Used by firmware to notify host of the last work queue entry received + */ +struct snic_notify_msg { + __le32 wqe_num; /* wq entry number */ + u8 flags; /* flags, macros */ + u8 _resvd[4]; +}; + + +#define SNIC_EVDATA_LEN 24 /* in bytes */ +/* snic_async_evnotify: firmware -> host notification + * + * Used by firmware to notify the host about configuration/state changes + */ +struct snic_async_evnotify { + u8 FLS_EVENT_DESC; + u8 vnic; /* vnic id */ + u8 _resvd[2]; + __le32 ev_id; /* Event ID */ + u8 ev_data[SNIC_EVDATA_LEN]; /* Event Data */ + u8 _resvd2[4]; +}; + +/* async event flags */ +enum snic_ev_type { + SNIC_EV_TGT_OFFLINE = 0x01, /* Target Offline, PL contains TGT ID */ + SNIC_EV_TGT_ONLINE, /* Target Online, PL contains TGT ID */ + SNIC_EV_LUN_OFFLINE, /* LUN Offline, PL contains LUN ID */ + SNIC_EV_LUN_ONLINE, /* LUN Online, PL contains LUN ID */ + SNIC_EV_CONF_CHG, /* Dev Config/Attr Change Event */ + SNIC_EV_TGT_ADDED, /* Target Added */ + SNIC_EV_TGT_DELTD, /* Target Del'd, PL contains TGT ID */ + SNIC_EV_LUN_ADDED, /* LUN Added */ + SNIC_EV_LUN_DELTD, /* LUN Del'd, PL cont. TGT & LUN ID */ + + SNIC_EV_DISC_CMPL = 0x10, /* Discovery Completed Event */ +}; + + +#define SNIC_HOST_REQ_LEN 128 /*Exp length of host req, wq desc sz*/ +/* Payload 88 bytes = 128 - 24 - 16 */ +#define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \ + sizeof(struct snic_io_hdr) - \ + (2 * sizeof(u64)))) + +/* + * snic_host_req: host -> firmware request + * + * Basic structure for all snic requests that are sent from the host to + * firmware. They are 128 bytes in size. 
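 * The layout arithmetic (with a 64-bit ulong init_ctx): 16 bytes of
 * ctrl_data plus a 24-byte snic_io_hdr leaves
 * SNIC_HOST_REQ_PAYLOAD == 128 - 24 - 16 == 88 bytes for the
 * request-specific union, whose last byte doubles as the color bit
 * (the same convention the color accessors at the end of this header
 * use for firmware descriptors).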
+ */ +struct snic_host_req { + u64 ctrl_data[2]; /*16 bytes - Control Data */ + struct snic_io_hdr hdr; + union { + /* + * Entry specific space, last byte contains color + */ + u8 buf[SNIC_HOST_REQ_PAYLOAD]; + + /* + * Exchange firmware version + */ + struct snic_exch_ver_req exch_ver; + + /* report targets */ + struct snic_report_tgts rpt_tgts; + + /* io request */ + struct snic_icmnd icmnd; + + /* task management request */ + struct snic_itmf itmf; + + /* hba reset */ + struct snic_hba_reset reset; + } u; +}; /* end of snic_host_req structure */ + + +#define SNIC_FW_REQ_LEN 64 /* Expected length of fw req */ +struct snic_fw_req { + struct snic_io_hdr hdr; + union { + /* + * Entry specific space, last byte contains color + */ + u8 buf[SNIC_FW_REQ_LEN - sizeof(struct snic_io_hdr)]; + + /* Exchange Version Response */ + struct snic_exch_ver_rsp exch_ver_cmpl; + + /* Report Targets Response */ + struct snic_report_tgts_cmpl rpt_tgts_cmpl; + + /* scsi response */ + struct snic_icmnd_cmpl icmnd_cmpl; + + /* task management response */ + struct snic_itmf_cmpl itmf_cmpl; + + /* hba reset response */ + struct snic_hba_reset_cmpl reset_cmpl; + + /* notify message */ + struct snic_notify_msg ack; + + /* async notification event */ + struct snic_async_evnotify async_ev; + + } u; +}; /* end of snic_fw_req structure */ + +/* + * Auxillary macro to verify specific snic req/cmpl structures + * to ensure that it will be aligned to 64 bit, and not using + * color bit field + */ +#define VERIFY_REQ_SZ(x) +#define VERIFY_CMPL_SZ(x) + +/* + * Access routines to encode and decode the color bit, which is the most + * significant bit of the structure. + */ +static inline void +snic_color_enc(struct snic_fw_req *req, u8 color) +{ + u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1; + + if (color) + *c |= 0x80; + else + *c &= ~0x80; +} + +static inline void +snic_color_dec(struct snic_fw_req *req, u8 *color) +{ + u8 *c = ((u8 *) req) + sizeof(struct snic_fw_req) - 1; + + *color = *c >> 7; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); +} +#endif /* end of __SNIC_FWINT_H */ diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c new file mode 100644 index 000000000000..993db7de4e4b --- /dev/null +++ b/drivers/scsi/snic/snic_io.c @@ -0,0 +1,518 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/slab.h> + +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/mempool.h> +#include <scsi/scsi_tcq.h> + +#include "snic_io.h" +#include "snic.h" +#include "cq_enet_desc.h" +#include "snic_fwint.h" + +static void +snic_wq_cmpl_frame_send(struct vnic_wq *wq, + struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, + void *opaque) +{ + struct snic *snic = svnic_dev_priv(wq->vdev); + + SNIC_BUG_ON(buf->os_buf == NULL); + + if (snic_log_level & SNIC_DESC_LOGGING) + SNIC_HOST_INFO(snic->shost, + "Ack received for snic_host_req %p.\n", + buf->os_buf); + + SNIC_TRC(snic->shost->host_no, 0, 0, + ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0, + 0); + pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); + buf->os_buf = NULL; +} + +static int +snic_wq_cmpl_handler_cont(struct vnic_dev *vdev, + struct cq_desc *cq_desc, + u8 type, + u16 q_num, + u16 cmpl_idx, + void *opaque) +{ + struct snic *snic = svnic_dev_priv(vdev); + unsigned long flags; + + SNIC_BUG_ON(q_num != 0); + + spin_lock_irqsave(&snic->wq_lock[q_num], flags); + svnic_wq_service(&snic->wq[q_num], + cq_desc, + cmpl_idx, + snic_wq_cmpl_frame_send, + NULL); + spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); + + return 0; +} /* end of snic_cmpl_handler_cont */ + +int +snic_wq_cmpl_handler(struct snic *snic, int work_to_do) +{ + unsigned int work_done = 0; + unsigned int i; + + snic->s_stats.misc.last_ack_time = jiffies; + for (i = 0; i < snic->wq_count; i++) { + work_done += svnic_cq_service(&snic->cq[i], + work_to_do, + snic_wq_cmpl_handler_cont, + NULL); + } + + return work_done; +} /* end of snic_wq_cmpl_handler */ + +void +snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + + struct snic_host_req *req = buf->os_buf; + struct snic *snic = svnic_dev_priv(wq->vdev); + struct snic_req_info *rqi = NULL; + unsigned long flags; + + pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); + + rqi = req_to_rqi(req); + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + if (list_empty(&rqi->list)) { + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + goto end; + } + + SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */ + list_del_init(&rqi->list); + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + + if (rqi->sge_va) { + snic_pci_unmap_rsp_buf(snic, rqi); + kfree((void *)rqi->sge_va); + rqi->sge_va = 0; + } + snic_req_free(snic, rqi); + SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. 
freed.\n"); + +end: + return; +} + +/* Criteria to select work queue in multi queue mode */ +static int +snic_select_wq(struct snic *snic) +{ + /* No multi queue support for now */ + BUILD_BUG_ON(SNIC_WQ_MAX > 1); + + return 0; +} + +int +snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len) +{ + dma_addr_t pa = 0; + unsigned long flags; + struct snic_fw_stats *fwstats = &snic->s_stats.fw; + long act_reqs; + int q_num = 0; + + snic_print_desc(__func__, os_buf, len); + + /* Map request buffer */ + pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(snic->pdev, pa)) { + SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n"); + + return -ENOMEM; + } + + q_num = snic_select_wq(snic); + + spin_lock_irqsave(&snic->wq_lock[q_num], flags); + if (!svnic_wq_desc_avail(snic->wq)) { + pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE); + spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); + atomic64_inc(&snic->s_stats.misc.wq_alloc_fail); + SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no); + + return -ENOMEM; + } + + snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); + spin_unlock_irqrestore(&snic->wq_lock[q_num], flags); + + /* Update stats */ + act_reqs = atomic64_inc_return(&fwstats->actv_reqs); + if (act_reqs > atomic64_read(&fwstats->max_actv_reqs)) + atomic64_set(&fwstats->max_actv_reqs, act_reqs); + + return 0; +} /* end of snic_queue_wq_desc() */ + +/* + * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list. + * Purpose : Used during driver unload to clean up the requests. + */ +void +snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi) +{ + unsigned long flags; + + INIT_LIST_HEAD(&rqi->list); + + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + list_add_tail(&rqi->list, &snic->spl_cmd_list); + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); +} + +/* + * snic_req_init: + * Allocates snic_req_info + snic_host_req + sgl data, and initializes. + */ +struct snic_req_info * +snic_req_init(struct snic *snic, int sg_cnt) +{ + u8 typ; + struct snic_req_info *rqi = NULL; + + typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ? + SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL; + + rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC); + if (!rqi) { + atomic64_inc(&snic->s_stats.io.alloc_fail); + SNIC_HOST_ERR(snic->shost, + "Failed to allocate memory from snic req pool id = %d\n", + typ); + return rqi; + } + + memset(rqi, 0, sizeof(*rqi)); + rqi->rq_pool_type = typ; + rqi->start_time = jiffies; + rqi->req = (struct snic_host_req *) (rqi + 1); + rqi->req_len = sizeof(struct snic_host_req); + rqi->snic = snic; + + rqi->req = (struct snic_host_req *)(rqi + 1); + + if (sg_cnt == 0) + goto end; + + rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc)); + + if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl)) + atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt); + + SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT); + atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]); + +end: + memset(rqi->req, 0, rqi->req_len); + + /* pre initialization of init_ctx to support req_to_rqi */ + rqi->req->hdr.init_ctx = (ulong) rqi; + + SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocatd.\n", rqi); + + return rqi; +} /* end of snic_req_init */ + +/* + * snic_abort_req_init : Inits abort request. 
+ */ +struct snic_host_req * +snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi) +{ + struct snic_host_req *req = NULL; + + SNIC_BUG_ON(!rqi); + + /* If abort to be issued second time, then reuse */ + if (rqi->abort_req) + return rqi->abort_req; + + + req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC); + if (!req) { + SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n"); + WARN_ON_ONCE(1); + + return NULL; + } + + rqi->abort_req = req; + memset(req, 0, sizeof(struct snic_host_req)); + /* pre initialization of init_ctx to support req_to_rqi */ + req->hdr.init_ctx = (ulong) rqi; + + return req; +} /* end of snic_abort_req_init */ + +/* + * snic_dr_req_init : Inits device reset req + */ +struct snic_host_req * +snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi) +{ + struct snic_host_req *req = NULL; + + SNIC_BUG_ON(!rqi); + + req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC); + if (!req) { + SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n"); + WARN_ON_ONCE(1); + + return NULL; + } + + SNIC_BUG_ON(rqi->dr_req != NULL); + rqi->dr_req = req; + memset(req, 0, sizeof(struct snic_host_req)); + /* pre initialization of init_ctx to support req_to_rqi */ + req->hdr.init_ctx = (ulong) rqi; + + return req; +} /* end of snic_dr_req_init */ + +/* frees snic_req_info and snic_host_req */ +void +snic_req_free(struct snic *snic, struct snic_req_info *rqi) +{ + SNIC_BUG_ON(rqi->req == rqi->abort_req); + SNIC_BUG_ON(rqi->req == rqi->dr_req); + SNIC_BUG_ON(rqi->sge_va != 0); + + SNIC_SCSI_DBG(snic->shost, + "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n", + rqi, rqi->req, rqi->abort_req, rqi->dr_req); + + if (rqi->abort_req) + mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]); + + if (rqi->dr_req) + mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]); + + mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]); +} + +void +snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi) +{ + struct snic_sg_desc *sgd; + + sgd = req_to_sgl(rqi_to_req(rqi)); + SNIC_BUG_ON(sgd[0].addr == 0); + pci_unmap_single(snic->pdev, + le64_to_cpu(sgd[0].addr), + le32_to_cpu(sgd[0].len), + PCI_DMA_FROMDEVICE); +} + +/* + * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them. + */ +void +snic_free_all_untagged_reqs(struct snic *snic) +{ + struct snic_req_info *rqi; + struct list_head *cur, *nxt; + unsigned long flags; + + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + list_for_each_safe(cur, nxt, &snic->spl_cmd_list) { + rqi = list_entry(cur, struct snic_req_info, list); + list_del_init(&rqi->list); + if (rqi->sge_va) { + snic_pci_unmap_rsp_buf(snic, rqi); + kfree((void *)rqi->sge_va); + rqi->sge_va = 0; + } + + snic_req_free(snic, rqi); + } + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); +} + +/* + * snic_release_untagged_req : Unlinks the untagged req and frees it. 
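+ * When the driver is being removed (snic->in_remove), the request is
+ * intentionally left on spl_cmd_list so that snic_free_all_untagged_reqs()
+ * frees it exactly once during cleanup. Typical life cycle (illustrative
+ * only):
+ *
+ *	snic_handle_untagged_req(snic, rqi);	- link to spl_cmd_list
+ *	... firmware completes the request ...
+ *	snic_release_untagged_req(snic, rqi);	- unlink and free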
+ */ +void +snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi) +{ + unsigned long flags; + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->in_remove) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + goto end; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + spin_lock_irqsave(&snic->spl_cmd_lock, flags); + if (list_empty(&rqi->list)) { + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + goto end; + } + list_del_init(&rqi->list); + spin_unlock_irqrestore(&snic->spl_cmd_lock, flags); + snic_req_free(snic, rqi); + +end: + return; +} + +/* dump buf in hex fmt */ +void +snic_hex_dump(char *pfx, char *data, int len) +{ + SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len); + print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len); +} + +#define LINE_BUFSZ 128 /* for snic_print_desc fn */ +static void +snic_dump_desc(const char *fn, char *os_buf, int len) +{ + struct snic_host_req *req = (struct snic_host_req *) os_buf; + struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf; + struct snic_req_info *rqi = NULL; + char line[LINE_BUFSZ] = { '\0' }; + char *cmd_str = NULL; + + if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) + rqi = (struct snic_req_info *) fwreq->hdr.init_ctx; + else + rqi = (struct snic_req_info *) req->hdr.init_ctx; + + SNIC_BUG_ON(rqi == NULL || rqi->req == NULL); + switch (req->hdr.type) { + case SNIC_REQ_REPORT_TGTS: + cmd_str = "report-tgt : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :"); + break; + + case SNIC_REQ_ICMND: + cmd_str = "icmnd : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :", + req->u.icmnd.cdb[0]); + break; + + case SNIC_REQ_ITMF: + cmd_str = "itmf : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :"); + break; + + case SNIC_REQ_HBA_RESET: + cmd_str = "hba reset :"; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :"); + break; + + case SNIC_REQ_EXCH_VER: + cmd_str = "exch ver : "; + snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :"); + break; + + case SNIC_REQ_TGT_INFO: + cmd_str = "tgt info : "; + break; + + case SNIC_RSP_REPORT_TGTS_CMPL: + cmd_str = "report tgt cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :"); + break; + + case SNIC_RSP_ICMND_CMPL: + cmd_str = "icmnd_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :", + rqi->req->u.icmnd.cdb[0]); + break; + + case SNIC_RSP_ITMF_CMPL: + cmd_str = "itmf_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :"); + break; + + case SNIC_RSP_HBA_RESET_CMPL: + cmd_str = "hba_reset_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :"); + break; + + case SNIC_RSP_EXCH_VER_CMPL: + cmd_str = "exch_ver_cmpl : "; + snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :"); + break; + + case SNIC_MSG_ACK: + cmd_str = "msg ack : "; + snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :"); + break; + + case SNIC_MSG_ASYNC_EVNOTIFY: + cmd_str = "async notify : "; + snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :"); + break; + + default: + cmd_str = "unknown : "; + SNIC_BUG_ON(1); + break; + } + + SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n", + fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status, + req->hdr.init_ctx); + + /* Enable it, to dump byte stream */ + if (snic_log_level & 0x20) + snic_hex_dump(cmd_str, os_buf, len); +} /* end of __snic_print_desc */ + +void +snic_print_desc(const char *fn, char *os_buf, int len) +{ + if (snic_log_level & SNIC_DESC_LOGGING) + snic_dump_desc(fn, os_buf, len); +} + +void +snic_calc_io_process_time(struct snic *snic, struct 
snic_req_info *rqi) +{ + u64 duration; + + duration = jiffies - rqi->start_time; + + if (duration > atomic64_read(&snic->s_stats.io.max_time)) + atomic64_set(&snic->s_stats.io.max_time, duration); +} diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h new file mode 100644 index 000000000000..093d6524cd42 --- /dev/null +++ b/drivers/scsi/snic/snic_io.h @@ -0,0 +1,118 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _SNIC_IO_H +#define _SNIC_IO_H + +#define SNIC_DFLT_SG_DESC_CNT 32 /* Default descriptors for sgl */ +#define SNIC_MAX_SG_DESC_CNT 60 /* Max descriptor for sgl */ +#define SNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */ + +/* SG descriptor for snic */ +struct snic_sg_desc { + __le64 addr; + __le32 len; + u32 _resvd; +}; + +struct snic_dflt_sgl { + struct snic_sg_desc sg_desc[SNIC_DFLT_SG_DESC_CNT]; +}; + +struct snic_max_sgl { + struct snic_sg_desc sg_desc[SNIC_MAX_SG_DESC_CNT]; +}; + +enum snic_req_cache_type { + SNIC_REQ_CACHE_DFLT_SGL = 0, /* cache with default size sgl */ + SNIC_REQ_CACHE_MAX_SGL, /* cache with max size sgl */ + SNIC_REQ_TM_CACHE, /* cache for task mgmt reqs contains + snic_host_req objects only*/ + SNIC_REQ_MAX_CACHES /* number of sgl caches */ +}; + +/* Per IO internal state */ +struct snic_internal_io_state { + char *rqi; + u64 flags; + u32 state; + u32 abts_status; /* Abort completion status */ + u32 lr_status; /* device reset completion status */ +}; + +/* IO state machine */ +enum snic_ioreq_state { + SNIC_IOREQ_NOT_INITED = 0, + SNIC_IOREQ_PENDING, + SNIC_IOREQ_ABTS_PENDING, + SNIC_IOREQ_ABTS_COMPLETE, + SNIC_IOREQ_LR_PENDING, + SNIC_IOREQ_LR_COMPLETE, + SNIC_IOREQ_COMPLETE, +}; + +struct snic; +struct snic_host_req; + +/* + * snic_req_info : Contains info about IO, one per scsi command. 
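+ * The rqi, its snic_host_req and the SG descriptors are carved from a
+ * single slab object; an illustrative view of the layout (alignment
+ * padding omitted):
+ *
+ *	[ snic_req_info | snic_host_req | snic_sg_desc[0..sg_cnt-1] ]
+ *	  rqi             rqi->req = (rqi + 1)    req_to_sgl(rqi->req)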
+ * Notes: Make sure that the structure is aligned to 16 B;
+ * this helps in easy access to snic_req_info from snic_host_req
+ */
+struct snic_req_info {
+	struct list_head list;
+	struct snic_host_req *req;
+	u64	start_time;		/* start time in jiffies */
+	u16	rq_pool_type;		/* notion of request pool type */
+	u16	req_len;		/* buf len passed to fw (req + sgl) */
+	u32	tgt_id;
+
+	u32	tm_tag;
+	u8	io_cmpl:1;		/* set to 1 when fw completes IO */
+	u8	resvd[3];
+	struct scsi_cmnd *sc;		/* Associated scsi cmd */
+	struct snic	*snic;		/* Associated snic */
+	ulong	sge_va;			/* Pointer to Resp Buffer */
+	u64	snsbuf_va;
+
+	struct snic_host_req *abort_req;
+	struct completion *abts_done;
+
+	struct snic_host_req *dr_req;
+	struct completion *dr_done;
+};
+
+
+#define rqi_to_req(rqi)	\
+	((struct snic_host_req *) (((struct snic_req_info *)rqi)->req))
+
+#define req_to_rqi(req)	\
+	((struct snic_req_info *) (((struct snic_host_req *)req)->hdr.init_ctx))
+
+#define req_to_sgl(req)	\
+	((struct snic_sg_desc *) (((struct snic_host_req *)req)+1))
+
+struct snic_req_info *
+snic_req_init(struct snic *, int sg_cnt);
+void snic_req_free(struct snic *, struct snic_req_info *);
+void snic_calc_io_process_time(struct snic *, struct snic_req_info *);
+void snic_pci_unmap_rsp_buf(struct snic *, struct snic_req_info *);
+struct snic_host_req *
+snic_abort_req_init(struct snic *, struct snic_req_info *);
+struct snic_host_req *
+snic_dr_req_init(struct snic *, struct snic_req_info *);
+#endif /* _SNIC_IO_H */
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
new file mode 100644
index 000000000000..a85fae25ec8c
--- /dev/null
+++ b/drivers/scsi/snic/snic_isr.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "vnic_dev.h"
+#include "vnic_intr.h"
+#include "vnic_stats.h"
+#include "snic_io.h"
+#include "snic.h"
+
+
+/*
+ * snic_isr_msix_wq : MSIx ISR for work queue.
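+ * One vector is registered per purpose rather than per CPU:
+ *	SNIC_MSIX_WQ		- acks for requests the host posted
+ *	SNIC_MSIX_IO_CMPL	- IO completions from firmware
+ *	SNIC_MSIX_ERR_NOTIFY	- queue errors and link events
+ * Each handler returns its credits with svnic_intr_return_credits(),
+ * unmasking the interrupt and resetting the coalescing timer.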
+ */ + +static irqreturn_t +snic_isr_msix_wq(int irq, void *data) +{ + struct snic *snic = data; + unsigned long wq_work_done = 0; + + snic->s_stats.misc.last_isr_time = jiffies; + atomic64_inc(&snic->s_stats.misc.isr_cnt); + + wq_work_done = snic_wq_cmpl_handler(snic, -1); + svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ], + wq_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} /* end of snic_isr_msix_wq */ + +static irqreturn_t +snic_isr_msix_io_cmpl(int irq, void *data) +{ + struct snic *snic = data; + unsigned long iocmpl_work_done = 0; + + snic->s_stats.misc.last_isr_time = jiffies; + atomic64_inc(&snic->s_stats.misc.isr_cnt); + + iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1); + svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL], + iocmpl_work_done, + 1 /* unmask intr */, + 1 /* reset intr timer */); + + return IRQ_HANDLED; +} /* end of snic_isr_msix_io_cmpl */ + +static irqreturn_t +snic_isr_msix_err_notify(int irq, void *data) +{ + struct snic *snic = data; + + snic->s_stats.misc.last_isr_time = jiffies; + atomic64_inc(&snic->s_stats.misc.isr_cnt); + + svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]); + snic_log_q_error(snic); + + /*Handling link events */ + snic_handle_link_event(snic); + + return IRQ_HANDLED; +} /* end of snic_isr_msix_err_notify */ + + +void +snic_free_intr(struct snic *snic) +{ + int i; + + /* ONLY interrupt mode MSIX is supported */ + for (i = 0; i < ARRAY_SIZE(snic->msix); i++) { + if (snic->msix[i].requested) { + free_irq(snic->msix_entry[i].vector, + snic->msix[i].devid); + } + } +} /* end of snic_free_intr */ + +int +snic_request_intr(struct snic *snic) +{ + int ret = 0, i; + enum vnic_dev_intr_mode intr_mode; + + intr_mode = svnic_dev_get_intr_mode(snic->vdev); + SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX); + + /* + * Currently HW supports single WQ and CQ. So passing devid as snic. + * When hardware supports multiple WQs and CQs, one idea is + * to pass devid as corresponding WQ or CQ ptr and retrieve snic + * from queue ptr. + * Except for err_notify, which is always one. 
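+ * If request_irq() fails midway, snic_free_intr() releases the vectors
+ * acquired so far; it frees only entries whose .requested flag was set,
+ * so partially initialized state unwinds safely.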
+ */
+	sprintf(snic->msix[SNIC_MSIX_WQ].devname,
+		"%.11s-scsi-wq",
+		snic->name);
+	snic->msix[SNIC_MSIX_WQ].isr = snic_isr_msix_wq;
+	snic->msix[SNIC_MSIX_WQ].devid = snic;
+
+	sprintf(snic->msix[SNIC_MSIX_IO_CMPL].devname,
+		"%.11s-io-cmpl",
+		snic->name);
+	snic->msix[SNIC_MSIX_IO_CMPL].isr = snic_isr_msix_io_cmpl;
+	snic->msix[SNIC_MSIX_IO_CMPL].devid = snic;
+
+	sprintf(snic->msix[SNIC_MSIX_ERR_NOTIFY].devname,
+		"%.11s-err-notify",
+		snic->name);
+	snic->msix[SNIC_MSIX_ERR_NOTIFY].isr = snic_isr_msix_err_notify;
+	snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;
+
+	for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
+		ret = request_irq(snic->msix_entry[i].vector,
+				  snic->msix[i].isr,
+				  0,
+				  snic->msix[i].devname,
+				  snic->msix[i].devid);
+		if (ret) {
+			SNIC_HOST_ERR(snic->shost,
+				      "MSI-X: request_irq(%d) failed %d\n",
+				      i,
+				      ret);
+			snic_free_intr(snic);
+			break;
+		}
+		snic->msix[i].requested = 1;
+	}
+
+	return ret;
+} /* end of snic_request_intr */
+
+int
+snic_set_intr_mode(struct snic *snic)
+{
+	unsigned int n = ARRAY_SIZE(snic->wq);
+	unsigned int m = SNIC_CQ_IO_CMPL_MAX;
+	unsigned int i;
+
+	/*
+	 * We need n WQs, m CQs, and n+m+1 INTRs
+	 * (the last INTR is used for WQ/CQ errors and the notification area)
+	 */
+
+	BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
+			ARRAY_SIZE(snic->intr));
+	SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));
+
+	for (i = 0; i < (n + m + 1); i++)
+		snic->msix_entry[i].entry = i;
+
+	if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
+		if (!pci_enable_msix(snic->pdev,
+				     snic->msix_entry,
+				     (n + m + 1))) {
+			snic->wq_count = n;
+			snic->cq_count = n + m;
+			snic->intr_count = n + m + 1;
+			snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
+
+			SNIC_ISR_DBG(snic->shost,
+				     "Using MSI-X Interrupts\n");
+			svnic_dev_set_intr_mode(snic->vdev,
+						VNIC_DEV_INTR_MODE_MSIX);
+
+			return 0;
+		}
+	}
+
+	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+
+	return -EINVAL;
+} /* end of snic_set_intr_mode */
+
+void
+snic_clear_intr_mode(struct snic *snic)
+{
+	pci_disable_msix(snic->pdev);
+
+	svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
+}
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
new file mode 100644
index 000000000000..b2b87cef00fc
--- /dev/null
+++ b/drivers/scsi/snic/snic_main.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include <linux/module.h> +#include <linux/mempool.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_tcq.h> + +#include "snic.h" +#include "snic_fwint.h" + +#define PCI_DEVICE_ID_CISCO_SNIC 0x0046 + +/* Supported devices by snic module */ +static struct pci_device_id snic_id_table[] = { + {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) }, + { 0, } /* end of table */ +}; + +unsigned int snic_log_level = 0x0; +module_param(snic_log_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels"); + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS +unsigned int snic_trace_max_pages = 16; +module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(snic_trace_max_pages, + "Total allocated memory pages for snic trace buffer"); + +#endif +unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH; +module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); + +/* + * snic_slave_alloc : callback function to SCSI Mid Layer, called on + * scsi device initialization. + */ +static int +snic_slave_alloc(struct scsi_device *sdev) +{ + struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); + + if (!tgt || snic_tgt_chkready(tgt)) + return -ENXIO; + + return 0; +} + +/* + * snic_slave_configure : callback function to SCSI Mid Layer, called on + * scsi device initialization. + */ +static int +snic_slave_configure(struct scsi_device *sdev) +{ + struct snic *snic = shost_priv(sdev->host); + u32 qdepth = 0, max_ios = 0; + int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ; + + /* Set Queue Depth */ + max_ios = snic_max_qdepth; + qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH); + scsi_change_queue_depth(sdev, qdepth); + + if (snic->fwinfo.io_tmo > 1) + tmo = snic->fwinfo.io_tmo * HZ; + + /* FW requires extended timeouts */ + blk_queue_rq_timeout(sdev->request_queue, tmo); + + return 0; +} + +static int +snic_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + int qsz = 0; + + qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH); + scsi_change_queue_depth(sdev, qsz); + SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth); + + return sdev->queue_depth; +} + +static struct scsi_host_template snic_host_template = { + .module = THIS_MODULE, + .name = SNIC_DRV_NAME, + .queuecommand = snic_queuecommand, + .eh_abort_handler = snic_abort_cmd, + .eh_device_reset_handler = snic_device_reset, + .eh_host_reset_handler = snic_host_reset, + .slave_alloc = snic_slave_alloc, + .slave_configure = snic_slave_configure, + .change_queue_depth = snic_change_queue_depth, + .this_id = -1, + .cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH, + .can_queue = SNIC_MAX_IO_REQ, + .use_clustering = ENABLE_CLUSTERING, + .sg_tablesize = SNIC_MAX_SG_DESC_CNT, + .max_sectors = 0x800, + .shost_attrs = snic_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, + .cmd_size = sizeof(struct snic_internal_io_state), + .proc_name = "snic_scsi", +}; + +/* + * snic_handle_link_event : Handles link events such as link up/down/error + */ +void +snic_handle_link_event(struct snic *snic) +{ + unsigned long flags; + + spin_lock_irqsave(&snic->snic_lock, flags); + if (snic->stop_link_events) { + spin_unlock_irqrestore(&snic->snic_lock, flags); + + return; + } + spin_unlock_irqrestore(&snic->snic_lock, flags); + + 
queue_work(snic_glob->event_q, &snic->link_work);
+} /* end of snic_handle_link_event */
+
+/*
+ * snic_notify_set : sets notification area
+ * This notification area is to receive events from fw
+ * Note: snic supports only MSIX interrupts, in which we can just call
+ * svnic_dev_notify_set directly
+ */
+static int
+snic_notify_set(struct snic *snic)
+{
+	int ret = 0;
+	enum vnic_dev_intr_mode intr_mode;
+
+	intr_mode = svnic_dev_get_intr_mode(snic->vdev);
+
+	if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
+		ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
+	} else {
+		SNIC_HOST_ERR(snic->shost,
+			      "Interrupt mode should be set up before devcmd notify set %d\n",
+			      intr_mode);
+		ret = -1;
+	}
+
+	return ret;
+} /* end of snic_notify_set */
+
+/*
+ * snic_dev_wait : polls vnic open status.
+ */
+static int
+snic_dev_wait(struct vnic_dev *vdev,
+	      int (*start)(struct vnic_dev *, int),
+	      int (*finished)(struct vnic_dev *, int *),
+	      int arg)
+{
+	unsigned long time;
+	int ret, done;
+	int retry_cnt = 0;
+
+	ret = start(vdev, arg);
+	if (ret)
+		return ret;
+
+	/*
+	 * Wait for func to complete...2 seconds max.
+	 *
+	 * Sometimes schedule_timeout_uninterruptible takes a long time
+	 * to wake up, which can result in the retry being skipped. The
+	 * retry counter ensures we retry at least twice.
+	 */
+	time = jiffies + (HZ * 2);
+	do {
+		ret = finished(vdev, &done);
+		if (ret)
+			return ret;
+
+		if (done)
+			return 0;
+		schedule_timeout_uninterruptible(HZ/10);
+		++retry_cnt;
+	} while (time_after(time, jiffies) || (retry_cnt < 3));
+
+	return -ETIMEDOUT;
+} /* end of snic_dev_wait */
+
+/*
+ * snic_cleanup: called by snic_remove
+ * Stops the snic device, masks all interrupts, drains completed CQ
+ * entries, and cleans up posted WQ/RQ/Copy-WQ entries.
+ */
+static int
+snic_cleanup(struct snic *snic)
+{
+	unsigned int i;
+	int ret;
+
+	svnic_dev_disable(snic->vdev);
+	for (i = 0; i < snic->intr_count; i++)
+		svnic_intr_mask(&snic->intr[i]);
+
+	for (i = 0; i < snic->wq_count; i++) {
+		ret = svnic_wq_disable(&snic->wq[i]);
+		if (ret)
+			return ret;
+	}
+
+	/* Clean up completed IOs */
+	snic_fwcq_cmpl_handler(snic, -1);
+
+	snic_wq_cmpl_handler(snic, -1);
+
+	/* Clean up the IOs that have not completed */
+	for (i = 0; i < snic->wq_count; i++)
+		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
+
+	for (i = 0; i < snic->cq_count; i++)
+		svnic_cq_clean(&snic->cq[i]);
+
+	for (i = 0; i < snic->intr_count; i++)
+		svnic_intr_clean(&snic->intr[i]);
+
+	/* Cleanup snic specific requests */
+	snic_free_all_untagged_reqs(snic);
+
+	/* Cleanup Pending SCSI commands */
+	snic_shutdown_scsi_cleanup(snic);
+
+	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
+		mempool_destroy(snic->req_pool[i]);
+
+	return 0;
+} /* end of snic_cleanup */
+
+
+static void
+snic_iounmap(struct snic *snic)
+{
+	if (snic->bar0.vaddr)
+		iounmap(snic->bar0.vaddr);
+}
+
+/*
+ * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
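+ * Used as the 'finished' callback of snic_dev_wait(), paired with
+ * svnic_dev_open as the 'start' callback, e.g.
+ *
+ *	ret = snic_dev_wait(snic->vdev, svnic_dev_open,
+ *			    snic_vdev_open_done, 0);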
+ */ +static int +snic_vdev_open_done(struct vnic_dev *vdev, int *done) +{ + struct snic *snic = svnic_dev_priv(vdev); + int ret; + int nretries = 5; + + do { + ret = svnic_dev_open_done(vdev, done); + if (ret == 0) + break; + + SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n"); + } while (nretries--); + + return ret; +} /* end of snic_vdev_open_done */ + +/* + * snic_add_host : registers scsi host with ML + */ +static int +snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) +{ + int ret = 0; + + ret = scsi_add_host(shost, &pdev->dev); + if (ret) { + SNIC_HOST_ERR(shost, + "snic: scsi_add_host failed. %d\n", + ret); + + return ret; + } + + SNIC_BUG_ON(shost->work_q != NULL); + snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", + shost->host_no); + shost->work_q = create_singlethread_workqueue(shost->work_q_name); + if (!shost->work_q) { + SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); + + ret = -ENOMEM; + } + + return ret; +} /* end of snic_add_host */ + +static void +snic_del_host(struct Scsi_Host *shost) +{ + if (!shost->work_q) + return; + + destroy_workqueue(shost->work_q); + shost->work_q = NULL; + scsi_remove_host(shost); +} + +int +snic_get_state(struct snic *snic) +{ + return atomic_read(&snic->state); +} + +void +snic_set_state(struct snic *snic, enum snic_state state) +{ + SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n", + snic_state_to_str(snic_get_state(snic)), + snic_state_to_str(state)); + + atomic_set(&snic->state, state); +} + +/* + * snic_probe : Initialize the snic interface. + */ +static int +snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct Scsi_Host *shost; + struct snic *snic; + mempool_t *pool; + unsigned long flags; + u32 max_ios = 0; + int ret, i; + + /* Device Information */ + SNIC_INFO("snic device %4x:%4x:%4x:%4x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + + SNIC_INFO("snic device bus %x: slot %x: fn %x\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + /* + * Allocate SCSI Host and setup association between host, and snic + */ + shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic)); + if (!shost) { + SNIC_ERR("Unable to alloc scsi_host\n"); + ret = -ENOMEM; + + goto prob_end; + } + snic = shost_priv(shost); + snic->shost = shost; + + snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME, + shost->host_no); + + SNIC_HOST_INFO(shost, + "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n", + shost->host_no, snic, shost, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* Per snic debugfs init */ + ret = snic_stats_debugfs_init(snic); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "Failed to initialize debugfs stats\n"); + snic_stats_debugfs_remove(snic); + } +#endif + + /* Setup PCI Resources */ + pci_set_drvdata(pdev, snic); + snic->pdev = pdev; + + ret = pci_enable_device(pdev); + if (ret) { + SNIC_HOST_ERR(shost, + "Cannot enable PCI Resources, aborting : %d\n", + ret); + + goto err_free_snic; + } + + ret = pci_request_regions(pdev, SNIC_DRV_NAME); + if (ret) { + SNIC_HOST_ERR(shost, + "Cannot obtain PCI Resources, aborting : %d\n", + ret); + + goto err_pci_disable; + } + + pci_set_master(pdev); + + /* + * Query PCI Controller on system for DMA addressing + * limitation for the device. Try 43-bit first, and + * fail to 32-bit. 
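+ * That is, the streaming mask falls back as follows (pattern used in
+ * the code below):
+ *
+ *	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(43)))
+ *		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ *
+ * with pci_set_consistent_dma_mask() then set to the same width.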
+ */ + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + SNIC_HOST_ERR(shost, + "No Usable DMA Configuration, aborting %d\n", + ret); + + goto err_rel_regions; + } + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) { + SNIC_HOST_ERR(shost, + "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n", + ret); + + goto err_rel_regions; + } + } else { + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43)); + if (ret) { + SNIC_HOST_ERR(shost, + "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n", + ret); + + goto err_rel_regions; + } + } + + + /* Map vNIC resources from BAR0 */ + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n"); + + ret = -ENODEV; + goto err_rel_regions; + } + + snic->bar0.vaddr = pci_iomap(pdev, 0, 0); + if (!snic->bar0.vaddr) { + SNIC_HOST_ERR(shost, + "Cannot memory map BAR0 res hdr aborting.\n"); + + ret = -ENODEV; + goto err_rel_regions; + } + + snic->bar0.bus_addr = pci_resource_start(pdev, 0); + snic->bar0.len = pci_resource_len(pdev, 0); + SNIC_BUG_ON(snic->bar0.bus_addr == 0); + + /* Devcmd2 Resource Allocation and Initialization */ + snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1); + if (!snic->vdev) { + SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n"); + + ret = -ENODEV; + goto err_iounmap; + } + + ret = svnic_dev_cmd_init(snic->vdev, 0); + if (ret) { + SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret); + + goto err_vnic_unreg; + } + + ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0); + if (ret) { + SNIC_HOST_ERR(shost, + "vNIC dev open failed, aborting. %d\n", + ret); + + goto err_vnic_unreg; + } + + ret = svnic_dev_init(snic->vdev, 0); + if (ret) { + SNIC_HOST_ERR(shost, + "vNIC dev init failed. aborting. %d\n", + ret); + + goto err_dev_close; + } + + /* Get vNIC information */ + ret = snic_get_vnic_config(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Get vNIC configuration failed, aborting. %d\n", + ret); + + goto err_dev_close; + } + + /* Configure Maximum Outstanding IO reqs */ + max_ios = snic->config.io_throttle_count; + if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD) + shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ, + max_t(u32, SNIC_MIN_IO_REQ, max_ios)); + + snic->max_tag_id = shost->can_queue; + + ret = scsi_init_shared_tag_map(shost, snic->max_tag_id); + if (ret) { + SNIC_HOST_ERR(shost, + "Unable to alloc shared tag map. %d\n", + ret); + + goto err_dev_close; + } + + shost->max_lun = snic->config.luns_per_tgt; + shost->max_id = SNIC_MAX_TARGET; + + shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/ + + snic_get_res_counts(snic); + + /* + * Assumption: Only MSIx is supported + */ + ret = snic_set_intr_mode(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to set intr mode aborting. %d\n", + ret); + + goto err_dev_close; + } + + ret = snic_alloc_vnic_res(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to alloc vNIC resources aborting. 
%d\n", + ret); + + goto err_clear_intr; + } + + /* Initialize specific lists */ + INIT_LIST_HEAD(&snic->list); + + /* + * spl_cmd_list for maintaining snic specific cmds + * such as EXCH_VER_REQ, REPORT_TARGETS etc + */ + INIT_LIST_HEAD(&snic->spl_cmd_list); + spin_lock_init(&snic->spl_cmd_lock); + + /* initialize all snic locks */ + spin_lock_init(&snic->snic_lock); + + for (i = 0; i < SNIC_WQ_MAX; i++) + spin_lock_init(&snic->wq_lock[i]); + + for (i = 0; i < SNIC_IO_LOCKS; i++) + spin_lock_init(&snic->io_req_lock[i]); + + pool = mempool_create_slab_pool(2, + snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); + if (!pool) { + SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); + + goto err_free_res; + } + + snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool; + + pool = mempool_create_slab_pool(2, + snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); + if (!pool) { + SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); + + goto err_free_dflt_sgl_pool; + } + + snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool; + + pool = mempool_create_slab_pool(2, + snic_glob->req_cache[SNIC_REQ_TM_CACHE]); + if (!pool) { + SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); + + goto err_free_max_sgl_pool; + } + + snic->req_pool[SNIC_REQ_TM_CACHE] = pool; + + /* Initialize snic state */ + atomic_set(&snic->state, SNIC_INIT); + + atomic_set(&snic->ios_inflight, 0); + + /* Setup notification buffer area */ + ret = snic_notify_set(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to alloc notify buffer aborting. %d\n", + ret); + + goto err_free_tmreq_pool; + } + + /* + * Initialization done with PCI system, hardware, firmware. + * Add shost to SCSI + */ + ret = snic_add_host(shost, pdev); + if (ret) { + SNIC_HOST_ERR(shost, + "Adding scsi host Failed ... exiting. %d\n", + ret); + + goto err_notify_unset; + } + + spin_lock_irqsave(&snic_glob->snic_list_lock, flags); + list_add_tail(&snic->list, &snic_glob->snic_list); + spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags); + + snic_disc_init(&snic->disc); + INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc); + INIT_WORK(&snic->disc_work, snic_handle_disc); + INIT_WORK(&snic->link_work, snic_handle_link); + + /* Enable all queues */ + for (i = 0; i < snic->wq_count; i++) + svnic_wq_enable(&snic->wq[i]); + + ret = svnic_dev_enable_wait(snic->vdev); + if (ret) { + SNIC_HOST_ERR(shost, + "vNIC dev enable failed w/ error %d\n", + ret); + + goto err_vdev_enable; + } + + ret = snic_request_intr(snic); + if (ret) { + SNIC_HOST_ERR(shost, "Unable to request irq. 
%d\n", ret); + + goto err_req_intr; + } + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_unmask(&snic->intr[i]); + + snic_set_state(snic, SNIC_ONLINE); + + /* Get snic params */ + ret = snic_get_conf(snic); + if (ret) { + SNIC_HOST_ERR(shost, + "Failed to get snic io config from FW w err %d\n", + ret); + + goto err_get_conf; + } + + ret = snic_disc_start(snic); + if (ret) { + SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n", + ret); + + goto err_get_conf; + } + + SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n"); + + return 0; + +err_get_conf: + snic_free_all_untagged_reqs(snic); + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_mask(&snic->intr[i]); + + snic_free_intr(snic); + +err_req_intr: + svnic_dev_disable(snic->vdev); + +err_vdev_enable: + for (i = 0; i < snic->wq_count; i++) { + int rc = 0; + + rc = svnic_wq_disable(&snic->wq[i]); + if (rc) { + SNIC_HOST_ERR(shost, + "WQ Disable Failed w/ err = %d\n", rc); + + break; + } + } + snic_del_host(snic->shost); + +err_notify_unset: + svnic_dev_notify_unset(snic->vdev); + +err_free_tmreq_pool: + mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]); + +err_free_max_sgl_pool: + mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]); + +err_free_dflt_sgl_pool: + mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]); + +err_free_res: + snic_free_vnic_res(snic); + +err_clear_intr: + snic_clear_intr_mode(snic); + +err_dev_close: + svnic_dev_close(snic->vdev); + +err_vnic_unreg: + svnic_dev_unregister(snic->vdev); + +err_iounmap: + snic_iounmap(snic); + +err_rel_regions: + pci_release_regions(pdev); + +err_pci_disable: + pci_disable_device(pdev); + +err_free_snic: +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + snic_stats_debugfs_remove(snic); +#endif + scsi_host_put(shost); + pci_set_drvdata(pdev, NULL); + +prob_end: + SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + return ret; +} /* end of snic_probe */ + + +/* + * snic_remove : invoked on unbinding the interface to cleanup the + * resources allocated in snic_probe on initialization. + */ +static void +snic_remove(struct pci_dev *pdev) +{ + struct snic *snic = pci_get_drvdata(pdev); + unsigned long flags; + + if (!snic) { + SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + return; + } + + /* + * Mark state so that the workqueue thread stops forwarding + * received frames and link events. ISR and other threads + * that can queue work items will also stop creating work + * items on the snic workqueue + */ + snic_set_state(snic, SNIC_OFFLINE); + spin_lock_irqsave(&snic->snic_lock, flags); + snic->stop_link_events = 1; + spin_unlock_irqrestore(&snic->snic_lock, flags); + + flush_workqueue(snic_glob->event_q); + snic_disc_term(snic); + + spin_lock_irqsave(&snic->snic_lock, flags); + snic->in_remove = 1; + spin_unlock_irqrestore(&snic->snic_lock, flags); + + /* + * This stops the snic device, masks all interrupts, Completed + * CQ entries are drained. 
Posted WQ/RQ/Copy-WQ entries are
+	 * cleaned up.
+	 */
+	snic_cleanup(snic);
+
+	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
+	list_del(&snic->list);
+	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
+
+	snic_tgt_del_all(snic);
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+	snic_stats_debugfs_remove(snic);
+#endif
+	snic_del_host(snic->shost);
+
+	svnic_dev_notify_unset(snic->vdev);
+	snic_free_intr(snic);
+	snic_free_vnic_res(snic);
+	snic_clear_intr_mode(snic);
+	svnic_dev_close(snic->vdev);
+	svnic_dev_unregister(snic->vdev);
+	snic_iounmap(snic);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	/* this frees Scsi_Host and snic memory (continuous chunk) */
+	scsi_host_put(snic->shost);
+} /* end of snic_remove */
+
+
+struct snic_global *snic_glob;
+
+/*
+ * snic_global_data_init: Initialize SNIC Global Data
+ * Notes: All the global lists and variables should be part of global data;
+ * this helps in debugging.
+ */
+static int
+snic_global_data_init(void)
+{
+	int ret = 0;
+	struct kmem_cache *cachep;
+	ssize_t len = 0;
+
+	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
+
+	if (!snic_glob) {
+		SNIC_ERR("Failed to allocate Global Context.\n");
+
+		ret = -ENOMEM;
+		goto gdi_end;
+	}
+
+#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
+	/* Debugfs related Initialization */
+	/* Create debugfs entries for snic */
+	ret = snic_debugfs_init();
+	if (ret < 0) {
+		SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
+		snic_debugfs_term();
+		/* continue even if it fails */
+	}
+
+	/* Trace related Initialization */
+	/* Allocate memory for trace buffer */
+	ret = snic_trc_init();
+	if (ret < 0) {
+		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
+		snic_trc_free();
+		/* continue even if it fails */
+	}
+
+#endif
+	INIT_LIST_HEAD(&snic_glob->snic_list);
+	spin_lock_init(&snic_glob->snic_list_lock);
+
+	/* Create a cache for allocation of snic_host_req+default size ESGLs */
+	len = sizeof(struct snic_req_info);
+	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
+	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
+				   SLAB_HWCACHE_ALIGN, NULL);
+	if (!cachep) {
+		SNIC_ERR("Failed to create snic default sgl slab\n");
+		ret = -ENOMEM;
+
+		goto err_dflt_req_slab;
+	}
+	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
+
+	/* Create a cache for allocation of max size Extended SGLs */
+	len = sizeof(struct snic_req_info);
+	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
+	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
+				   SLAB_HWCACHE_ALIGN, NULL);
+	if (!cachep) {
+		SNIC_ERR("Failed to create snic max sgl slab\n");
+		ret = -ENOMEM;
+
+		goto err_max_req_slab;
+	}
+	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
+
+	len = sizeof(struct snic_host_req);
+	cachep = kmem_cache_create("snic_req_tm", len, SNIC_SG_DESC_ALIGN,
+				   SLAB_HWCACHE_ALIGN, NULL);
+	if (!cachep) {
+		SNIC_ERR("Failed to create snic tm req slab\n");
+		ret = -ENOMEM;
+
+		goto err_tmreq_slab;
+	}
+	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
+
+	/* snic_event queue */
+	snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
+	if (!snic_glob->event_q) {
+		SNIC_ERR("snic event queue create failed\n");
+		ret = -ENOMEM;
+
+		goto err_eventq;
+	}
+
+	return ret;
+
+err_eventq:
+	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
+
+err_tmreq_slab:
+	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
+
+err_max_req_slab:
+	
kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); + +err_dflt_req_slab: +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + snic_trc_free(); + snic_debugfs_term(); +#endif + kfree(snic_glob); + snic_glob = NULL; + +gdi_end: + return ret; +} /* end of snic_glob_init */ + +/* + * snic_global_data_cleanup : Frees SNIC Global Data + */ +static void +snic_global_data_cleanup(void) +{ + SNIC_BUG_ON(snic_glob == NULL); + + destroy_workqueue(snic_glob->event_q); + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); + kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + /* Freeing Trace Resources */ + snic_trc_free(); + + /* Freeing Debugfs Resources */ + snic_debugfs_term(); +#endif + kfree(snic_glob); + snic_glob = NULL; +} /* end of snic_glob_cleanup */ + +static struct pci_driver snic_driver = { + .name = SNIC_DRV_NAME, + .id_table = snic_id_table, + .probe = snic_probe, + .remove = snic_remove, +}; + +static int __init +snic_init_module(void) +{ + int ret = 0; + +#ifndef __x86_64__ + SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); +#endif + + SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION); + + ret = snic_global_data_init(); + if (ret) { + SNIC_ERR("Failed to Initialize Global Data.\n"); + + return ret; + } + + ret = pci_register_driver(&snic_driver); + if (ret < 0) { + SNIC_ERR("PCI driver register error\n"); + + goto err_pci_reg; + } + + return ret; + +err_pci_reg: + snic_global_data_cleanup(); + + return ret; +} + +static void __exit +snic_cleanup_module(void) +{ + pci_unregister_driver(&snic_driver); + snic_global_data_cleanup(); +} + +module_init(snic_init_module); +module_exit(snic_cleanup_module); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION); +MODULE_VERSION(SNIC_DRV_VERSION); +MODULE_DEVICE_TABLE(pci, snic_id_table); +MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, " + "Sesidhar Baddela <sebaddel@cisco.com>"); diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c new file mode 100644 index 000000000000..b54912c8ca0c --- /dev/null +++ b/drivers/scsi/snic/snic_res.c @@ -0,0 +1,295 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "wq_enet_desc.h" +#include "cq_enet_desc.h" +#include "vnic_resource.h" +#include "vnic_dev.h" +#include "vnic_wq.h" +#include "vnic_cq.h" +#include "vnic_intr.h" +#include "vnic_stats.h" +#include "snic.h" + +int +snic_get_vnic_config(struct snic *snic) +{ + struct vnic_snic_config *c = &snic->config; + int ret; + +#define GET_CONFIG(m) \ + do { \ + ret = svnic_dev_spec(snic->vdev, \ + offsetof(struct vnic_snic_config, m), \ + sizeof(c->m), \ + &c->m); \ + if (ret) { \ + SNIC_HOST_ERR(snic->shost, \ + "Error getting %s, %d\n", #m, ret); \ + return ret; \ + } \ + } while (0) + + GET_CONFIG(wq_enet_desc_count); + GET_CONFIG(maxdatafieldsize); + GET_CONFIG(intr_timer); + GET_CONFIG(intr_timer_type); + GET_CONFIG(flags); + GET_CONFIG(io_throttle_count); + GET_CONFIG(port_down_timeout); + GET_CONFIG(port_down_io_retries); + GET_CONFIG(luns_per_tgt); + GET_CONFIG(xpt_type); + GET_CONFIG(hid); + + c->wq_enet_desc_count = min_t(u32, + VNIC_SNIC_WQ_DESCS_MAX, + max_t(u32, + VNIC_SNIC_WQ_DESCS_MIN, + c->wq_enet_desc_count)); + + c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16); + + c->maxdatafieldsize = min_t(u32, + VNIC_SNIC_MAXDATAFIELDSIZE_MAX, + max_t(u32, + VNIC_SNIC_MAXDATAFIELDSIZE_MIN, + c->maxdatafieldsize)); + + c->io_throttle_count = min_t(u32, + VNIC_SNIC_IO_THROTTLE_COUNT_MAX, + max_t(u32, + VNIC_SNIC_IO_THROTTLE_COUNT_MIN, + c->io_throttle_count)); + + c->port_down_timeout = min_t(u32, + VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX, + c->port_down_timeout); + + c->port_down_io_retries = min_t(u32, + VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX, + c->port_down_io_retries); + + c->luns_per_tgt = min_t(u32, + VNIC_SNIC_LUNS_PER_TARGET_MAX, + max_t(u32, + VNIC_SNIC_LUNS_PER_TARGET_MIN, + c->luns_per_tgt)); + + c->intr_timer = min_t(u32, VNIC_INTR_TIMER_MAX, c->intr_timer); + + SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count); + SNIC_INFO("vNIC mtu %d intr timer %d\n", + c->maxdatafieldsize, + c->intr_timer); + + SNIC_INFO("vNIC flags 0x%x luns per tgt %d\n", + c->flags, + c->luns_per_tgt); + + SNIC_INFO("vNIC io throttle count %d\n", c->io_throttle_count); + SNIC_INFO("vNIC port down timeout %d port down io retries %d\n", + c->port_down_timeout, + c->port_down_io_retries); + + SNIC_INFO("vNIC back end type = %d\n", c->xpt_type); + SNIC_INFO("vNIC hid = %d\n", c->hid); + + return 0; +} + +void +snic_get_res_counts(struct snic *snic) +{ + snic->wq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_WQ); + SNIC_BUG_ON(snic->wq_count == 0); + snic->cq_count = svnic_dev_get_res_count(snic->vdev, RES_TYPE_CQ); + SNIC_BUG_ON(snic->cq_count == 0); + snic->intr_count = svnic_dev_get_res_count(snic->vdev, + RES_TYPE_INTR_CTRL); + SNIC_BUG_ON(snic->intr_count == 0); +} + +void +snic_free_vnic_res(struct snic *snic) +{ + unsigned int i; + + for (i = 0; i < snic->wq_count; i++) + svnic_wq_free(&snic->wq[i]); + + for (i = 0; i < snic->cq_count; i++) + svnic_cq_free(&snic->cq[i]); + + for (i = 0; i < snic->intr_count; i++) + svnic_intr_free(&snic->intr[i]); +} + +int +snic_alloc_vnic_res(struct snic *snic) +{ + enum vnic_dev_intr_mode intr_mode; + unsigned int mask_on_assertion; + unsigned int intr_offset; + unsigned int err_intr_enable; + unsigned int err_intr_offset; + unsigned int i; + int ret; + + intr_mode = svnic_dev_get_intr_mode(snic->vdev); + + SNIC_INFO("vNIC interrupt mode: %s\n", + ((intr_mode == VNIC_DEV_INTR_MODE_INTX) ? + "Legacy PCI INTx" : + ((intr_mode == VNIC_DEV_INTR_MODE_MSI) ? 
+ "MSI" : + ((intr_mode == VNIC_DEV_INTR_MODE_MSIX) ? + "MSI-X" : "Unknown")))); + + /* only MSI-X is supported */ + SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX); + + SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count, + snic->cq_count, + snic->intr_count); + + + /* Allocate WQs used for SCSI IOs */ + for (i = 0; i < snic->wq_count; i++) { + ret = svnic_wq_alloc(snic->vdev, + &snic->wq[i], + i, + snic->config.wq_enet_desc_count, + sizeof(struct wq_enet_desc)); + if (ret) + goto error_cleanup; + } + + /* CQ for each WQ */ + for (i = 0; i < snic->wq_count; i++) { + ret = svnic_cq_alloc(snic->vdev, + &snic->cq[i], + i, + snic->config.wq_enet_desc_count, + sizeof(struct cq_enet_wq_desc)); + if (ret) + goto error_cleanup; + } + + SNIC_BUG_ON(snic->cq_count != 2 * snic->wq_count); + /* CQ for FW TO host */ + for (i = snic->wq_count; i < snic->cq_count; i++) { + ret = svnic_cq_alloc(snic->vdev, + &snic->cq[i], + i, + (snic->config.wq_enet_desc_count * 3), + sizeof(struct snic_fw_req)); + if (ret) + goto error_cleanup; + } + + for (i = 0; i < snic->intr_count; i++) { + ret = svnic_intr_alloc(snic->vdev, &snic->intr[i], i); + if (ret) + goto error_cleanup; + } + + /* + * Init WQ Resources. + * WQ[0 to n] points to CQ[0 to n-1] + * firmware to host comm points to CQ[n to m+1] + */ + err_intr_enable = 1; + err_intr_offset = snic->err_intr_offset; + + for (i = 0; i < snic->wq_count; i++) { + svnic_wq_init(&snic->wq[i], + i, + err_intr_enable, + err_intr_offset); + } + + for (i = 0; i < snic->cq_count; i++) { + intr_offset = i; + + svnic_cq_init(&snic->cq[i], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 1 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + intr_offset, + 0 /* cq_message_addr */); + } + + /* + * Init INTR resources + * Assumption : snic is always in MSI-X mode + */ + SNIC_BUG_ON(intr_mode != VNIC_DEV_INTR_MODE_MSIX); + mask_on_assertion = 1; + + for (i = 0; i < snic->intr_count; i++) { + svnic_intr_init(&snic->intr[i], + snic->config.intr_timer, + snic->config.intr_timer_type, + mask_on_assertion); + } + + /* init the stats memory by making the first call here */ + ret = svnic_dev_stats_dump(snic->vdev, &snic->stats); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "svnic_dev_stats_dump failed - x%x\n", + ret); + goto error_cleanup; + } + + /* Clear LIF stats */ + svnic_dev_stats_clear(snic->vdev); + ret = 0; + + return ret; + +error_cleanup: + snic_free_vnic_res(snic); + + return ret; +} + +void +snic_log_q_error(struct snic *snic) +{ + unsigned int i; + u32 err_status; + + for (i = 0; i < snic->wq_count; i++) { + err_status = ioread32(&snic->wq[i].ctrl->error_status); + if (err_status) + SNIC_HOST_ERR(snic->shost, + "WQ[%d] error status %d\n", + i, + err_status); + } +} /* end of snic_log_q_error */ diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h new file mode 100644 index 000000000000..273f72f2a023 --- /dev/null +++ b/drivers/scsi/snic/snic_res.h @@ -0,0 +1,97 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __SNIC_RES_H +#define __SNIC_RES_H + +#include "snic_io.h" +#include "wq_enet_desc.h" +#include "vnic_wq.h" +#include "snic_fwint.h" +#include "vnic_cq_fw.h" + +static inline void +snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx, + u16 flags, u64 tgt_id, u8 *lun, u8 *scsi_cdb, u8 cdb_len, + u32 data_len, u16 sg_cnt, ulong sgl_addr, + dma_addr_t sns_addr_pa, u32 sense_len) +{ + snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt, + ctx); + + req->u.icmnd.flags = cpu_to_le16(flags); + req->u.icmnd.tgt_id = cpu_to_le64(tgt_id); + memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN); + req->u.icmnd.cdb_len = cdb_len; + memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN); + memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len); + req->u.icmnd.data_len = cpu_to_le32(data_len); + req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr); + req->u.icmnd.sense_len = cpu_to_le32(sense_len); + req->u.icmnd.sense_addr = cpu_to_le64(sns_addr_pa); +} + +static inline void +snic_itmf_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, ulong ctx, + u16 flags, u32 req_id, u64 tgt_id, u8 *lun, u8 tm_type) +{ + snic_io_hdr_enc(&req->hdr, SNIC_REQ_ITMF, 0, cmnd_id, host_id, 0, ctx); + + req->u.itmf.tm_type = tm_type; + req->u.itmf.flags = cpu_to_le16(flags); + /* req_id valid only in abort, clear task */ + req->u.itmf.req_id = cpu_to_le32(req_id); + req->u.itmf.tgt_id = cpu_to_le64(tgt_id); + memcpy(&req->u.itmf.lun_id, lun, LUN_ADDR_LEN); +} + +static inline void +snic_queue_wq_eth_desc(struct vnic_wq *wq, + void *os_buf, + dma_addr_t dma_addr, + unsigned int len, + int vlan_tag_insert, + unsigned int vlan_tag, + int cq_entry) +{ + struct wq_enet_desc *desc = svnic_wq_next_desc(wq); + + wq_enet_desc_enc(desc, + (u64)dma_addr | VNIC_PADDR_TARGET, + (u16)len, + 0, /* mss_or_csum_offset */ + 0, /* fc_eof */ + 0, /* offload mode */ + 1, /* eop */ + (u8)cq_entry, + 0, /* fcoe_encap */ + (u8)vlan_tag_insert, + (u16)vlan_tag, + 0 /* loopback */); + + svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); +} + +struct snic; + +int snic_get_vnic_config(struct snic *); +int snic_alloc_vnic_res(struct snic *); +void snic_free_vnic_res(struct snic *); +void snic_get_res_counts(struct snic *); +void snic_log_q_error(struct snic *); +int snic_get_vnic_resources_size(struct snic *); +#endif /* __SNIC_RES_H */ diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c new file mode 100644 index 000000000000..2c7b4c321cbe --- /dev/null +++ b/drivers/scsi/snic/snic_scsi.c @@ -0,0 +1,2632 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+
+#include "snic_io.h"
+#include "snic.h"
+
+#define snic_cmd_tag(sc)	(((struct scsi_cmnd *) sc)->request->tag)
+
+const char *snic_state_str[] = {
+	[SNIC_INIT]	= "SNIC_INIT",
+	[SNIC_ERROR]	= "SNIC_ERROR",
+	[SNIC_ONLINE]	= "SNIC_ONLINE",
+	[SNIC_OFFLINE]	= "SNIC_OFFLINE",
+	[SNIC_FWRESET]	= "SNIC_FWRESET",
+};
+
+static const char * const snic_req_state_str[] = {
+	[SNIC_IOREQ_NOT_INITED]	= "SNIC_IOREQ_NOT_INITED",
+	[SNIC_IOREQ_PENDING]	= "SNIC_IOREQ_PENDING",
+	[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
+	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
+	[SNIC_IOREQ_LR_PENDING]	= "SNIC_IOREQ_LR_PENDING",
+	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
+	[SNIC_IOREQ_COMPLETE]	= "SNIC_IOREQ_CMD_COMPLETE",
+};
+
+/* snic cmd status strings */
+static const char * const snic_io_status_str[] = {
+	[SNIC_STAT_IO_SUCCESS]	= "SNIC_STAT_IO_SUCCESS", /* 0x0 */
+	[SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
+	[SNIC_STAT_OUT_OF_RES]	= "SNIC_STAT_OUT_OF_RES",
+	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
+	[SNIC_STAT_REQ_NOT_SUP] = "SNIC_STAT_REQ_NOT_SUP",
+	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
+	[SNIC_STAT_ABORTED]	= "SNIC_STAT_ABORTED",
+	[SNIC_STAT_TIMEOUT]	= "SNIC_STAT_TIMEOUT",
+	[SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
+	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
+	[SNIC_STAT_FW_ERR]	= "SNIC_STAT_FW_ERR",
+	[SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
+	[SNIC_STAT_ITMF_FAIL]	= "SNIC_STAT_ITMF_FAIL",
+	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
+	[SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
+	[SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
+	[SNIC_STAT_NO_BOOTLUN]	= "SNIC_STAT_NO_BOOTLUN",
+	[SNIC_STAT_SCSI_ERR]	= "SNIC_STAT_SCSI_ERR",
+	[SNIC_STAT_NOT_READY]	= "SNIC_STAT_NOT_READY",
+	[SNIC_STAT_FATAL_ERROR] = "SNIC_STAT_FATAL_ERROR",
+};
+
+static void snic_scsi_cleanup(struct snic *, int);
+
+const char *
+snic_state_to_str(unsigned int state)
+{
+	if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
+		return "Unknown";
+
+	return snic_state_str[state];
+}
+
+static const char *
+snic_io_status_to_str(unsigned int state)
+{
+	if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
+	     (!snic_io_status_str[state]))
+		return "Unknown";
+
+	return snic_io_status_str[state];
+}
+
+static const char *
+snic_ioreq_state_to_str(unsigned int state)
+{
+	if (state >= ARRAY_SIZE(snic_req_state_str) ||
+			!snic_req_state_str[state])
+		return "Unknown";
+
+	return snic_req_state_str[state];
+}
+
+static inline spinlock_t *
+snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
+{
+	u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);
+
+	return &snic->io_req_lock[hash];
+}
+
+static inline spinlock_t *
+snic_io_lock_tag(struct snic *snic, int tag)
+{
+	return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
+}
+
+/* 
snic_release_req_buf : Releases snic_req_info */ +static void +snic_release_req_buf(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc) +{ + struct snic_host_req *req = rqi_to_req(rqi); + + /* Freeing cmd without marking completion, not okay */ + SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) || + (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) || + (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) || + (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) || + (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) || + (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) || + (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE))); + + SNIC_SCSI_DBG(snic->shost, + "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n", + sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req, + rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)), + CMD_FLAGS(sc)); + + if (req->u.icmnd.sense_addr) + pci_unmap_single(snic->pdev, + le64_to_cpu(req->u.icmnd.sense_addr), + SCSI_SENSE_BUFFERSIZE, + PCI_DMA_FROMDEVICE); + + scsi_dma_unmap(sc); + + snic_req_free(snic, rqi); +} /* end of snic_release_req_buf */ + +/* + * snic_queue_icmnd_req : Queues snic_icmnd request + */ +static int +snic_queue_icmnd_req(struct snic *snic, + struct snic_req_info *rqi, + struct scsi_cmnd *sc, + int sg_cnt) +{ + struct scatterlist *sg; + struct snic_sg_desc *sgd; + dma_addr_t pa = 0; + struct scsi_lun lun; + u16 flags = 0; + int ret = 0; + unsigned int i; + + if (sg_cnt) { + flags = SNIC_ICMND_ESGL; + sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req); + + for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) { + sgd->addr = cpu_to_le64(sg_dma_address(sg)); + sgd->len = cpu_to_le32(sg_dma_len(sg)); + sgd->_resvd = 0; + sgd++; + } + } + + pa = pci_map_single(snic->pdev, + sc->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(snic->pdev, pa)) { + SNIC_HOST_ERR(snic->shost, + "QIcmnd:PCI Map Failed for sns buf %p tag %x\n", + sc->sense_buffer, snic_cmd_tag(sc)); + ret = -ENOMEM; + + return ret; + } + + int_to_scsilun(sc->device->lun, &lun); + if (sc->sc_data_direction == DMA_FROM_DEVICE) + flags |= SNIC_ICMND_RD; + if (sc->sc_data_direction == DMA_TO_DEVICE) + flags |= SNIC_ICMND_WR; + + /* Initialize icmnd */ + snic_icmnd_init(rqi->req, + snic_cmd_tag(sc), + snic->config.hid, /* hid */ + (ulong) rqi, + flags, /* command flags */ + rqi->tgt_id, + lun.scsi_lun, + sc->cmnd, + sc->cmd_len, + scsi_bufflen(sc), + sg_cnt, + (ulong) req_to_sgl(rqi->req), + pa, /* sense buffer pa */ + SCSI_SENSE_BUFFERSIZE); + + ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len); + if (ret) + SNIC_HOST_ERR(snic->shost, + "QIcmnd: Queuing Icmnd Failed. ret = %d\n", + ret); + + return ret; +} /* end of snic_queue_icmnd_req */ + +/* + * snic_issue_scsi_req : Prepares IO request and Issues to FW. 
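+ * Order of operations: scsi_dma_map() -> snic_req_init() ->
+ * snic_queue_icmnd_req(). On a queueing failure the rqi is detached
+ * from the command under the hashed io_lock before it is freed, so a
+ * racing completion cannot see a stale CMD_SP.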
+ */
+static int
+snic_issue_scsi_req(struct snic *snic,
+		    struct snic_tgt *tgt,
+		    struct scsi_cmnd *sc)
+{
+	struct snic_req_info *rqi = NULL;
+	int sg_cnt = 0;
+	int ret = 0;
+	u32 tag = snic_cmd_tag(sc);
+	u64 cmd_trc = 0, cmd_st_flags = 0;
+	spinlock_t *io_lock = NULL;
+	unsigned long flags;
+
+	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
+	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
+	sg_cnt = scsi_dma_map(sc);
+	if (sg_cnt < 0) {
+		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
+			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));
+
+		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
+		ret = -ENOMEM;
+
+		goto issue_sc_end;
+	}
+
+	rqi = snic_req_init(snic, sg_cnt);
+	if (!rqi) {
+		scsi_dma_unmap(sc);
+		ret = -ENOMEM;
+
+		goto issue_sc_end;
+	}
+
+	rqi->tgt_id = tgt->id;
+	rqi->sc = sc;
+
+	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
+	CMD_SP(sc) = (char *) rqi;
+	cmd_trc = SNIC_TRC_CMD(sc);
+	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
+	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
+	io_lock = snic_io_lock_hash(snic, sc);
+
+	/* create wq desc and enqueue it */
+	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
+	if (ret) {
+		SNIC_HOST_ERR(snic->shost,
+			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
+			      sc, ret);
+
+		spin_lock_irqsave(io_lock, flags);
+		rqi = (struct snic_req_info *) CMD_SP(sc);
+		CMD_SP(sc) = NULL;
+		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
+		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
+		spin_unlock_irqrestore(io_lock, flags);
+
+		if (rqi)
+			snic_release_req_buf(snic, rqi, sc);
+
+		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
+			 SNIC_TRC_CMD_STATE_FLAGS(sc));
+	} else {
+		u32 io_sz = scsi_bufflen(sc) >> 9;
+		u32 qtime = jiffies - rqi->start_time;
+		struct snic_io_stats *iostats = &snic->s_stats.io;
+
+		if (io_sz > atomic64_read(&iostats->max_io_sz))
+			atomic64_set(&iostats->max_io_sz, io_sz);
+
+		if (qtime > atomic64_read(&iostats->max_qtime))
+			atomic64_set(&iostats->max_qtime, qtime);
+
+		SNIC_SCSI_DBG(snic->shost,
+			      "issue_sc:sc %p, tag %d queued to WQ.\n",
+			      sc, tag);
+
+		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
+			 sg_cnt, cmd_trc, cmd_st_flags);
+	}
+
+issue_sc_end:
+
+	return ret;
+} /* end of snic_issue_scsi_req */
+
+
+/*
+ * snic_queuecommand
+ * Routine to send a scsi cdb to LLD
+ * Called with host_lock held and interrupts disabled
+ */
+int
+snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+{
+	struct snic_tgt *tgt = NULL;
+	struct snic *snic = shost_priv(shost);
+	int ret;
+
+	tgt = starget_to_tgt(scsi_target(sc->device));
+	ret = snic_tgt_chkready(tgt);
+	if (ret) {
+		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
+		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
+		sc->result = ret;
+		sc->scsi_done(sc);
+
+		return 0;
+	}
+
+	if (snic_get_state(snic) != SNIC_ONLINE) {
+		SNIC_HOST_ERR(shost, "snic state is %s\n",
+			      snic_state_str[snic_get_state(snic)]);
+
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+	atomic_inc(&snic->ios_inflight);
+
+	SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
+		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
+
+	memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));
+
+	ret = snic_issue_scsi_req(snic, tgt, sc);
+	if (ret) {
+		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
+		ret = SCSI_MLQUEUE_HOST_BUSY;
+	} else
+		snic_stats_update_active_ios(&snic->s_stats);
+
+	atomic_dec(&snic->ios_inflight);
+
+	return ret;
+} /* end of snic_queuecommand */
+
+/*
+ * snic_proc_tmreq_pending_state:
+ * caller should
hold IO lock
+ */
+static void
+snic_proc_tmreq_pending_state(struct snic *snic,
+			      struct scsi_cmnd *sc,
+			      u8 cmpl_status)
+{
+	int state = CMD_STATE(sc);
+
+	if (state == SNIC_IOREQ_ABTS_PENDING)
+		CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
+	else if (state == SNIC_IOREQ_LR_PENDING)
+		CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
+	else
+		SNIC_BUG_ON(1);
+
+	switch (cmpl_status) {
+	case SNIC_STAT_IO_SUCCESS:
+		CMD_FLAGS(sc) |= SNIC_IO_DONE;
+		break;
+
+	case SNIC_STAT_ABORTED:
+		CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
+		break;
+
+	default:
+		SNIC_BUG_ON(1);
+	}
+}
+
+/*
+ * snic_process_io_failed_state:
+ * Processes IO's error states
+ */
+static void
+snic_process_io_failed_state(struct snic *snic,
+			     struct snic_icmnd_cmpl *icmnd_cmpl,
+			     struct scsi_cmnd *sc,
+			     u8 cmpl_stat)
+{
+	int res = 0;
+
+	switch (cmpl_stat) {
+	case SNIC_STAT_TIMEOUT:		/* Req timed out */
+		atomic64_inc(&snic->s_stats.misc.io_tmo);
+		res = DID_TIME_OUT;
+		break;
+
+	case SNIC_STAT_ABORTED:		/* Req was aborted */
+		atomic64_inc(&snic->s_stats.misc.io_aborted);
+		res = DID_ABORT;
+		break;
+
+	case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
+		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
+		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
+		res = DID_ERROR;
+		break;
+
+	case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
+		atomic64_inc(&snic->s_stats.fw.out_of_res);
+		res = DID_REQUEUE;
+		break;
+
+	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
+		atomic64_inc(&snic->s_stats.io.io_not_found);
+		res = DID_ERROR;
+		break;
+
+	case SNIC_STAT_SGL_INVALID:	/* Req was aborted due to SGL error */
+		atomic64_inc(&snic->s_stats.misc.sgl_inval);
+		res = DID_ERROR;
+		break;
+
+	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
+		atomic64_inc(&snic->s_stats.fw.io_errs);
+		res = DID_ERROR;
+		break;
+
+	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
+		atomic64_inc(&snic->s_stats.fw.scsi_errs);
+		break;
+
+	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
+	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
+		res = DID_NO_CONNECT;
+		break;
+
+	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
+	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
+	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
+	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
+	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
+	default:
+		SNIC_SCSI_DBG(snic->shost,
+			      "Invalid Hdr/Param, Req Not Supported, Cmnd Rejected, Device Offline, or Unknown status\n");
+		res = DID_ERROR;
+		break;
+	}
+
+	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
+		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
+
+	/* Set sc->result */
+	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
+} /* end of snic_process_io_failed_state */
+
+/*
+ * snic_tmreq_pending : is task management in progress.
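+ *
+ * A note on the result packing in snic_process_io_failed_state() above:
+ * the SCSI midlayer keeps the host byte in bits 23:16 of sc->result and
+ * the SCSI status byte in bits 7:0, so completions encode and decode as
+ * (standard midlayer layout, not snic-specific):
+ *
+ *	sc->result  = (DID_ERROR << 16) | scsi_status;
+ *	host_byte   = (sc->result >> 16) & 0xff;
+ *	status_byte = sc->result & 0xff;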
+ */
+static int
+snic_tmreq_pending(struct scsi_cmnd *sc)
+{
+	int state = CMD_STATE(sc);
+
+	return ((state == SNIC_IOREQ_ABTS_PENDING) ||
+		(state == SNIC_IOREQ_LR_PENDING));
+}
+
+/*
+ * snic_process_icmnd_cmpl_status:
+ * Caller should hold io_lock
+ */
+static int
+snic_process_icmnd_cmpl_status(struct snic *snic,
+			       struct snic_icmnd_cmpl *icmnd_cmpl,
+			       u8 cmpl_stat,
+			       struct scsi_cmnd *sc)
+{
+	u8 scsi_stat = icmnd_cmpl->scsi_status;
+	u64 xfer_len = 0;
+	int ret = 0;
+
+	/* Mark the IO as complete */
+	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
+
+	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
+		sc->result = (DID_OK << 16) | scsi_stat;
+
+		xfer_len = scsi_bufflen(sc);
+
+		/* Update SCSI Cmd with resid value */
+		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
+
+		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
+			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
+			atomic64_inc(&snic->s_stats.misc.io_under_run);
+		}
+
+		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+			atomic64_inc(&snic->s_stats.misc.qfull);
+
+		ret = 0;
+	} else {
+		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
+		atomic64_inc(&snic->s_stats.io.fail);
+		SNIC_HOST_ERR(snic->shost,
+			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
+			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
+		ret = 1;
+	}
+
+	return ret;
+} /* end of snic_process_icmnd_cmpl_status */
+
+
+/*
+ * snic_icmnd_cmpl_handler
+ * Routine to handle icmnd completions
+ */
+static void
+snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
+{
+	u8 typ, hdr_stat;
+	u32 cmnd_id, hid;
+	ulong ctx;
+	struct scsi_cmnd *sc = NULL;
+	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
+	struct snic_host_req *req = NULL;
+	struct snic_req_info *rqi = NULL;
+	unsigned long flags, start_time;
+	spinlock_t *io_lock;
+	u8 sc_stat = 0;
+
+	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
+	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
+	sc_stat = icmnd_cmpl->scsi_status;
+
+	SNIC_SCSI_DBG(snic->shost,
+		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
+		      typ, hdr_stat, cmnd_id, hid, ctx);
+
+	if (cmnd_id >= snic->max_tag_id) {
+		SNIC_HOST_ERR(snic->shost,
+			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
+			      cmnd_id, snic_io_status_to_str(hdr_stat));
+		return;
+	}
+
+	sc = scsi_host_find_tag(snic->shost, cmnd_id);
+	WARN_ON_ONCE(!sc);
+
+	if (!sc) {
+		atomic64_inc(&snic->s_stats.io.sc_null);
+		SNIC_HOST_ERR(snic->shost,
+			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
+			      snic_io_status_to_str(hdr_stat),
+			      cmnd_id,
+			      fwreq);
+
+		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
+			 ((u64)hdr_stat << 16 |
+			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
+			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);
+
+		return;
+	}
+
+	io_lock = snic_io_lock_hash(snic, sc);
+
+	spin_lock_irqsave(io_lock, flags);
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	SNIC_SCSI_DBG(snic->shost,
+		      "Icmnd_cmpl:lun %lld sc %p cmd %x tag %d flags 0x%llx rqi %p\n",
+		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
+		      CMD_FLAGS(sc), rqi);
+
+	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
+	WARN_ON_ONCE(req);
+	if (!rqi) {
+		atomic64_inc(&snic->s_stats.io.req_null);
+		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
+		spin_unlock_irqrestore(io_lock, flags);
+
+		SNIC_HOST_ERR(snic->shost,
+			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
+			      snic_io_status_to_str(hdr_stat),
+			      cmnd_id, sc, CMD_FLAGS(sc));
+		return;
+	}
+
+	rqi = (struct snic_req_info 
*) ctx; + start_time = rqi->start_time; + + /* firmware completed the io */ + rqi->io_cmpl = 1; + + /* + * if SCSI-ML has already issued abort on this command, + * ignore completion of the IO. The abts path will clean it up + */ + if (unlikely(snic_tmreq_pending(sc))) { + snic_proc_tmreq_pending_state(snic, sc, hdr_stat); + spin_unlock_irqrestore(io_lock, flags); + + snic_stats_update_io_cmpl(&snic->s_stats); + + /* Expected value is SNIC_STAT_ABORTED */ + if (likely(hdr_stat == SNIC_STAT_ABORTED)) + return; + + SNIC_SCSI_DBG(snic->shost, + "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n", + snic_ioreq_state_to_str(CMD_STATE(sc)), + snic_io_status_to_str(hdr_stat), + sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid), + CMD_FLAGS(sc)); + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), (ulong) fwreq, + SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + return; + } + + if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) { + scsi_print_command(sc); + SNIC_HOST_ERR(snic->shost, + "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n", + sc, sc->cmnd[0], cmnd_id, + snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc)); + } + + /* Break link with the SCSI Command */ + CMD_SP(sc) = NULL; + CMD_FLAGS(sc) |= SNIC_IO_DONE; + + spin_unlock_irqrestore(io_lock, flags); + + /* For now, consider only successful IO. */ + snic_calc_io_process_time(snic, rqi); + + snic_release_req_buf(snic, rqi, sc); + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), (ulong) fwreq, + SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + + if (sc->scsi_done) + sc->scsi_done(sc); + + snic_stats_update_io_cmpl(&snic->s_stats); +} /* end of snic_icmnd_cmpl_handler */ + +static void +snic_proc_dr_cmpl_locked(struct snic *snic, + struct snic_fw_req *fwreq, + u8 cmpl_stat, + u32 cmnd_id, + struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc); + u32 start_time = rqi->start_time; + + CMD_LR_STATUS(sc) = cmpl_stat; + + SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n", + snic_ioreq_state_to_str(CMD_STATE(sc))); + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) { + CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING; + + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc)); + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n", + (int)(cmnd_id & SNIC_TAG_MASK), + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + return; + } + + + if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) { + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc)); + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n", + (int)(cmnd_id & SNIC_TAG_MASK), + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + return; + } + + CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE; + CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE; + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n", + (int)(cmnd_id & SNIC_TAG_MASK), + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + if (rqi->dr_done) + complete(rqi->dr_done); +} /* end of snic_proc_dr_cmpl_locked */ + +/* + * snic_update_abort_stats : Updates abort stats based on completion status. 
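+ *
+ * The ITMF completion path below multiplexes the task-management kind
+ * into the high bits of the command id: the bits covered by
+ * SNIC_TAG_MASK hold the original IO tag and the bits above it hold the
+ * TM type (SNIC_TAG_ABORT, SNIC_TAG_DEV_RST, ...). They are split apart
+ * exactly as in snic_process_itmf_cmpl():
+ *
+ *	tm_tags  = cmnd_id & ~SNIC_TAG_MASK;	TM flag bits
+ *	cmnd_id &= SNIC_TAG_MASK;		original IO tag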
+ */ +static void +snic_update_abort_stats(struct snic *snic, u8 cmpl_stat) +{ + struct snic_abort_stats *abt_stats = &snic->s_stats.abts; + + SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n"); + + switch (cmpl_stat) { + case SNIC_STAT_IO_SUCCESS: + break; + + case SNIC_STAT_TIMEOUT: + atomic64_inc(&abt_stats->fw_tmo); + break; + + case SNIC_STAT_IO_NOT_FOUND: + atomic64_inc(&abt_stats->io_not_found); + break; + + default: + atomic64_inc(&abt_stats->fail); + break; + } +} + +static int +snic_process_itmf_cmpl(struct snic *snic, + struct snic_fw_req *fwreq, + u32 cmnd_id, + u8 cmpl_stat, + struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + u32 tm_tags = 0; + spinlock_t *io_lock = NULL; + unsigned long flags; + u32 start_time = 0; + int ret = 0; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + WARN_ON_ONCE(!rqi); + + if (!rqi) { + atomic64_inc(&snic->s_stats.io.req_null); + spin_unlock_irqrestore(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + SNIC_HOST_ERR(snic->shost, + "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n", + snic_io_status_to_str(cmpl_stat), cmnd_id, sc, + CMD_FLAGS(sc)); + + return ret; + } + + /* Extract task management flags */ + tm_tags = cmnd_id & ~(SNIC_TAG_MASK); + + start_time = rqi->start_time; + cmnd_id &= (SNIC_TAG_MASK); + + switch (tm_tags) { + case SNIC_TAG_ABORT: + /* Abort only issued on cmd */ + snic_update_abort_stats(snic, cmpl_stat); + + if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) { + /* This is a late completion. Ignore it. */ + ret = -1; + spin_unlock_irqrestore(io_lock, flags); + break; + } + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE; + CMD_ABTS_STATUS(sc) = cmpl_stat; + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE; + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n", + cmnd_id, + snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + /* + * If scsi_eh thread is blocked waiting for abts complete, + * signal completion to it. IO will be cleaned in the thread, + * else clean it in this context. + */ + if (rqi->abts_done) { + complete(rqi->abts_done); + spin_unlock_irqrestore(io_lock, flags); + + break; /* jump out */ + } + + CMD_SP(sc) = NULL; + sc->result = (DID_ERROR << 16); + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n", + sc, CMD_FLAGS(sc)); + + spin_unlock_irqrestore(io_lock, flags); + + snic_release_req_buf(snic, rqi, sc); + + if (sc->scsi_done) { + SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + (ulong) fwreq, SNIC_TRC_CMD(sc), + SNIC_TRC_CMD_STATE_FLAGS(sc)); + + sc->scsi_done(sc); + } + + break; + + case SNIC_TAG_DEV_RST: + case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST: + snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc); + spin_unlock_irqrestore(io_lock, flags); + ret = 0; + + break; + + case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST: + /* Abort and terminate completion of device reset req */ + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE; + CMD_ABTS_STATUS(sc) = cmpl_stat; + CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE; + + SNIC_SCSI_DBG(snic->shost, + "itmf_cmpl:dev reset abts cmpl recvd. 
id %d status %s flags 0x%llx\n", + cmnd_id, snic_io_status_to_str(cmpl_stat), + CMD_FLAGS(sc)); + + if (rqi->abts_done) + complete(rqi->abts_done); + + spin_unlock_irqrestore(io_lock, flags); + + break; + + default: + spin_unlock_irqrestore(io_lock, flags); + SNIC_HOST_ERR(snic->shost, + "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags); + + SNIC_HOST_ERR(snic->shost, + "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n", + snic_ioreq_state_to_str(CMD_STATE(sc)), + cmnd_id, + CMD_FLAGS(sc)); + ret = -1; + SNIC_BUG_ON(1); + + break; + } + + return ret; +} /* end of snic_process_itmf_cmpl_status */ + +/* + * snic_itmf_cmpl_handler. + * Routine to handle itmf completions. + */ +static void +snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + struct scsi_cmnd *sc = NULL; + struct snic_req_info *rqi = NULL; + struct snic_itmf_cmpl *itmf_cmpl = NULL; + ulong ctx; + u32 cmnd_id; + u32 hid; + u8 typ; + u8 hdr_stat; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_SCSI_DBG(snic->shost, + "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x,ctx = %lx\n", + __func__, typ, hdr_stat, cmnd_id, hid, ctx); + + itmf_cmpl = &fwreq->u.itmf_cmpl; + SNIC_SCSI_DBG(snic->shost, + "Itmf_cmpl: nterm %u , flags 0x%x\n", + le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags); + + /* spl case, dev reset issued through ioctl */ + if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) { + rqi = (struct snic_req_info *) ctx; + sc = rqi->sc; + + goto ioctl_dev_rst; + } + + if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) { + SNIC_HOST_ERR(snic->shost, + "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n", + cmnd_id, snic_io_status_to_str(hdr_stat)); + SNIC_BUG_ON(1); + + return; + } + + sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK); + WARN_ON_ONCE(!sc); + +ioctl_dev_rst: + if (!sc) { + atomic64_inc(&snic->s_stats.io.sc_null); + SNIC_HOST_ERR(snic->shost, + "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n", + snic_io_status_to_str(hdr_stat), cmnd_id); + + return; + } + + snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc); +} /* end of snic_itmf_cmpl_handler */ + + + +static void +snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_stats *st = &snic->s_stats; + long act_ios = 0, act_fwreqs = 0; + + SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n"); + snic_scsi_cleanup(snic, snic_cmd_tag(sc)); + + /* Update stats on pending IOs */ + act_ios = atomic64_read(&st->io.active); + atomic64_add(act_ios, &st->io.compl); + atomic64_sub(act_ios, &st->io.active); + + act_fwreqs = atomic64_read(&st->fw.actv_reqs); + atomic64_sub(act_fwreqs, &st->fw.actv_reqs); +} + +/* + * snic_hba_reset_cmpl_handler : + * + * Notes : + * 1. Cleanup all the scsi cmds, release all snic specific cmds + * 2. 
Issue Report Targets in case of SAN targets + */ +static int +snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + ulong ctx; + u32 cmnd_id; + u32 hid; + u8 typ; + u8 hdr_stat; + struct scsi_cmnd *sc = NULL; + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags, gflags; + int ret = 0; + + SNIC_HOST_INFO(snic->shost, + "reset_cmpl:HBA Reset Completion received.\n"); + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_SCSI_DBG(snic->shost, + "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", + typ, hdr_stat, cmnd_id, hid, ctx); + + /* spl case, host reset issued through ioctl */ + if (cmnd_id == SCSI_NO_TAG) { + rqi = (struct snic_req_info *) ctx; + sc = rqi->sc; + + goto ioctl_hba_rst; + } + + if (cmnd_id >= snic->max_tag_id) { + SNIC_HOST_ERR(snic->shost, + "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n", + cmnd_id, snic_io_status_to_str(hdr_stat)); + SNIC_BUG_ON(1); + + return 1; + } + + sc = scsi_host_find_tag(snic->shost, cmnd_id); +ioctl_hba_rst: + if (!sc) { + atomic64_inc(&snic->s_stats.io.sc_null); + SNIC_HOST_ERR(snic->shost, + "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n", + snic_io_status_to_str(hdr_stat), cmnd_id); + ret = 1; + + return ret; + } + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + + if (!snic->remove_wait) { + spin_unlock_irqrestore(io_lock, flags); + SNIC_HOST_ERR(snic->shost, + "reset_cmpl:host reset completed after timout\n"); + ret = 1; + + return ret; + } + + rqi = (struct snic_req_info *) CMD_SP(sc); + WARN_ON_ONCE(!rqi); + + if (!rqi) { + atomic64_inc(&snic->s_stats.io.req_null); + spin_unlock_irqrestore(io_lock, flags); + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + SNIC_HOST_ERR(snic->shost, + "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n", + snic_io_status_to_str(hdr_stat), cmnd_id, sc, + CMD_FLAGS(sc)); + + ret = 1; + + return ret; + } + /* stats */ + spin_unlock_irqrestore(io_lock, flags); + + /* scsi cleanup */ + snic_hba_reset_scsi_cleanup(snic, sc); + + SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE && + snic_get_state(snic) != SNIC_FWRESET); + + /* Careful locking between snic_lock and io lock */ + spin_lock_irqsave(io_lock, flags); + spin_lock_irqsave(&snic->snic_lock, gflags); + if (snic_get_state(snic) == SNIC_FWRESET) + snic_set_state(snic, SNIC_ONLINE); + spin_unlock_irqrestore(&snic->snic_lock, gflags); + + if (snic->remove_wait) + complete(snic->remove_wait); + + spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl); + + ret = 0; + /* Rediscovery is for SAN */ + if (snic->config.xpt_type == SNIC_DAS) + return ret; + + SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n"); + queue_work(snic_glob->event_q, &snic->disc_work); + + return ret; +} + +static void +snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n"); + + SNIC_ASSERT_NOT_IMPL(1); +} + +static void +snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq) +{ + u8 typ, hdr_stat; + u32 cmnd_id, hid; + ulong ctx; + struct snic_async_evnotify *aen = &fwreq->u.async_ev; + u32 event_id = 0; + + snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx); + SNIC_SCSI_DBG(snic->shost, + "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n", + typ, hdr_stat, cmnd_id, hid, ctx); + + event_id = le32_to_cpu(aen->ev_id); + + switch (event_id) { + case 
SNIC_EV_TGT_OFFLINE:
+		SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
+		break;
+
+	case SNIC_EV_TGT_ONLINE:
+		SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
+		break;
+
+	case SNIC_EV_LUN_OFFLINE:
+		SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
+		break;
+
+	case SNIC_EV_LUN_ONLINE:
+		SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
+		break;
+
+	case SNIC_EV_CONF_CHG:
+		SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
+		break;
+
+	case SNIC_EV_TGT_ADDED:
+		SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
+		break;
+
+	case SNIC_EV_TGT_DELTD:
+		SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
+		break;
+
+	case SNIC_EV_LUN_ADDED:
+		SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
+		break;
+
+	case SNIC_EV_LUN_DELTD:
+		SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
+		break;
+
+	case SNIC_EV_DISC_CMPL:
+		SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
+		break;
+
+	default:
+		SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
+		SNIC_BUG_ON(1);
+		break;
+	}
+
+	SNIC_ASSERT_NOT_IMPL(1);
+} /* end of snic_aen_handler */
+
+/*
+ * snic_io_cmpl_handler
+ * Routine to process CQ entries(IO Completions) posted by fw.
+ */
+static int
+snic_io_cmpl_handler(struct vnic_dev *vdev,
+		     unsigned int cq_idx,
+		     struct snic_fw_req *fwreq)
+{
+	struct snic *snic = svnic_dev_priv(vdev);
+	u64 start = jiffies, cmpl_time;
+
+	snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));
+
+	/* Update FW Stats */
+	if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
+	    (fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
+		atomic64_dec(&snic->s_stats.fw.actv_reqs);
+
+	SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
+		    (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));
+
+	/* Check for snic subsys errors */
+	switch (fwreq->hdr.status) {
+	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
+		SNIC_HOST_ERR(snic->shost,
+			      "sNIC SubSystem is NOT Ready.\n");
+		break;
+
+	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
+		SNIC_HOST_ERR(snic->shost,
+			      "sNIC SubSystem in Unrecoverable State.\n");
+		break;
+	}
+
+	switch (fwreq->hdr.type) {
+	case SNIC_RSP_EXCH_VER_CMPL:
+		snic_io_exch_ver_cmpl_handler(snic, fwreq);
+		break;
+
+	case SNIC_RSP_REPORT_TGTS_CMPL:
+		snic_report_tgt_cmpl_handler(snic, fwreq);
+		break;
+
+	case SNIC_RSP_ICMND_CMPL:
+		snic_icmnd_cmpl_handler(snic, fwreq);
+		break;
+
+	case SNIC_RSP_ITMF_CMPL:
+		snic_itmf_cmpl_handler(snic, fwreq);
+		break;
+
+	case SNIC_RSP_HBA_RESET_CMPL:
+		snic_hba_reset_cmpl_handler(snic, fwreq);
+		break;
+
+	case SNIC_MSG_ACK:
+		snic_msg_ack_handler(snic, fwreq);
+		break;
+
+	case SNIC_MSG_ASYNC_EVNOTIFY:
+		snic_aen_handler(snic, fwreq);
+		break;
+
+	default:
+		SNIC_BUG_ON(1);
+		SNIC_SCSI_DBG(snic->shost,
+			      "Unknown firmware completion request type %d\n",
+			      fwreq->hdr.type);
+		break;
+	}
+
+	/* Update Stats */
+	cmpl_time = jiffies - start;
+	if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
+		atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);
+
+	return 0;
+} /* end of snic_io_cmpl_handler */
+
+/*
+ * snic_fwcq_cmpl_handler
+ * Routine to process fwCQ
+ * This CQ is independent, and not associated with wq/rq/wq_copy queues
+ */
+int
+snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
+{
+	unsigned int num_ent = 0;	/* number cq entries processed */
+	unsigned int cq_idx;
+	unsigned int nent_per_cq;
+	struct snic_misc_stats *misc_stats = &snic->s_stats.misc;
+
+	for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; 
cq_idx++) {
+		nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
+						 snic_io_cmpl_handler,
+						 io_cmpl_work);
+		num_ent += nent_per_cq;
+
+		if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
+			atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
+	}
+
+	return num_ent;
+} /* end of snic_fwcq_cmpl_handler */
+
+/*
+ * snic_queue_itmf_req: Common API to queue Task Management requests.
+ * Use rqi->tm_tag for passing special tags.
+ * @req_id : aborted request's tag, -1 for lun reset.
+ */
+static int
+snic_queue_itmf_req(struct snic *snic,
+		    struct snic_host_req *tmreq,
+		    struct scsi_cmnd *sc,
+		    u32 tmf,
+		    u32 req_id)
+{
+	struct snic_req_info *rqi = req_to_rqi(tmreq);
+	struct scsi_lun lun;
+	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
+	int ret = 0;
+
+	SNIC_BUG_ON(!rqi);
+	SNIC_BUG_ON(!rqi->tm_tag);
+
+	/* fill in lun info */
+	int_to_scsilun(sc->device->lun, &lun);
+
+	/* Initialize snic_host_req: itmf */
+	snic_itmf_init(tmreq,
+		       tm_tag,
+		       snic->config.hid,
+		       (ulong) rqi,
+		       0 /* flags */,
+		       req_id, /* Command to be aborted. */
+		       rqi->tgt_id,
+		       lun.scsi_lun,
+		       tmf);
+
+	/*
+	 * In case of multiple aborts on the same cmd, use
+	 * try_wait_for_completion() and completion_done() to check whether
+	 * aborts are being queued even after the previously issued abort
+	 * has completed: SNIC_BUG_ON(completion_done(&rqi->done));
+	 */
+
+	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
+	if (ret)
+		SNIC_HOST_ERR(snic->shost,
+			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
+			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
+	else
+		SNIC_SCSI_DBG(snic->shost,
+			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
+			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));
+
+	return ret;
+} /* end of snic_queue_itmf_req */
+
+static int
+snic_issue_tm_req(struct snic *snic,
+		  struct snic_req_info *rqi,
+		  struct scsi_cmnd *sc,
+		  int tmf)
+{
+	struct snic_host_req *tmreq = NULL;
+	int req_id = 0, tag = snic_cmd_tag(sc);
+	int ret = 0;
+
+	if (snic_get_state(snic) == SNIC_FWRESET)
+		return -EBUSY;
+
+	atomic_inc(&snic->ios_inflight);
+
+	SNIC_SCSI_DBG(snic->shost,
+		      "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
+		      tmf, rqi, tag);
+
+
+	if (tmf == SNIC_ITMF_LUN_RESET) {
+		tmreq = snic_dr_req_init(snic, rqi);
+		req_id = SCSI_NO_TAG;
+	} else {
+		tmreq = snic_abort_req_init(snic, rqi);
+		req_id = tag;
+	}
+
+	if (!tmreq) {
+		ret = -ENOMEM;
+
+		goto tmreq_err;
+	}
+
+	ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
+	if (ret)
+		goto tmreq_err;
+
+	ret = 0;
+
+tmreq_err:
+	if (ret) {
+		SNIC_HOST_ERR(snic->shost,
+			      "issu_tmreq: Queuing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
+			      tmf, sc, rqi, req_id, tag, ret);
+	} else {
+		SNIC_SCSI_DBG(snic->shost,
+			      "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
+			      tmf, sc, rqi, req_id, tag);
+	}
+
+	atomic_dec(&snic->ios_inflight);
+
+	return ret;
+}
+
+/*
+ * snic_queue_abort_req : Queues abort req to WQ
+ */
+static int
+snic_queue_abort_req(struct snic *snic,
+		     struct snic_req_info *rqi,
+		     struct scsi_cmnd *sc,
+		     int tmf)
+{
+	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
+		      sc, rqi, snic_cmd_tag(sc), tmf);
+
+	/* Add special tag for abort */
+	rqi->tm_tag |= SNIC_TAG_ABORT;
+
+	return snic_issue_tm_req(snic, rqi, sc, tmf);
+}
+
+/*
+ * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
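+ *
+ * The finish step maps the firmware's abort status to the midlayer
+ * verdict; condensed from the switch below:
+ *
+ *	SNIC_INVALID_CODE	fw never answered (driver timeout):
+ *				keep rqi allocated, return FAILED
+ *	SNIC_STAT_IO_SUCCESS
+ *	SNIC_STAT_IO_NOT_FOUND	abort done or IO already gone:
+ *				return SUCCESS
+ *	anything else		fw failed the abort: return FAILED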
+ */
+static int
+snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
+{
+	struct snic_req_info *rqi = NULL;
+	spinlock_t *io_lock = NULL;
+	unsigned long flags;
+	int ret = 0, tag = snic_cmd_tag(sc);
+
+	io_lock = snic_io_lock_hash(snic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	if (!rqi) {
+		atomic64_inc(&snic->s_stats.io.req_null);
+		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
+
+		SNIC_SCSI_DBG(snic->shost,
+			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
+			      tag, sc, CMD_FLAGS(sc));
+		ret = FAILED;
+
+		goto abort_fail;
+	}
+
+	rqi->abts_done = NULL;
+
+	ret = FAILED;
+
+	/* Check the abort status. */
+	switch (CMD_ABTS_STATUS(sc)) {
+	case SNIC_INVALID_CODE:
+		/* Firmware didn't complete abort req, timed out */
+		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
+		atomic64_inc(&snic->s_stats.abts.drv_tmo);
+		SNIC_SCSI_DBG(snic->shost,
+			      "abt_fini:sc %p Tag %x Driver Timeout. flags 0x%llx\n",
+			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
+		/* do not release snic request in timedout case */
+		rqi = NULL;
+
+		goto abort_fail;
+
+	case SNIC_STAT_IO_SUCCESS:
+	case SNIC_STAT_IO_NOT_FOUND:
+		ret = SUCCESS;
+		break;
+
+	default:
+		/* Firmware completed abort with error */
+		ret = FAILED;
+		break;
+	}
+
+	CMD_SP(sc) = NULL;
+	SNIC_HOST_INFO(snic->shost,
+		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
+		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
+		       CMD_FLAGS(sc));
+
+abort_fail:
+	spin_unlock_irqrestore(io_lock, flags);
+	if (rqi)
+		snic_release_req_buf(snic, rqi, sc);
+
+	return ret;
+} /* end of snic_abort_finish */
+
+/*
+ * snic_send_abort_and_wait : Issues Abort, and Waits
+ */
+static int
+snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
+{
+	struct snic_req_info *rqi = NULL;
+	enum snic_ioreq_state sv_state;
+	struct snic_tgt *tgt = NULL;
+	spinlock_t *io_lock = NULL;
+	DECLARE_COMPLETION_ONSTACK(tm_done);
+	unsigned long flags;
+	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);
+
+	tgt = starget_to_tgt(scsi_target(sc->device));
+	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
+		tmf = SNIC_ITMF_ABTS_TASK_TERM;
+	else
+		tmf = SNIC_ITMF_ABTS_TASK;
+
+	/* stats */
+
+	io_lock = snic_io_lock_hash(snic, sc);
+
+	/*
+	 * Avoid a race between SCSI issuing the abort and the device
+	 * completing the command.
+	 *
+	 * If the command is already completed by the fw_cmpl code,
+	 * we just return SUCCESS from here. This means that the abort
+	 * succeeded. In the SCSI ML, since the timeout for the command has
+	 * happened, the completion won't actually complete the command
+	 * and it will be considered as an aborted command.
+	 *
+	 * The CMD_SP will not be cleared except while holding io_lock
+	 */
+	spin_lock_irqsave(io_lock, flags);
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	if (!rqi) {
+		spin_unlock_irqrestore(io_lock, flags);
+
+		SNIC_HOST_ERR(snic->shost,
+			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
+			      tag, CMD_FLAGS(sc));
+
+		ret = SUCCESS;
+
+		goto send_abts_end;
+	}
+
+	rqi->abts_done = &tm_done;
+	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
+		spin_unlock_irqrestore(io_lock, flags);
+
+		ret = 0;
+		goto abts_pending;
+	}
+	SNIC_BUG_ON(!rqi->abts_done);
+
+	/* Save command state; it is restored if queuing the abort fails. */
+	sv_state = CMD_STATE(sc);
+
+	/*
+	 * Command is still pending, need to abort it
+	 * If the fw completes the command after this point,
+	 * the completion won't be done till mid-layer, since abort
+	 * has already started.
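+	 *
+	 * The handshake below follows a save / queue / wait pattern with
+	 * a rollback when queuing fails (a condensed sketch of the code
+	 * that follows, not additional logic):
+	 *
+	 *	sv_state = CMD_STATE(sc);
+	 *	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
+	 *	if (snic_queue_abort_req(snic, rqi, sc, tmf))
+	 *		CMD_STATE(sc) = sv_state;	restore on failure
+	 *	else
+	 *		wait_for_completion_timeout(&tm_done,
+	 *					    SNIC_ABTS_TIMEOUT);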
+	 */
+	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
+	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
+
+	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);
+
+	spin_unlock_irqrestore(io_lock, flags);
+
+	/* Now Queue the abort command to firmware */
+	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
+	if (ret) {
+		SNIC_HOST_ERR(snic->shost,
+			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
+			      tag, ret, CMD_FLAGS(sc));
+
+		spin_lock_irqsave(io_lock, flags);
+		/* Restore Command's previous state */
+		CMD_STATE(sc) = sv_state;
+		rqi = (struct snic_req_info *) CMD_SP(sc);
+		if (rqi)
+			rqi->abts_done = NULL;
+		spin_unlock_irqrestore(io_lock, flags);
+		ret = FAILED;
+
+		goto send_abts_end;
+	}
+
+	spin_lock_irqsave(io_lock, flags);
+	if (tmf == SNIC_ITMF_ABTS_TASK) {
+		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
+		atomic64_inc(&snic->s_stats.abts.num);
+	} else {
+		/* term stats */
+		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
+	}
+	spin_unlock_irqrestore(io_lock, flags);
+
+	SNIC_SCSI_DBG(snic->shost,
+		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
+		      sc, tag, CMD_FLAGS(sc));
+
+
+	ret = 0;
+
+abts_pending:
+	/*
+	 * Queued an abort IO, wait for its completion.
+	 * Once the fw completes the abort command, it will
+	 * wake up this thread.
+	 */
+	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);
+
+send_abts_end:
+	return ret;
+} /* end of snic_send_abort_and_wait */
+
+/*
+ * This function is exported to SCSI for sending abort cmnds.
+ * A SCSI IO is represented by a snic_ioreq in the driver.
+ * The snic_ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO
+ */
+int
+snic_abort_cmd(struct scsi_cmnd *sc)
+{
+	struct snic *snic = shost_priv(sc->device->host);
+	int ret = SUCCESS, tag = snic_cmd_tag(sc);
+	u32 start_time = jiffies;
+
+	SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
+		      sc, sc->cmnd[0], sc->request, tag);
+
+	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
+		SNIC_HOST_ERR(snic->shost,
+			      "abt_cmd: tag %x Parent Devs are not rdy\n",
+			      tag);
+		ret = FAST_IO_FAIL;
+
+		goto abort_end;
+	}
+
+
+	ret = snic_send_abort_and_wait(snic, sc);
+	if (ret)
+		goto abort_end;
+
+	ret = snic_abort_finish(snic, sc);
+
+abort_end:
+	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+		 jiffies_to_msecs(jiffies - start_time), 0,
+		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+	SNIC_SCSI_DBG(snic->shost,
+		      "abts: Abort Req Status = %s\n",
+		      (ret == SUCCESS) ? "SUCCESS" :
+		      ((ret == FAST_IO_FAIL) ? 
"FAST_IO_FAIL" : "FAILED")); + + return ret; +} + + + +static int +snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc) +{ + struct snic_req_info *rqi = NULL; + struct scsi_cmnd *sc = NULL; + struct scsi_device *lr_sdev = NULL; + spinlock_t *io_lock = NULL; + u32 tag; + unsigned long flags; + + if (lr_sc) + lr_sdev = lr_sc->device; + + /* walk through the tag map, an dcheck if IOs are still pending in fw*/ + for (tag = 0; tag < snic->max_tag_id; tag++) { + io_lock = snic_io_lock_tag(snic, tag); + + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + + if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + /* + * Found IO that is still pending w/ firmware and belongs to + * the LUN that is under reset, if lr_sc != NULL + */ + SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n", + snic_ioreq_state_to_str(CMD_STATE(sc))); + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) { + spin_unlock_irqrestore(io_lock, flags); + + return 1; + } + + spin_unlock_irqrestore(io_lock, flags); + } + + return 0; +} /* end of snic_is_abts_pending */ + +static int +snic_dr_clean_single_req(struct snic *snic, + u32 tag, + struct scsi_device *lr_sdev) +{ + struct snic_req_info *rqi = NULL; + struct snic_tgt *tgt = NULL; + struct scsi_cmnd *sc = NULL; + spinlock_t *io_lock = NULL; + u32 sv_state = 0, tmf = 0; + DECLARE_COMPLETION_ONSTACK(tm_done); + unsigned long flags; + int ret = 0; + + io_lock = snic_io_lock_tag(snic, tag); + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + + /* Ignore Cmd that don't belong to Lun Reset device */ + if (!sc || sc->device != lr_sdev) + goto skip_clean; + + rqi = (struct snic_req_info *) CMD_SP(sc); + + if (!rqi) + goto skip_clean; + + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + goto skip_clean; + + + if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) && + (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) { + + SNIC_SCSI_DBG(snic->shost, + "clean_single_req: devrst is not pending sc 0x%p\n", + sc); + + goto skip_clean; + } + + SNIC_SCSI_DBG(snic->shost, + "clean_single_req: Found IO in %s on lun\n", + snic_ioreq_state_to_str(CMD_STATE(sc))); + + /* Save Command State */ + sv_state = CMD_STATE(sc); + + /* + * Any pending IO issued prior to reset is expected to be + * in abts pending state, if not we need to set SNIC_IOREQ_ABTS_PENDING + * to indicate the IO is abort pending. + * When IO is completed, the IO will be handed over and handled + * in this function. + */ + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING; + SNIC_BUG_ON(rqi->abts_done); + + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) { + rqi->tm_tag = SNIC_TAG_DEV_RST; + + SNIC_SCSI_DBG(snic->shost, + "clean_single_req:devrst sc 0x%p\n", sc); + } + + CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE; + rqi->abts_done = &tm_done; + spin_unlock_irqrestore(io_lock, flags); + + tgt = starget_to_tgt(scsi_target(sc->device)); + if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN)) + tmf = SNIC_ITMF_ABTS_TASK_TERM; + else + tmf = SNIC_ITMF_ABTS_TASK; + + /* Now queue the abort command to firmware */ + ret = snic_queue_abort_req(snic, rqi, sc, tmf); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "clean_single_req_err:sc %p, tag %d abt failed. 
tm_tag %d flags 0x%llx\n", + sc, tag, rqi->tm_tag, CMD_FLAGS(sc)); + + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (rqi) + rqi->abts_done = NULL; + + /* Restore Command State */ + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = sv_state; + + ret = 1; + goto skip_clean; + } + + spin_lock_irqsave(io_lock, flags); + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) + CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED; + + CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED; + spin_unlock_irqrestore(io_lock, flags); + + wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT); + + /* Recheck cmd state to check if it now aborted. */ + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL; + goto skip_clean; + } + rqi->abts_done = NULL; + + /* if abort is still pending w/ fw, fail */ + if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) { + SNIC_HOST_ERR(snic->shost, + "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n", + sc, tag, rqi->tm_tag, CMD_FLAGS(sc)); + + CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE; + ret = 1; + + goto skip_clean; + } + + CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE; + CMD_SP(sc) = NULL; + spin_unlock_irqrestore(io_lock, flags); + + snic_release_req_buf(snic, rqi, sc); + + ret = 0; + + return ret; + +skip_clean: + spin_unlock_irqrestore(io_lock, flags); + + return ret; +} /* end of snic_dr_clean_single_req */ + +static int +snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc) +{ + struct scsi_device *lr_sdev = lr_sc->device; + u32 tag = 0; + int ret = FAILED; + + for (tag = 0; tag < snic->max_tag_id; tag++) { + if (tag == snic_cmd_tag(lr_sc)) + continue; + + ret = snic_dr_clean_single_req(snic, tag, lr_sdev); + if (ret) { + SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag); + + goto clean_err; + } + } + + schedule_timeout(msecs_to_jiffies(100)); + + /* Walk through all the cmds and check abts status. */ + if (snic_is_abts_pending(snic, lr_sc)) { + ret = FAILED; + + goto clean_err; + } + + ret = 0; + SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n"); + + return ret; + +clean_err: + ret = FAILED; + SNIC_HOST_ERR(snic->shost, + "Failed to Clean Pending IOs on %s device.\n", + dev_name(&lr_sdev->sdev_gendev)); + + return ret; + +} /* end of snic_dr_clean_pending_req */ + +/* + * snic_dr_finish : Called by snic_device_reset + */ +static int +snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int lr_res = 0; + int ret = FAILED; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + SNIC_SCSI_DBG(snic->shost, + "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n", + snic_cmd_tag(sc), sc, CMD_FLAGS(sc)); + + ret = FAILED; + goto dr_fini_end; + } + + rqi->dr_done = NULL; + + lr_res = CMD_LR_STATUS(sc); + + switch (lr_res) { + case SNIC_INVALID_CODE: + /* stats */ + SNIC_SCSI_DBG(snic->shost, + "dr_fini: Tag %x Dev Reset Timedout. 
flags 0x%llx\n",
+			      snic_cmd_tag(sc), CMD_FLAGS(sc));
+
+		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
+		ret = FAILED;
+
+		goto dr_failed;
+
+	case SNIC_STAT_IO_SUCCESS:
+		SNIC_SCSI_DBG(snic->shost,
+			      "dr_fini: Tag %x Dev Reset cmpl\n",
+			      snic_cmd_tag(sc));
+		ret = 0;
+		break;
+
+	default:
+		SNIC_HOST_ERR(snic->shost,
+			      "dr_fini:Device Reset completed & failed. Tag = %x lr_status %s flags 0x%llx\n",
+			      snic_cmd_tag(sc),
+			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
+		ret = FAILED;
+		goto dr_failed;
+	}
+	spin_unlock_irqrestore(io_lock, flags);
+
+	/*
+	 * Cleanup any IOs on this LUN that have still not completed.
+	 * If any of these fail, then LUN Reset fails.
+	 * Cleanup cleans all commands on this LUN except
+	 * the lun reset command. If all cmds get cleaned, the LUN Reset
+	 * succeeds.
+	 */
+
+	ret = snic_dr_clean_pending_req(snic, sc);
+	if (ret) {
+		spin_lock_irqsave(io_lock, flags);
+		SNIC_SCSI_DBG(snic->shost,
+			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
+			      snic_cmd_tag(sc));
+		rqi = (struct snic_req_info *) CMD_SP(sc);
+
+		goto dr_failed;
+	} else {
+		/* Cleanup LUN Reset Command */
+		spin_lock_irqsave(io_lock, flags);
+		rqi = (struct snic_req_info *) CMD_SP(sc);
+		if (rqi)
+			ret = SUCCESS; /* Completed Successfully */
+		else
+			ret = FAILED;
+	}
+
+dr_failed:
+	SNIC_BUG_ON(!spin_is_locked(io_lock));
+	if (rqi)
+		CMD_SP(sc) = NULL;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	if (rqi)
+		snic_release_req_buf(snic, rqi, sc);
+
+dr_fini_end:
+	return ret;
+} /* end of snic_dr_finish */
+
+static int
+snic_queue_dr_req(struct snic *snic,
+		  struct snic_req_info *rqi,
+		  struct scsi_cmnd *sc)
+{
+	/* Add special tag for device reset */
+	rqi->tm_tag |= SNIC_TAG_DEV_RST;
+
+	return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
+}
+
+static int
+snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
+{
+	struct snic_req_info *rqi = NULL;
+	enum snic_ioreq_state sv_state;
+	spinlock_t *io_lock = NULL;
+	unsigned long flags;
+	DECLARE_COMPLETION_ONSTACK(tm_done);
+	int ret = FAILED, tag = snic_cmd_tag(sc);
+
+	io_lock = snic_io_lock_hash(snic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	if (!rqi) {
+		SNIC_HOST_ERR(snic->shost,
+			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
+			      tag, CMD_FLAGS(sc));
+		spin_unlock_irqrestore(io_lock, flags);
+
+		ret = FAILED;
+		goto send_dr_end;
+	}
+
+	/* Save command state to restore in case queuing fails. */
+	sv_state = CMD_STATE(sc);
+
+	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
+	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;
+
+	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);
+
+	rqi->dr_done = &tm_done;
+	SNIC_BUG_ON(!rqi->dr_done);
+
+	spin_unlock_irqrestore(io_lock, flags);
+	/*
+	 * The command state is changed to IOREQ_PENDING;
+	 * in this case, if the command is completed, icmnd_cmpl will
+	 * mark the cmd as completed.
+	 * This logic still makes LUN Reset inevitable.
+	 */
+
+	ret = snic_queue_dr_req(snic, rqi, sc);
+	if (ret) {
+		SNIC_HOST_ERR(snic->shost,
+			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
+			      tag, ret, CMD_FLAGS(sc));
+
+		spin_lock_irqsave(io_lock, flags);
+		/* Restore State */
+		CMD_STATE(sc) = sv_state;
+		rqi = (struct snic_req_info *) CMD_SP(sc);
+		if (rqi)
+			rqi->dr_done = NULL;
+		/* rqi is freed in caller. 
*/
+		spin_unlock_irqrestore(io_lock, flags);
+		ret = FAILED;
+
+		goto send_dr_end;
+	}
+
+	spin_lock_irqsave(io_lock, flags);
+	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	ret = 0;
+
+	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);
+
+send_dr_end:
+	return ret;
+}
+
+/*
+ * auxiliary function to check whether the lun reset op is supported
+ * Not supported if it returns 0
+ */
+static int
+snic_dev_reset_supported(struct scsi_device *sdev)
+{
+	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
+
+	if (tgt->tdata.typ == SNIC_TGT_DAS)
+		return 0;
+
+	return 1;
+}
+
+static void
+snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
+{
+	struct snic_req_info *rqi = NULL;
+	spinlock_t *io_lock = NULL;
+	unsigned long flags;
+	u32 start_time = jiffies;
+
+	io_lock = snic_io_lock_hash(snic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	if (rqi) {
+		start_time = rqi->start_time;
+		CMD_SP(sc) = NULL;
+	}
+
+	CMD_FLAGS(sc) |= flag;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	if (rqi)
+		snic_release_req_buf(snic, rqi, sc);
+
+	SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
+		 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
+		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+}
+
+/*
+ * SCSI Eh thread issues a LUN Reset when one or more commands on a LUN
+ * fail to get aborted. It calls driver's eh_device_reset with a SCSI
+ * command on the LUN.
+ */
+int
+snic_device_reset(struct scsi_cmnd *sc)
+{
+	struct Scsi_Host *shost = sc->device->host;
+	struct snic *snic = shost_priv(shost);
+	struct snic_req_info *rqi = NULL;
+	int tag = snic_cmd_tag(sc);
+	int start_time = jiffies;
+	int ret = FAILED;
+	int dr_supp = 0;
+
+	SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
+		      sc, sc->cmnd[0], sc->request,
+		      snic_cmd_tag(sc));
+	dr_supp = snic_dev_reset_supported(sc->device);
+	if (!dr_supp) {
+		/* device reset op is not supported */
+		SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
+		snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
+
+		goto dev_rst_end;
+	}
+
+	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
+		snic_unlink_and_release_req(snic, sc, 0);
+		SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
+
+		goto dev_rst_end;
+	}
+
+	/* There is no tag when lun reset is issued through ioctl. */
+	if (unlikely(tag <= SNIC_NO_TAG)) {
+		SNIC_HOST_INFO(snic->shost,
+			       "Devrst: LUN Reset Recvd thru IOCTL.\n");
+
+		rqi = snic_req_init(snic, 0);
+		if (!rqi)
+			goto dev_rst_end;
+
+		memset(scsi_cmd_priv(sc), 0,
+		       sizeof(struct snic_internal_io_state));
+		CMD_SP(sc) = (char *)rqi;
+		CMD_FLAGS(sc) = SNIC_NO_FLAGS;
+
+		/* Add special tag for dr coming from user space */
+		rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
+		rqi->sc = sc;
+	}
+
+	ret = snic_send_dr_and_wait(snic, sc);
+	if (ret) {
+		SNIC_HOST_ERR(snic->shost,
+			      "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
+			      tag, ret);
+
+		snic_unlink_and_release_req(snic, sc, 0);
+
+		goto dev_rst_end;
+	}
+
+	ret = snic_dr_finish(snic, sc);
+
+dev_rst_end:
+	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
+		 jiffies_to_msecs(jiffies - start_time),
+		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
+
+	SNIC_SCSI_DBG(snic->shost,
+		      "Devrst: Returning from Device Reset : %s\n",
+		      (ret == SUCCESS) ? "SUCCESS" : "FAILED");
+
+	return ret;
+} /* end of snic_device_reset */
+
+/*
+ * SCSI Error handling calls driver's eh_host_reset if all prior
+ * error handling levels return FAILED.
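+ *
+ * These three entry points are wired into the driver's host template
+ * (snic_host_template in snic_main.c) roughly as sketched below, and
+ * SCSI EH escalates through them in this order:
+ *
+ *	.eh_abort_handler	 = snic_abort_cmd,
+ *	.eh_device_reset_handler = snic_device_reset,
+ *	.eh_host_reset_handler	 = snic_host_reset,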
+ *
+ * Host Reset is the highest level of error recovery. If this fails, then
+ * host is offlined by SCSI.
+ */
+/*
+ * snic_issue_hba_reset : Queues FW Reset Request.
+ */
+static int
+snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
+{
+	struct snic_req_info *rqi = NULL;
+	struct snic_host_req *req = NULL;
+	spinlock_t *io_lock = NULL;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	unsigned long flags;
+	int ret = -ENOMEM;
+
+	rqi = snic_req_init(snic, 0);
+	if (!rqi) {
+		ret = -ENOMEM;
+
+		goto hba_rst_end;
+	}
+
+	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
+		memset(scsi_cmd_priv(sc), 0,
+		       sizeof(struct snic_internal_io_state));
+		SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
+		rqi->sc = sc;
+	}
+
+	req = rqi_to_req(rqi);
+
+	io_lock = snic_io_lock_hash(snic, sc);
+	spin_lock_irqsave(io_lock, flags);
+	SNIC_BUG_ON(CMD_SP(sc) != NULL);
+	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
+	CMD_SP(sc) = (char *) rqi;
+	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
+	snic->remove_wait = &wait;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	/* Initialize Request */
+	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
+			snic->config.hid, 0, (ulong) rqi);
+
+	req->u.reset.flags = 0;
+
+	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
+	if (ret) {
+		SNIC_HOST_ERR(snic->shost,
+			      "issu_hr:Queuing HBA Reset Failed. w/ err %d\n",
+			      ret);
+
+		goto hba_rst_err;
+	}
+
+	spin_lock_irqsave(io_lock, flags);
+	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
+	spin_unlock_irqrestore(io_lock, flags);
+	atomic64_inc(&snic->s_stats.reset.hba_resets);
+	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");
+
+	wait_for_completion_timeout(snic->remove_wait,
+				    SNIC_HOST_RESET_TIMEOUT);
+
+	if (snic_get_state(snic) == SNIC_FWRESET) {
+		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset Timedout.\n");
+		ret = -ETIMEDOUT;
+
+		goto hba_rst_err;
+	}
+
+	spin_lock_irqsave(io_lock, flags);
+	snic->remove_wait = NULL;
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	CMD_SP(sc) = NULL;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	if (rqi)
+		snic_req_free(snic, rqi);
+
+	ret = 0;
+
+	return ret;
+
+hba_rst_err:
+	spin_lock_irqsave(io_lock, flags);
+	snic->remove_wait = NULL;
+	rqi = (struct snic_req_info *) CMD_SP(sc);
+	CMD_SP(sc) = NULL;
+	spin_unlock_irqrestore(io_lock, flags);
+
+	if (rqi)
+		snic_req_free(snic, rqi);
+
+hba_rst_end:
+	SNIC_HOST_ERR(snic->shost,
+		      "reset:HBA Reset Failed w/ err = %d.\n",
+		      ret);
+
+	return ret;
+} /* end of snic_issue_hba_reset */
+
+int
+snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
+{
+	struct snic *snic = shost_priv(shost);
+	enum snic_state sv_state;
+	unsigned long flags;
+	int ret = FAILED;
+
+	/* Set snic state as SNIC_FWRESET */
+	sv_state = snic_get_state(snic);
+
+	spin_lock_irqsave(&snic->snic_lock, flags);
+	if (snic_get_state(snic) == SNIC_FWRESET) {
+		spin_unlock_irqrestore(&snic->snic_lock, flags);
+		SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");
+
+		msleep(SNIC_HOST_RESET_TIMEOUT);
+		ret = SUCCESS;
+
+		goto reset_end;
+	}
+
+	snic_set_state(snic, SNIC_FWRESET);
+	spin_unlock_irqrestore(&snic->snic_lock, flags);
+
+
+	/* Wait for all the IOs that are entered in Qcmd */
+	while (atomic_read(&snic->ios_inflight))
+		schedule_timeout(msecs_to_jiffies(1));
+
+	ret = snic_issue_hba_reset(snic, sc);
+	if (ret) {
+		SNIC_HOST_ERR(shost,
+			      "reset:Host Reset Failed w/ err %d.\n",
+			      ret);
+		spin_lock_irqsave(&snic->snic_lock, flags);
+		snic_set_state(snic, sv_state);
+		spin_unlock_irqrestore(&snic->snic_lock, flags);
+
atomic64_inc(&snic->s_stats.reset.hba_reset_fail); + ret = FAILED; + + goto reset_end; + } + + ret = SUCCESS; + +reset_end: + return ret; +} /* end of snic_reset */ + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. + */ +int +snic_host_reset(struct scsi_cmnd *sc) +{ + struct Scsi_Host *shost = sc->device->host; + u32 start_time = jiffies; + int ret = FAILED; + + SNIC_SCSI_DBG(shost, + "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n", + sc, sc->cmnd[0], sc->request, + snic_cmd_tag(sc), CMD_FLAGS(sc)); + + ret = snic_reset(shost, sc); + + SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc, + jiffies_to_msecs(jiffies - start_time), + 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc)); + + return ret; +} /* end of snic_host_reset */ + +/* + * snic_cmpl_pending_tmreq : Caller should hold io_lock + */ +static void +snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc) +{ + struct snic_req_info *rqi = NULL; + + SNIC_SCSI_DBG(snic->shost, + "Completing Pending TM Req sc %p, state %s flags 0x%llx\n", + sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc)); + + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) + return; + + if (rqi->dr_done) + complete(rqi->dr_done); + else if (rqi->abts_done) + complete(rqi->abts_done); +} + +/* + * snic_scsi_cleanup: Walks through tag map and releases the reqs + */ +static void +snic_scsi_cleanup(struct snic *snic, int ex_tag) +{ + struct snic_req_info *rqi = NULL; + struct scsi_cmnd *sc = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int tag; + u64 st_time = 0; + + SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n"); + + for (tag = 0; tag < snic->max_tag_id; tag++) { + /* Skip ex_tag */ + if (tag == ex_tag) + continue; + + io_lock = snic_io_lock_tag(snic, tag); + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + if (unlikely(snic_tmreq_pending(sc))) { + /* + * When FW Completes reset w/o sending completions + * for outstanding ios. + */ + snic_cmpl_pending_tmreq(snic, sc); + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) { + spin_unlock_irqrestore(io_lock, flags); + + goto cleanup; + } + + SNIC_SCSI_DBG(snic->shost, + "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n", + sc, rqi, tag, CMD_FLAGS(sc)); + + CMD_SP(sc) = NULL; + CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP; + spin_unlock_irqrestore(io_lock, flags); + st_time = rqi->start_time; + + SNIC_HOST_INFO(snic->shost, + "sc_clean: Releasing rqi %p : flags 0x%llx\n", + rqi, CMD_FLAGS(sc)); + + snic_release_req_buf(snic, rqi, sc); + +cleanup: + sc->result = DID_TRANSPORT_DISRUPTED << 16; + SNIC_HOST_INFO(snic->shost, + "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. 
rqi %p duration %llu msecs\n", + sc, rqi, (jiffies - st_time)); + + /* Update IO stats */ + snic_stats_update_io_cmpl(&snic->s_stats); + + if (sc->scsi_done) { + SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, + jiffies_to_msecs(jiffies - st_time), 0, + SNIC_TRC_CMD(sc), + SNIC_TRC_CMD_STATE_FLAGS(sc)); + + sc->scsi_done(sc); + } + } +} /* end of snic_scsi_cleanup */ + +void +snic_shutdown_scsi_cleanup(struct snic *snic) +{ + SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n"); + + snic_scsi_cleanup(snic, SCSI_NO_TAG); +} /* end of snic_shutdown_scsi_cleanup */ + +/* + * snic_internal_abort_io + * called by : snic_tgt_scsi_abort_io + */ +static int +snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf) +{ + struct snic_req_info *rqi = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + u32 sv_state = 0; + int ret = 0; + + io_lock = snic_io_lock_hash(snic, sc); + spin_lock_irqsave(io_lock, flags); + rqi = (struct snic_req_info *) CMD_SP(sc); + if (!rqi) + goto skip_internal_abts; + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + goto skip_internal_abts; + + if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) && + (!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) { + + SNIC_SCSI_DBG(snic->shost, + "internal_abts: dev rst not pending sc 0x%p\n", + sc); + + goto skip_internal_abts; + } + + + if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) { + SNIC_SCSI_DBG(snic->shost, + "internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n", + sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc)); + + goto skip_internal_abts; + } + + sv_state = CMD_STATE(sc); + CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING; + CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE; + CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING; + + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) { + /* stats */ + rqi->tm_tag = SNIC_TAG_DEV_RST; + SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc); + } + + SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n", + snic_cmd_tag(sc)); + SNIC_BUG_ON(rqi->abts_done); + spin_unlock_irqrestore(io_lock, flags); + + ret = snic_queue_abort_req(snic, rqi, sc, tmf); + if (ret) { + SNIC_HOST_ERR(snic->shost, + "internal_abts: Tag = %x , Failed w/ err = %d\n", + snic_cmd_tag(sc), ret); + + spin_lock_irqsave(io_lock, flags); + + if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = sv_state; + + goto skip_internal_abts; + } + + spin_lock_irqsave(io_lock, flags); + if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) + CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED; + else + CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED; + + ret = SUCCESS; + +skip_internal_abts: + SNIC_BUG_ON(!spin_is_locked(io_lock)); + spin_unlock_irqrestore(io_lock, flags); + + return ret; +} /* end of snic_internal_abort_io */ + +/* + * snic_tgt_scsi_abort_io : called by snic_tgt_del + */ +int +snic_tgt_scsi_abort_io(struct snic_tgt *tgt) +{ + struct snic *snic = NULL; + struct scsi_cmnd *sc = NULL; + struct snic_tgt *sc_tgt = NULL; + spinlock_t *io_lock = NULL; + unsigned long flags; + int ret = 0, tag, abt_cnt = 0, tmf = 0; + + if (!tgt) + return -1; + + snic = shost_priv(snic_tgt_to_shost(tgt)); + SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n"); + + if (tgt->tdata.typ == SNIC_TGT_DAS) + tmf = SNIC_ITMF_ABTS_TASK; + else + tmf = SNIC_ITMF_ABTS_TASK_TERM; + + for (tag = 0; tag < snic->max_tag_id; tag++) { + io_lock = snic_io_lock_tag(snic, tag); + + spin_lock_irqsave(io_lock, flags); + sc = scsi_host_find_tag(snic->shost, tag); + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); + + continue; + } + + 
sc_tgt = starget_to_tgt(scsi_target(sc->device));
+		if (sc_tgt != tgt) {
+			spin_unlock_irqrestore(io_lock, flags);
+
+			continue;
+		}
+		spin_unlock_irqrestore(io_lock, flags);
+
+		ret = snic_internal_abort_io(snic, sc, tmf);
+		if (ret < 0) {
+			SNIC_HOST_ERR(snic->shost,
+				      "tgt_abt_io: Tag %x, Failed w/ err = %d\n",
+				      tag, ret);
+
+			continue;
+		}
+
+		if (ret == SUCCESS)
+			abt_cnt++;
+	}
+
+	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);
+
+	return 0;
+} /* end of snic_tgt_scsi_abort_io */
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
new file mode 100644
index 000000000000..11e614849a82
--- /dev/null
+++ b/drivers/scsi/snic/snic_stats.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __SNIC_STATS_H
+#define __SNIC_STATS_H
+
+struct snic_io_stats {
+	atomic64_t active;		/* Active IOs */
+	atomic64_t max_active;		/* Max # active IOs */
+	atomic64_t max_sgl;		/* Max # SGLs for any IO */
+	atomic64_t max_time;		/* Max time to process IO */
+	atomic64_t max_qtime;		/* Max time to Queue the IO */
+	atomic64_t max_cmpl_time;	/* Max time to complete the IO */
+	atomic64_t sgl_cnt[SNIC_MAX_SG_DESC_CNT]; /* SGL Counters */
+	atomic64_t max_io_sz;		/* Max IO Size */
+	atomic64_t compl;		/* IO Completions */
+	atomic64_t fail;		/* IO Failures */
+	atomic64_t req_null;		/* req or req info is NULL */
+	atomic64_t alloc_fail;		/* Alloc Failures */
+	atomic64_t sc_null;
+	atomic64_t io_not_found;	/* IO Not Found */
+	atomic64_t num_ios;		/* Number of IOs */
+};
+
+struct snic_abort_stats {
+	atomic64_t num;		/* Abort counter */
+	atomic64_t fail;	/* Abort Failure Counter */
+	atomic64_t drv_tmo;	/* Abort Driver Timeouts */
+	atomic64_t fw_tmo;	/* Abort Firmware Timeouts */
+	atomic64_t io_not_found;/* Abort IO Not Found */
+};
+
+struct snic_reset_stats {
+	atomic64_t dev_resets;		/* Device Reset Counter */
+	atomic64_t dev_reset_fail;	/* Device Reset Failures */
+	atomic64_t dev_reset_aborts;	/* Device Reset Aborts */
+	atomic64_t dev_reset_tmo;	/* Device Reset Timeout */
+	atomic64_t dev_reset_terms;	/* Device Reset terminate */
+	atomic64_t hba_resets;		/* hba/firmware resets */
+	atomic64_t hba_reset_cmpl;	/* hba/firmware reset completions */
+	atomic64_t hba_reset_fail;	/* hba/firmware reset failures */
+	atomic64_t snic_resets;		/* snic resets */
+	atomic64_t snic_reset_compl;	/* snic reset completions */
+	atomic64_t snic_reset_fail;	/* snic reset failures */
+};
+
+struct snic_fw_stats {
+	atomic64_t actv_reqs;		/* Active Requests */
+	atomic64_t max_actv_reqs;	/* Max Active Requests */
+	atomic64_t out_of_res;		/* Firmware Out Of Resources */
+	atomic64_t io_errs;		/* Firmware IO Errors */
+	atomic64_t scsi_errs;		/* Target hits check condition */
+};
+
+struct snic_misc_stats {
+	u64 last_isr_time;
+	u64 last_ack_time;
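+	/* ISR, completion and resource-failure counters */
+	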
atomic64_t isr_cnt;
+	atomic64_t max_cq_ents;		/* Max CQ Entries */
+	atomic64_t data_cnt_mismat;	/* Data Count Mismatch */
+	atomic64_t io_tmo;
+	atomic64_t io_aborted;
+	atomic64_t sgl_inval;		/* SGL Invalid */
+	atomic64_t abts_wq_alloc_fail;	/* Abort Path WQ desc alloc failure */
+	atomic64_t devrst_wq_alloc_fail;/* Device Reset - WQ desc alloc fail */
+	atomic64_t wq_alloc_fail;	/* IO WQ desc alloc failure */
+	atomic64_t no_icmnd_itmf_cmpls;
+	atomic64_t io_under_run;
+	atomic64_t qfull;
+	atomic64_t tgt_not_rdy;
+};
+
+struct snic_stats {
+	struct snic_io_stats io;
+	struct snic_abort_stats abts;
+	struct snic_reset_stats reset;
+	struct snic_fw_stats fw;
+	struct snic_misc_stats misc;
+	atomic64_t io_cmpl_skip;
+};
+
+int snic_stats_debugfs_init(struct snic *);
+void snic_stats_debugfs_remove(struct snic *);
+
+/* Auxiliary function to update active IO counter */
+static inline void
+snic_stats_update_active_ios(struct snic_stats *s_stats)
+{
+	struct snic_io_stats *io = &s_stats->io;
+	u32 nr_active_ios;
+
+	nr_active_ios = atomic64_inc_return(&io->active);
+	if (atomic64_read(&io->max_active) < nr_active_ios)
+		atomic64_set(&io->max_active, nr_active_ios);
+
+	atomic64_inc(&io->num_ios);
+}
+
+/* Auxiliary function to update IO completion counter */
+static inline void
+snic_stats_update_io_cmpl(struct snic_stats *s_stats)
+{
+	atomic64_dec(&s_stats->io.active);
+	if (unlikely(atomic64_read(&s_stats->io_cmpl_skip)))
+		atomic64_dec(&s_stats->io_cmpl_skip);
+	else
+		atomic64_inc(&s_stats->io.compl);
+}
+#endif /* __SNIC_STATS_H */
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
new file mode 100644
index 000000000000..28a40a7ade38
--- /dev/null
+++ b/drivers/scsi/snic/snic_trc.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+
+#include "snic_io.h"
+#include "snic.h"
+
+/*
+ * snic_get_trc_buf : Allocates a trace record and returns.
+ */
+struct snic_trc_data *
+snic_get_trc_buf(void)
+{
+	struct snic_trc *trc = &snic_glob->trc;
+	struct snic_trc_data *td = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&trc->lock, flags);
+	td = &trc->buf[trc->wr_idx];
+	trc->wr_idx++;
+
+	if (trc->wr_idx == trc->max_idx)
+		trc->wr_idx = 0;
+
+	if (trc->wr_idx != trc->rd_idx) {
+		spin_unlock_irqrestore(&trc->lock, flags);
+
+		goto end;
+	}
+
+	trc->rd_idx++;
+	if (trc->rd_idx == trc->max_idx)
+		trc->rd_idx = 0;
+
+	td->ts = 0;	/* Marker for checking the record, for complete data */
+	spin_unlock_irqrestore(&trc->lock, flags);
+
+end:
+
+	return td;
+} /* end of snic_get_trc_buf */
+
+/*
+ * snic_fmt_trc_data : Formats trace data for printing.
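+ * Each record is rendered as:
+ *   <seconds>.<nanoseconds> <function> <host#> <tag> <data0> .. <data4>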
+ */
+static int
+snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz)
+{
+	int len = 0;
+	struct timespec tmspec;
+
+	jiffies_to_timespec(td->ts, &tmspec);
+
+	len += snprintf(buf, buf_sz,
+			"%lu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n",
+			tmspec.tv_sec,
+			tmspec.tv_nsec,
+			td->fn,
+			td->hno,
+			td->tag,
+			td->data[0], td->data[1], td->data[2], td->data[3],
+			td->data[4]);
+
+	return len;
+} /* end of snic_fmt_trc_data */
+
+/*
+ * snic_get_trc_data : Returns a formatted trace buffer.
+ */
+int
+snic_get_trc_data(char *buf, int buf_sz)
+{
+	struct snic_trc_data *td = NULL;
+	struct snic_trc *trc = &snic_glob->trc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&trc->lock, flags);
+	if (trc->rd_idx == trc->wr_idx) {
+		spin_unlock_irqrestore(&trc->lock, flags);
+
+		return -1;
+	}
+	td = &trc->buf[trc->rd_idx];
+
+	if (td->ts == 0) {
+		/* write in progress. */
+		spin_unlock_irqrestore(&trc->lock, flags);
+
+		return -1;
+	}
+
+	trc->rd_idx++;
+	if (trc->rd_idx == trc->max_idx)
+		trc->rd_idx = 0;
+	spin_unlock_irqrestore(&trc->lock, flags);
+
+	return snic_fmt_trc_data(td, buf, buf_sz);
+} /* end of snic_get_trc_data */
+
+/*
+ * snic_trc_init() : Configures Trace Functionality for snic.
+ */
+int
+snic_trc_init(void)
+{
+	struct snic_trc *trc = &snic_glob->trc;
+	void *tbuf = NULL;
+	int tbuf_sz = 0, ret;
+
+	tbuf_sz = (snic_trace_max_pages * PAGE_SIZE);
+	tbuf = vmalloc(tbuf_sz);
+	if (!tbuf) {
+		SNIC_ERR("Failed to allocate trace buffer of size %d.\n",
+			 tbuf_sz);
+		SNIC_ERR("Trace Facility not enabled.\n");
+		ret = -ENOMEM;
+
+		return ret;
+	}
+
+	memset(tbuf, 0, tbuf_sz);
+	trc->buf = (struct snic_trc_data *) tbuf;
+	spin_lock_init(&trc->lock);
+
+	ret = snic_trc_debugfs_init();
+	if (ret) {
+		SNIC_ERR("Failed to create Debugfs Files.\n");
+
+		goto error;
+	}
+
+	trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
+	trc->rd_idx = trc->wr_idx = 0;
+	trc->enable = 1;
+	SNIC_INFO("Trace Facility Enabled. Trace Buffer Size: %lu Pages.\n",
+		  tbuf_sz / PAGE_SIZE);
+	ret = 0;
+
+	return ret;
+
+error:
+	snic_trc_free();
+
+	return ret;
+} /* end of snic_trc_init */
+
+/*
+ * snic_trc_free : Releases the trace buffer and disables the tracing.
+ */
+void
+snic_trc_free(void)
+{
+	struct snic_trc *trc = &snic_glob->trc;
+
+	trc->enable = 0;
+	snic_trc_debugfs_term();
+
+	if (trc->buf) {
+		vfree(trc->buf);
+		trc->buf = NULL;
+	}
+
+	SNIC_INFO("Trace Facility Disabled.\n");
+} /* end of snic_trc_free */
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
new file mode 100644
index 000000000000..427faee5f97e
--- /dev/null
+++ b/drivers/scsi/snic/snic_trc.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#ifndef __SNIC_TRC_H +#define __SNIC_TRC_H + +#ifdef CONFIG_SCSI_SNIC_DEBUG_FS + +extern ssize_t simple_read_from_buffer(void __user *to, + size_t count, + loff_t *ppos, + const void *from, + size_t available); + +extern unsigned int snic_trace_max_pages; + +/* Global Data structure for trace to manage trace functionality */ +struct snic_trc_data { + u64 ts; /* Time Stamp */ + char *fn; /* Ptr to Function Name */ + u32 hno; /* SCSI Host ID */ + u32 tag; /* Command Tag */ + u64 data[5]; +} __attribute__((__packed__)); + +#define SNIC_TRC_ENTRY_SZ 64 /* in Bytes */ + +struct snic_trc { + spinlock_t lock; + struct snic_trc_data *buf; /* Trace Buffer */ + u32 max_idx; /* Max Index into trace buffer */ + u32 rd_idx; + u32 wr_idx; + u32 enable; /* Control Variable for Tracing */ + + struct dentry *trc_enable; /* debugfs file object */ + struct dentry *trc_file; +}; + +int snic_trc_init(void); +void snic_trc_free(void); +int snic_trc_debugfs_init(void); +void snic_trc_debugfs_term(void); +struct snic_trc_data *snic_get_trc_buf(void); +int snic_get_trc_data(char *buf, int buf_sz); + +int snic_debugfs_init(void); +void snic_debugfs_term(void); + +static inline void +snic_trace(char *fn, u16 hno, u32 tag, u64 d1, u64 d2, u64 d3, u64 d4, u64 d5) +{ + struct snic_trc_data *tr_rec = snic_get_trc_buf(); + + if (!tr_rec) + return; + + tr_rec->fn = (char *)fn; + tr_rec->hno = hno; + tr_rec->tag = tag; + tr_rec->data[0] = d1; + tr_rec->data[1] = d2; + tr_rec->data[2] = d3; + tr_rec->data[3] = d4; + tr_rec->data[4] = d5; + tr_rec->ts = jiffies; /* Update time stamp at last */ +} + +#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \ + do { \ + if (unlikely(snic_glob->trc.enable)) \ + snic_trace((char *)__func__, \ + (u16)(_hno), \ + (u32)(_tag), \ + (u64)(d1), \ + (u64)(d2), \ + (u64)(d3), \ + (u64)(d4), \ + (u64)(d5)); \ + } while (0) +#else + +#define SNIC_TRC(_hno, _tag, d1, d2, d3, d4, d5) \ + do { \ + if (unlikely(snic_log_level & 0x2)) \ + SNIC_DBG("SnicTrace: %s %2u %2u %llx %llx %llx %llx %llx", \ + (char *)__func__, \ + (u16)(_hno), \ + (u32)(_tag), \ + (u64)(d1), \ + (u64)(d2), \ + (u64)(d3), \ + (u64)(d4), \ + (u64)(d5)); \ + } while (0) +#endif /* end of CONFIG_SCSI_SNIC_DEBUG_FS */ + +#define SNIC_TRC_CMD(sc) \ + ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | \ + (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | \ + (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | \ + (u64)sc->cmnd[5]) + +#define SNIC_TRC_CMD_STATE_FLAGS(sc) \ + ((u64) CMD_FLAGS(sc) << 32 | CMD_STATE(sc)) + +#endif /* end of __SNIC_TRC_H */ diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c new file mode 100644 index 000000000000..4c8e64e4fba6 --- /dev/null +++ b/drivers/scsi/snic/vnic_cq.c @@ -0,0 +1,86 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include "vnic_dev.h" +#include "vnic_cq.h" + +void svnic_cq_free(struct vnic_cq *cq) +{ + svnic_dev_free_desc_ring(cq->vdev, &cq->ring); + + cq->ctrl = NULL; +} + +int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, + unsigned int index, unsigned int desc_count, unsigned int desc_size) +{ + int err; + + cq->index = index; + cq->vdev = vdev; + + cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index); + if (!cq->ctrl) { + pr_err("Failed to hook CQ[%d] resource\n", index); + + return -EINVAL; + } + + err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int cq_message_enable, + unsigned int interrupt_offset, u64 cq_message_addr) +{ + u64 paddr; + + paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &cq->ctrl->ring_base); + iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); + iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable); + iowrite32(color_enable, &cq->ctrl->color_enable); + iowrite32(cq_head, &cq->ctrl->cq_head); + iowrite32(cq_tail, &cq->ctrl->cq_tail); + iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color); + iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable); + iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable); + iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable); + iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset); + writeq(cq_message_addr, &cq->ctrl->cq_message_addr); +} + +void svnic_cq_clean(struct vnic_cq *cq) +{ + cq->to_clean = 0; + cq->last_color = 0; + + iowrite32(0, &cq->ctrl->cq_head); + iowrite32(0, &cq->ctrl->cq_tail); + iowrite32(1, &cq->ctrl->cq_tail_color); + + svnic_dev_clear_desc_ring(&cq->ring); +} diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h new file mode 100644 index 000000000000..6e651c3e16f7 --- /dev/null +++ b/drivers/scsi/snic/vnic_cq.h @@ -0,0 +1,110 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _VNIC_CQ_H_ +#define _VNIC_CQ_H_ + +#include "cq_desc.h" +#include "vnic_dev.h" + +/* Completion queue control */ +struct vnic_cq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 flow_control_enable; /* 0x10 */ + u32 pad1; + u32 color_enable; /* 0x18 */ + u32 pad2; + u32 cq_head; /* 0x20 */ + u32 pad3; + u32 cq_tail; /* 0x28 */ + u32 pad4; + u32 cq_tail_color; /* 0x30 */ + u32 pad5; + u32 interrupt_enable; /* 0x38 */ + u32 pad6; + u32 cq_entry_enable; /* 0x40 */ + u32 pad7; + u32 cq_message_enable; /* 0x48 */ + u32 pad8; + u32 interrupt_offset; /* 0x50 */ + u32 pad9; + u64 cq_message_addr; /* 0x58 */ + u32 pad10; +}; + +struct vnic_cq { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */ + struct vnic_dev_ring ring; + unsigned int to_clean; + unsigned int last_color; +}; + +static inline unsigned int svnic_cq_service(struct vnic_cq *cq, + unsigned int work_to_do, + int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index, void *opaque), + void *opaque) +{ + struct cq_desc *cq_desc; + unsigned int work_done = 0; + u16 q_number, completed_index; + u8 type, color; + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq_desc, type, + q_number, completed_index, opaque)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + cq_desc_dec(cq_desc, &type, &color, + &q_number, &completed_index); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +void svnic_cq_free(struct vnic_cq *cq); +int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, + unsigned int index, unsigned int desc_count, unsigned int desc_size); +void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, + unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail, + unsigned int cq_tail_color, unsigned int interrupt_enable, + unsigned int cq_entry_enable, unsigned int message_enable, + unsigned int interrupt_offset, u64 message_addr); +void svnic_cq_clean(struct vnic_cq *cq); +#endif /* _VNIC_CQ_H_ */ diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h new file mode 100644 index 000000000000..c2d1bbd44bd1 --- /dev/null +++ b/drivers/scsi/snic/vnic_cq_fw.h @@ -0,0 +1,62 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _VNIC_CQ_FW_H_ +#define _VNIC_CQ_FW_H_ + +#include "snic_fwint.h" + +static inline unsigned int +vnic_cq_fw_service(struct vnic_cq *cq, + int (*q_service)(struct vnic_dev *vdev, + unsigned int index, + struct snic_fw_req *desc), + unsigned int work_to_do) + +{ + struct snic_fw_req *desc; + unsigned int work_done = 0; + u8 color; + + desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + snic_color_dec(desc, &color); + + while (color != cq->last_color) { + + if ((*q_service)(cq->vdev, cq->index, desc)) + break; + + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + + cq->ring.desc_size * cq->to_clean); + snic_color_dec(desc, &color); + + work_done++; + if (work_done >= work_to_do) + break; + } + + return work_done; +} + +#endif /* _VNIC_CQ_FW_H_ */ diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c new file mode 100644 index 000000000000..e0b5549bc9fb --- /dev/null +++ b/drivers/scsi/snic/vnic_dev.c @@ -0,0 +1,748 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/if_ether.h> +#include <linux/slab.h> +#include "vnic_resource.h" +#include "vnic_devcmd.h" +#include "vnic_dev.h" +#include "vnic_stats.h" +#include "vnic_wq.h" + +#define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */ +#define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL + +struct devcmd2_controller { + struct vnic_wq_ctrl __iomem *wq_ctrl; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; +}; + +struct vnic_res { + void __iomem *vaddr; + unsigned int count; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 *linkstatus; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + u64 args[VNIC_DEVCMD_NARGS]; + struct devcmd2_controller *devcmd2; + + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); +}; + +#define VNIC_MAX_RES_HDR_SIZE \ + (sizeof(struct vnic_resource_header) + \ + sizeof(struct vnic_resource) * RES_TYPE_MAX) +#define VNIC_RES_STRIDE 128 + +void *svnic_dev_priv(struct vnic_dev *vdev) +{ + return vdev->priv; +} + +static int vnic_dev_discover_res(struct vnic_dev *vdev, + struct vnic_dev_bar *bar, unsigned int num_bars) +{ + struct vnic_resource_header __iomem *rh; + struct vnic_resource __iomem *r; + u8 type; + + if (num_bars == 0) + return -EINVAL; + + if (bar->len < VNIC_MAX_RES_HDR_SIZE) { + pr_err("vNIC BAR0 res hdr length error\n"); + + return -EINVAL; + } + + rh = bar->vaddr; + if (!rh) { + pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + + return -EINVAL; + } + + if (ioread32(&rh->magic) != VNIC_RES_MAGIC || + ioread32(&rh->version) != VNIC_RES_VERSION) { + pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); + + return -EINVAL; + } + + r = (struct vnic_resource __iomem *)(rh + 1); + + while ((type = ioread8(&r->type)) != RES_TYPE_EOL) { + + u8 bar_num = ioread8(&r->bar); + u32 bar_offset = ioread32(&r->bar_offset); + u32 count = ioread32(&r->count); + u32 len; + + r++; + + if (bar_num >= num_bars) + continue; + + if (!bar[bar_num].len || !bar[bar_num].vaddr) + continue; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + /* each count is stride bytes long */ + len = count * VNIC_RES_STRIDE; + if (len + bar_offset > bar->len) { + pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + type, bar_offset, + len, + bar->len); + + return -EINVAL; + } + break; + + case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD: + case RES_TYPE_DEVCMD2: + len = count; + break; + + default: + continue; + } + + vdev->res[type].count = count; + vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset; + } + + return 0; +} + +unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type) +{ + return vdev->res[type].count; +} + +void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index) +{ + if 
(!vdev->res[type].vaddr) + return NULL; + + switch (type) { + case RES_TYPE_WQ: + case RES_TYPE_RQ: + case RES_TYPE_CQ: + case RES_TYPE_INTR_CTRL: + return (char __iomem *)vdev->res[type].vaddr + + index * VNIC_RES_STRIDE; + + default: + return (char __iomem *)vdev->res[type].vaddr; + } +} + +unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size) +{ + /* The base address of the desc rings must be 512 byte aligned. + * Descriptor count is aligned to groups of 32 descriptors. A + * count of 0 means the maximum 4096 descriptors. Descriptor + * size is aligned to 16 bytes. + */ + + unsigned int count_align = 32; + unsigned int desc_align = 16; + + ring->base_align = 512; + + if (desc_count == 0) + desc_count = 4096; + + ring->desc_count = ALIGN(desc_count, count_align); + + ring->desc_size = ALIGN(desc_size, desc_align); + + ring->size = ring->desc_count * ring->desc_size; + ring->size_unaligned = ring->size + ring->base_align; + + return ring->size_unaligned; +} + +void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring) +{ + memset(ring->descs, 0, ring->size); +} + +int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size) +{ + svnic_dev_desc_ring_size(ring, desc_count, desc_size); + + ring->descs_unaligned = pci_alloc_consistent(vdev->pdev, + ring->size_unaligned, + &ring->base_addr_unaligned); + + if (!ring->descs_unaligned) { + pr_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); + + return -ENOMEM; + } + + ring->base_addr = ALIGN(ring->base_addr_unaligned, + ring->base_align); + ring->descs = (u8 *)ring->descs_unaligned + + (ring->base_addr - ring->base_addr_unaligned); + + svnic_dev_clear_desc_ring(ring); + + ring->desc_avail = ring->desc_count - 1; + + return 0; +} + +void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) +{ + if (ring->descs) { + pci_free_consistent(vdev->pdev, + ring->size_unaligned, + ring->descs_unaligned, + ring->base_addr_unaligned); + ring->descs = NULL; + } +} + +static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result = dc2c->result + dc2c->next_result; + unsigned int i; + int delay; + int err; + u32 posted; + u32 new_posted; + + posted = ioread32(&dc2c->wq_ctrl->posted_index); + + if (posted == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: return error */ + return -ENODEV; + } + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + } + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + for (delay = 0; delay < wait; delay++) { + udelay(100); + if (result->color == dc2c->color) { + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 
0 : 1; + } + if (result->error) { + err = (int) result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("Error %d devcmd %d\n", + err, _CMD_N(cmd)); + + return err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + /* + * Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which + * would potentially result in reading stale + * values. + */ + rmb(); + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = result->results[i]; + } + + return 0; + } + } + + pr_err("Timed out devcmd %d\n", _CMD_N(cmd)); + + return -ETIMEDOUT; +} + +static int svnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + struct devcmd2_controller *dc2c = NULL; + unsigned int fetch_idx; + int ret; + void __iomem *p; + + if (vdev->devcmd2) + return 0; + + p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (!p) + return -ENODEV; + + dc2c = kzalloc(sizeof(*dc2c), GFP_ATOMIC); + if (!dc2c) + return -ENOMEM; + + vdev->devcmd2 = dc2c; + + dc2c->color = 1; + dc2c->result_size = DEVCMD2_RING_SIZE; + + ret = vnic_wq_devcmd2_alloc(vdev, + &dc2c->wq, + DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (ret) + goto err_free_devcmd2; + + fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index); + if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */ + /* Hardware surprise removal: reset fetch_index */ + fetch_idx = 0; + } + + /* + * Don't change fetch_index ever and + * set posted_index same as fetch_index + * when setting up the WQ for devcmd2. + */ + vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0); + svnic_wq_enable(&dc2c->wq); + ret = svnic_dev_alloc_desc_ring(vdev, + &dc2c->results_ring, + DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (ret) + goto err_free_wq; + + dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs; + dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs; + dc2c->wq_ctrl = dc2c->wq.ctrl; + vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO); + if (ret < 0) + goto err_free_desc_ring; + + vdev->devcmd_rtn = &_svnic_dev_cmd2; + pr_info("DEVCMD2 Initialized.\n"); + + return ret; + +err_free_desc_ring: + svnic_dev_free_desc_ring(vdev, &dc2c->results_ring); + +err_free_wq: + svnic_wq_disable(&dc2c->wq); + svnic_wq_free(&dc2c->wq); + +err_free_devcmd2: + kfree(dc2c); + vdev->devcmd2 = NULL; + + return ret; +} /* end of svnic_dev_init_devcmd2 */ + +static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + + vdev->devcmd2 = NULL; + vdev->devcmd_rtn = NULL; + + svnic_dev_free_desc_ring(vdev, &dc2c->results_ring); + svnic_wq_disable(&dc2c->wq); + svnic_wq_free(&dc2c->wq); + kfree(dc2c); +} + +int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + int err; + + memset(vdev->args, 0, sizeof(vdev->args)); + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = (*vdev->devcmd_rtn)(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + +int svnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info) +{ + u64 a0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + int err = 0; + + if (!vdev->fw_info) { + vdev->fw_info = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + &vdev->fw_info_pa); + if (!vdev->fw_info) + return -ENOMEM; + + a0 = vdev->fw_info_pa; + + /* only get fw_info once and cache it */ + err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, 
wait); + } + + *fw_info = vdev->fw_info; + + return err; +} + +int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, + unsigned int size, void *value) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + int err; + + a0 = offset; + a1 = size; + + err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait); + + switch (size) { + case 1: + *(u8 *)value = (u8)a0; + break; + case 2: + *(u16 *)value = (u16)a0; + break; + case 4: + *(u32 *)value = (u32)a0; + break; + case 8: + *(u64 *)value = a0; + break; + default: + BUG(); + break; + } + + return err; +} + +int svnic_dev_stats_clear(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait); +} + +int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + + if (!vdev->stats) { + vdev->stats = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_stats), &vdev->stats_pa); + if (!vdev->stats) + return -ENOMEM; + } + + *stats = vdev->stats; + a0 = vdev->stats_pa; + a1 = sizeof(struct vnic_stats); + + return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); +} + +int svnic_dev_close(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); +} + +int svnic_dev_enable_wait(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + int err = 0; + + err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait); + if (err == ERR_ECMDUNKNOWN) + return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait); + + return err; +} + +int svnic_dev_disable(struct vnic_dev *vdev) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait); +} + +int svnic_dev_open(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait = VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait); +} + +int svnic_dev_open_done(struct vnic_dev *vdev, int *done) +{ + u64 a0 = 0, a1 = 0; + int wait = VNIC_DVCMD_TMO; + int err; + + *done = 0; + + err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait); + if (err) + return err; + + *done = (a0 == 0); + + return 0; +} + +int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + + if (!vdev->notify) { + vdev->notify = pci_alloc_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + &vdev->notify_pa); + if (!vdev->notify) + return -ENOMEM; + } + + a0 = vdev->notify_pa; + a1 = ((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK; + a1 += sizeof(struct vnic_devcmd_notify); + + return svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +void svnic_dev_notify_unset(struct vnic_dev *vdev) +{ + u64 a0, a1; + int wait = VNIC_DVCMD_TMO; + + a0 = 0; /* paddr = 0 to unset notify buffer */ + a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */ + a1 += sizeof(struct vnic_devcmd_notify); + + svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); +} + +static int vnic_dev_notify_ready(struct vnic_dev *vdev) +{ + u32 *words; + unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; + unsigned int i; + u32 csum; + + if (!vdev->notify) + return 0; + + do { + csum = 0; + memcpy(&vdev->notify_copy, vdev->notify, + sizeof(struct vnic_devcmd_notify)); + words = (u32 *)&vdev->notify_copy; + for (i = 1; i < nwords; i++) + csum += words[i]; + } while (csum != words[0]); + + return 1; +} + +int svnic_dev_init(struct vnic_dev *vdev, int arg) +{ + u64 a0 = (u32)arg, a1 = 0; + int wait 
= VNIC_DVCMD_TMO; + + return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); +} + +int svnic_dev_link_status(struct vnic_dev *vdev) +{ + if (vdev->linkstatus) + return *vdev->linkstatus; + + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_state; +} + +u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev) +{ + if (!vnic_dev_notify_ready(vdev)) + return 0; + + return vdev->notify_copy.link_down_cnt; +} + +void svnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode) +{ + vdev->intr_mode = intr_mode; +} + +enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev) +{ + return vdev->intr_mode; +} + +void svnic_dev_unregister(struct vnic_dev *vdev) +{ + if (vdev) { + if (vdev->notify) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_notify), + vdev->notify, + vdev->notify_pa); + if (vdev->linkstatus) + pci_free_consistent(vdev->pdev, + sizeof(u32), + vdev->linkstatus, + vdev->linkstatus_pa); + if (vdev->stats) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_stats), + vdev->stats, vdev->stats_pa); + if (vdev->fw_info) + pci_free_consistent(vdev->pdev, + sizeof(struct vnic_devcmd_fw_info), + vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); + kfree(vdev); + } +} + +struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev, + void *priv, + struct pci_dev *pdev, + struct vnic_dev_bar *bar, + unsigned int num_bars) +{ + if (!vdev) { + vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC); + if (!vdev) + return NULL; + } + + vdev->priv = priv; + vdev->pdev = pdev; + + if (vnic_dev_discover_res(vdev, bar, num_bars)) + goto err_out; + + return vdev; + +err_out: + svnic_dev_unregister(vdev); + + return NULL; +} /* end of svnic_dev_alloc_discover */ + +/* + * fallback option is left to keep the interface common for other vnics. + */ +int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback) +{ + int err = -ENODEV; + void __iomem *p; + + p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (p) + err = svnic_dev_init_devcmd2(vdev); + else + pr_err("DEVCMD2 resource not found.\n"); + + return err; +} /* end of svnic_dev_cmd_init */ diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h new file mode 100644 index 000000000000..e65726da6504 --- /dev/null +++ b/drivers/scsi/snic/vnic_dev.h @@ -0,0 +1,110 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _VNIC_DEV_H_ +#define _VNIC_DEV_H_ + +#include "vnic_resource.h" +#include "vnic_devcmd.h" + +#ifndef VNIC_PADDR_TARGET +#define VNIC_PADDR_TARGET 0x0000000000000000ULL +#endif + +#ifndef readq +static inline u64 readq(void __iomem *reg) +{ + return ((u64)readl(reg + 0x4UL) << 32) | (u64)readl(reg); +} + +static inline void writeq(u64 val, void __iomem *reg) +{ + writel(lower_32_bits(val), reg); + writel(upper_32_bits(val), reg + 0x4UL); +} +#endif + +enum vnic_dev_intr_mode { + VNIC_DEV_INTR_MODE_UNKNOWN, + VNIC_DEV_INTR_MODE_INTX, + VNIC_DEV_INTR_MODE_MSI, + VNIC_DEV_INTR_MODE_MSIX, +}; + +struct vnic_dev_bar { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned long len; +}; + +struct vnic_dev_ring { + void *descs; + size_t size; + dma_addr_t base_addr; + size_t base_align; + void *descs_unaligned; + size_t size_unaligned; + dma_addr_t base_addr_unaligned; + unsigned int desc_size; + unsigned int desc_count; + unsigned int desc_avail; +}; + +struct vnic_dev; +struct vnic_stats; + +void *svnic_dev_priv(struct vnic_dev *vdev); +unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev, + enum vnic_res_type type); +void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, + unsigned int index); +unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring, + unsigned int desc_count, + unsigned int desc_size); +void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); +int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, + unsigned int desc_count, unsigned int desc_size); +void svnic_dev_free_desc_ring(struct vnic_dev *vdev, + struct vnic_dev_ring *ring); +int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait); +int svnic_dev_fw_info(struct vnic_dev *vdev, + struct vnic_devcmd_fw_info **fw_info); +int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, + unsigned int size, void *value); +int svnic_dev_stats_clear(struct vnic_dev *vdev); +int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); +int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); +void svnic_dev_notify_unset(struct vnic_dev *vdev); +int svnic_dev_link_status(struct vnic_dev *vdev); +u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev); +int svnic_dev_close(struct vnic_dev *vdev); +int svnic_dev_enable_wait(struct vnic_dev *vdev); +int svnic_dev_disable(struct vnic_dev *vdev); +int svnic_dev_open(struct vnic_dev *vdev, int arg); +int svnic_dev_open_done(struct vnic_dev *vdev, int *done); +int svnic_dev_init(struct vnic_dev *vdev, int arg); +struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev, + void *priv, struct pci_dev *pdev, + struct vnic_dev_bar *bar, + unsigned int num_bars); +void svnic_dev_set_intr_mode(struct vnic_dev *vdev, + enum vnic_dev_intr_mode intr_mode); +enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev); +void svnic_dev_unregister(struct vnic_dev *vdev); +int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); +#endif /* _VNIC_DEV_H_ */ diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h new file mode 100644 index 000000000000..d81b4f0ceaaa --- /dev/null +++ b/drivers/scsi/snic/vnic_devcmd.h @@ -0,0 +1,270 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _VNIC_DEVCMD_H_ +#define _VNIC_DEVCMD_H_ + +#define _CMD_NBITS 14 +#define _CMD_VTYPEBITS 10 +#define _CMD_FLAGSBITS 6 +#define _CMD_DIRBITS 2 + +#define _CMD_NMASK ((1 << _CMD_NBITS)-1) +#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1) +#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1) +#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1) + +#define _CMD_NSHIFT 0 +#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS) +#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS) +#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS) + +/* + * Direction bits (from host perspective). + */ +#define _CMD_DIR_NONE 0U +#define _CMD_DIR_WRITE 1U +#define _CMD_DIR_READ 2U +#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ) + +/* + * Flag bits. + */ +#define _CMD_FLAGS_NONE 0U +#define _CMD_FLAGS_NOWAIT 1U + +/* + * vNIC type bits. + */ +#define _CMD_VTYPE_NONE 0U +#define _CMD_VTYPE_ENET 1U +#define _CMD_VTYPE_FC 2U +#define _CMD_VTYPE_SCSI 4U +#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI) + +/* + * Used to create cmds.. +*/ +#define _CMDCF(dir, flags, vtype, nr) \ + (((dir) << _CMD_DIRSHIFT) | \ + ((flags) << _CMD_FLAGSSHIFT) | \ + ((vtype) << _CMD_VTYPESHIFT) | \ + ((nr) << _CMD_NSHIFT)) +#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr) +#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr) + +/* + * Used to decode cmds.. 
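+ * Each accessor extracts one field by shifting the command word right
+ * by the field's offset and masking to the field's width. For example,
+ * CMD_MCPU_FW_INFO is built with _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
+ * so _CMD_DIR(cmd) yields _CMD_DIR_WRITE and _CMD_N(cmd) yields 1.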
+*/ +#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK) +#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK) +#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK) +#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK) + +enum vnic_devcmd_cmd { + CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), + + /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* dev-specific block member: + * in: (u16)a0=offset,(u8)a1=size + * out: a0=value */ + CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2), + + /* stats clear */ + CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3), + + /* stats dump in mem: (u64)a0=paddr to stats area, + * (u16)a1=sizeof stats area */ + CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4), + + /* nic_cfg in (u32)a0 */ + CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + + /* set struct vnic_devcmd_notify buffer in mem: + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21), + + /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */ + CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23), + + /* open status: + * out: a0=0 open complete, a0=1 open in progress */ + CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24), + + /* close vnic */ + CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), + + /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ + CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), + + /* enable virtual link */ + CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* enable virtual link, waiting variant. */ + CMD_ENABLE_WAIT = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28), + + /* disable virtual link */ + CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29), + + /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */ + CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30), + + /* init status: + * out: a0=0 init complete, a0=1 init in progress + * if a0=0, a1=errno */ + CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31), + + /* undo initialize of virtual link */ + CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* + * Initialization for the devcmd2 interface. 
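+	 * (The result buffer is an array of struct devcmd2_result entries.)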
+ * in: (u64) a0=host result buffer physical address + * in: (u16) a1=number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57) +}; + +/* flags for CMD_OPEN */ +#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ + +/* flags for CMD_INIT */ +#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ + +/* flags for CMD_PACKET_FILTER */ +#define CMD_PFILTER_DIRECTED 0x01 +#define CMD_PFILTER_MULTICAST 0x02 +#define CMD_PFILTER_BROADCAST 0x04 +#define CMD_PFILTER_PROMISCUOUS 0x08 +#define CMD_PFILTER_ALL_MULTICAST 0x10 + +enum vnic_devcmd_status { + STAT_NONE = 0, + STAT_BUSY = 1 << 0, /* cmd in progress */ + STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */ +}; + +enum vnic_devcmd_error { + ERR_SUCCESS = 0, + ERR_EINVAL = 1, + ERR_EFAULT = 2, + ERR_EPERM = 3, + ERR_EBUSY = 4, + ERR_ECMDUNKNOWN = 5, + ERR_EBADSTATE = 6, + ERR_ENOMEM = 7, + ERR_ETIMEDOUT = 8, + ERR_ELINKDOWN = 9, +}; + +struct vnic_devcmd_fw_info { + char fw_version[32]; + char fw_build[32]; + char hw_version[32]; + char hw_serial_number[32]; +}; + +struct vnic_devcmd_notify { + u32 csum; /* checksum over following words */ + + u32 link_state; /* link up == 1 */ + u32 port_speed; /* effective port speed (rate limit) */ + u32 mtu; /* MTU */ + u32 msglvl; /* requested driver msg lvl */ + u32 uif; /* uplink interface */ + u32 status; /* status bits (see VNIC_STF_*) */ + u32 error; /* error code (see ERR_*) for first ERR */ + u32 link_down_cnt; /* running count of link down transitions */ +}; +#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ + +struct vnic_devcmd_provinfo { + u8 oui[3]; + u8 type; + u8 data[0]; +}; + +/* + * Writing cmd register causes STAT_BUSY to get set in status register. + * When cmd completes, STAT_BUSY will be cleared. + * + * If cmd completed successfully STAT_ERROR will be clear + * and args registers contain cmd-specific results. + * + * If cmd error, STAT_ERROR will be set and args[0] contains error code. + * + * status register is read-only. While STAT_BUSY is set, + * all other register contents are read-only. + */ + +/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */ +#define VNIC_DEVCMD_NARGS 15 +struct vnic_devcmd { + u32 status; /* RO */ + u32 cmd; /* RW */ + u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ +}; + + +/* + * Version 2 of the interface. + * + * Some things are carried over, notably the vnic_devcmd_cmd enum. + */ + +/* + * Flags for vnic_devcmd2.flags + */ + +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; /* same command #defines as original */ + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; /* into copy WQ */ + u8 error; /* same error codes as original */ + u8 color; /* 0 or 1 as with completion queues */ +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + +#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1) + +#endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c new file mode 100644 index 000000000000..a7d54806787d --- /dev/null +++ b/drivers/scsi/snic/vnic_intr.c @@ -0,0 +1,59 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. 
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include "vnic_dev.h" +#include "vnic_intr.h" + +void svnic_intr_free(struct vnic_intr *intr) +{ + intr->ctrl = NULL; +} + +int svnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, + unsigned int index) +{ + intr->index = index; + intr->vdev = vdev; + + intr->ctrl = svnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); + if (!intr->ctrl) { + pr_err("Failed to hook INTR[%d].ctrl resource\n", + index); + return -EINVAL; + } + + return 0; +} + +void svnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, + unsigned int coalescing_type, unsigned int mask_on_assertion) +{ + iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); + iowrite32(coalescing_type, &intr->ctrl->coalescing_type); + iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); + iowrite32(0, &intr->ctrl->int_credits); +} + +void svnic_intr_clean(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->int_credits); +} diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h new file mode 100644 index 000000000000..4547f603fe5e --- /dev/null +++ b/drivers/scsi/snic/vnic_intr.h @@ -0,0 +1,105 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _VNIC_INTR_H_ +#define _VNIC_INTR_H_ + +#include <linux/pci.h> +#include "vnic_dev.h" + +#define VNIC_INTR_TIMER_MAX 0xffff + +#define VNIC_INTR_TIMER_TYPE_ABS 0 +#define VNIC_INTR_TIMER_TYPE_QUIET 1 + +/* Interrupt control */ +struct vnic_intr_ctrl { + u32 coalescing_timer; /* 0x00 */ + u32 pad0; + u32 coalescing_value; /* 0x08 */ + u32 pad1; + u32 coalescing_type; /* 0x10 */ + u32 pad2; + u32 mask_on_assertion; /* 0x18 */ + u32 pad3; + u32 mask; /* 0x20 */ + u32 pad4; + u32 int_credits; /* 0x28 */ + u32 pad5; + u32 int_credit_return; /* 0x30 */ + u32 pad6; +}; + +struct vnic_intr { + unsigned int index; + struct vnic_dev *vdev; + struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */ +}; + +static inline void +svnic_intr_unmask(struct vnic_intr *intr) +{ + iowrite32(0, &intr->ctrl->mask); +} + +static inline void +svnic_intr_mask(struct vnic_intr *intr) +{ + iowrite32(1, &intr->ctrl->mask); +} + +static inline void +svnic_intr_return_credits(struct vnic_intr *intr, + unsigned int credits, + int unmask, + int reset_timer) +{ +#define VNIC_INTR_UNMASK_SHIFT 16 +#define VNIC_INTR_RESET_TIMER_SHIFT 17 + + u32 int_credit_return = (credits & 0xffff) | + (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) | + (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0); + + iowrite32(int_credit_return, &intr->ctrl->int_credit_return); +} + +static inline unsigned int +svnic_intr_credits(struct vnic_intr *intr) +{ + return ioread32(&intr->ctrl->int_credits); +} + +static inline void +svnic_intr_return_all_credits(struct vnic_intr *intr) +{ + unsigned int credits = svnic_intr_credits(intr); + int unmask = 1; + int reset_timer = 1; + + svnic_intr_return_credits(intr, credits, unmask, reset_timer); +} + +void svnic_intr_free(struct vnic_intr *); +int svnic_intr_alloc(struct vnic_dev *, struct vnic_intr *, unsigned int); +void svnic_intr_init(struct vnic_intr *intr, + unsigned int coalescing_timer, + unsigned int coalescing_type, + unsigned int mask_on_assertion); +void svnic_intr_clean(struct vnic_intr *); + +#endif /* _VNIC_INTR_H_ */ diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h new file mode 100644 index 000000000000..9713d6835db3 --- /dev/null +++ b/drivers/scsi/snic/vnic_resource.h @@ -0,0 +1,68 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _VNIC_RESOURCE_H_ +#define _VNIC_RESOURCE_H_ + +#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */ +#define VNIC_RES_VERSION 0x00000000L + +/* vNIC resource types */ +enum vnic_res_type { + RES_TYPE_EOL, /* End-of-list */ + RES_TYPE_WQ, /* Work queues */ + RES_TYPE_RQ, /* Receive queues */ + RES_TYPE_CQ, /* Completion queues */ + RES_TYPE_RSVD1, + RES_TYPE_NIC_CFG, /* Enet NIC config registers */ + RES_TYPE_RSVD2, + RES_TYPE_RSVD3, + RES_TYPE_RSVD4, + RES_TYPE_RSVD5, + RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ + RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ + RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ + RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */ + RES_TYPE_RSVD6, + RES_TYPE_RSVD7, + RES_TYPE_DEVCMD, /* Device command region */ + RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ + + RES_TYPE_MAX, /* Count of resource types */ +}; + +struct vnic_resource_header { + u32 magic; + u32 version; +}; + +struct vnic_resource { + u8 type; + u8 bar; + u8 pad[2]; + u32 bar_offset; + u32 count; +}; + +#endif /* _VNIC_RESOURCE_H_ */ diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h new file mode 100644 index 000000000000..514d39f5cf00 --- /dev/null +++ b/drivers/scsi/snic/vnic_snic.h @@ -0,0 +1,54 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _VNIC_SNIC_H_ +#define _VNIC_SNIC_H_ + +#define VNIC_SNIC_WQ_DESCS_MIN 64 +#define VNIC_SNIC_WQ_DESCS_MAX 1024 + +#define VNIC_SNIC_MAXDATAFIELDSIZE_MIN 256 +#define VNIC_SNIC_MAXDATAFIELDSIZE_MAX 2112 + +#define VNIC_SNIC_IO_THROTTLE_COUNT_MIN 1 +#define VNIC_SNIC_IO_THROTTLE_COUNT_MAX 1024 + +#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MIN 0 +#define VNIC_SNIC_PORT_DOWN_TIMEOUT_MAX 240000 + +#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MIN 0 +#define VNIC_SNIC_PORT_DOWN_IO_RETRIES_MAX 255 + +#define VNIC_SNIC_LUNS_PER_TARGET_MIN 1 +#define VNIC_SNIC_LUNS_PER_TARGET_MAX 1024 + +/* Device-specific region: scsi configuration */ +struct vnic_snic_config { + u32 flags; + u32 wq_enet_desc_count; + u32 io_throttle_count; + u32 port_down_timeout; + u32 port_down_io_retries; + u32 luns_per_tgt; + u16 maxdatafieldsize; + u16 intr_timer; + u8 intr_timer_type; + u8 _resvd2; + u8 xpt_type; + u8 hid; +}; +#endif /* _VNIC_SNIC_H_ */ diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h new file mode 100644 index 000000000000..370a37c97748 --- /dev/null +++ b/drivers/scsi/snic/vnic_stats.h @@ -0,0 +1,68 @@ +/* + * Copyright 2014 Cisco Systems, Inc. 
All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _VNIC_STATS_H_ +#define _VNIC_STATS_H_ + +/* Tx statistics */ +struct vnic_tx_stats { + u64 tx_frames_ok; + u64 tx_unicast_frames_ok; + u64 tx_multicast_frames_ok; + u64 tx_broadcast_frames_ok; + u64 tx_bytes_ok; + u64 tx_unicast_bytes_ok; + u64 tx_multicast_bytes_ok; + u64 tx_broadcast_bytes_ok; + u64 tx_drops; + u64 tx_errors; + u64 tx_tso; + u64 rsvd[16]; +}; + +/* Rx statistics */ +struct vnic_rx_stats { + u64 rx_frames_ok; + u64 rx_frames_total; + u64 rx_unicast_frames_ok; + u64 rx_multicast_frames_ok; + u64 rx_broadcast_frames_ok; + u64 rx_bytes_ok; + u64 rx_unicast_bytes_ok; + u64 rx_multicast_bytes_ok; + u64 rx_broadcast_bytes_ok; + u64 rx_drop; + u64 rx_no_bufs; + u64 rx_errors; + u64 rx_rss; + u64 rx_crc_errors; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_to_max; + u64 rsvd[16]; +}; + +struct vnic_stats { + struct vnic_tx_stats tx; + struct vnic_rx_stats rx; +}; + +#endif /* _VNIC_STATS_H_ */ diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c new file mode 100644 index 000000000000..1e91d432089e --- /dev/null +++ b/drivers/scsi/snic/vnic_wq.c @@ -0,0 +1,237 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
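For reference (not part of the patch): the svnic_dev_get_res() calls in the .c files above resolve types such as RES_TYPE_INTR_CTRL against the table laid out in vnic_resource.h. vnic_dev.c itself is not in this excerpt, so the loop below is only a hypothetical sketch of how such a BAR-resident table is typically scanned; it assumes the header magic/version were already validated:

/* Hypothetical sketch: linear scan of a vnic resource table terminated by
 * RES_TYPE_EOL; bar and bar_offset in the returned entry locate the
 * register block for the requested resource type. */
static struct vnic_resource *example_find_res(struct vnic_resource *r,
					      u8 want)
{
	for (; r->type != RES_TYPE_EOL; r++)
		if (r->type == want && r->count)
			return r;

	return NULL;
}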
+ */ + +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include "vnic_dev.h" +#include "vnic_wq.h" + +static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, enum vnic_res_type res_type) +{ + wq->ctrl = svnic_dev_get_res(vdev, res_type, index); + if (!wq->ctrl) + return -EINVAL; + + return 0; +} + +static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size) +{ + return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, + desc_size); +} + +static int vnic_wq_alloc_bufs(struct vnic_wq *wq) +{ + struct vnic_wq_buf *buf; + unsigned int i, j, count = wq->ring.desc_count; + unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); + + for (i = 0; i < blks; i++) { + wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); + if (!wq->bufs[i]) { + pr_err("Failed to alloc wq_bufs\n"); + + return -ENOMEM; + } + } + + for (i = 0; i < blks; i++) { + buf = wq->bufs[i]; + for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) { + buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j; + buf->desc = (u8 *)wq->ring.descs + + wq->ring.desc_size * buf->index; + if (buf->index + 1 == count) { + buf->next = wq->bufs[0]; + break; + } else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) { + buf->next = wq->bufs[i + 1]; + } else { + buf->next = buf + 1; + buf++; + } + } + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + return 0; +} + +void svnic_wq_free(struct vnic_wq *wq) +{ + struct vnic_dev *vdev; + unsigned int i; + + vdev = wq->vdev; + + svnic_dev_free_desc_ring(vdev, &wq->ring); + + for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { + kfree(wq->bufs[i]); + wq->bufs[i] = NULL; + } + + wq->ctrl = NULL; + +} + +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2); + if (err) { + pr_err("Failed to get devcmd2 resource\n"); + + return err; + } + + svnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size); + if (err) + return err; + + return 0; +} + +int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = index; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ); + if (err) { + pr_err("Failed to hook WQ[%d] resource\n", index); + + return err; + } + + svnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size); + if (err) + return err; + + err = vnic_wq_alloc_bufs(wq); + if (err) { + svnic_wq_free(wq); + + return err; + } + + return 0; +} + +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + 
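For reference (not part of the patch): vnic_wq_alloc_bufs() above carves the per-descriptor bookkeeping into fixed-size blocks and chains every entry into one circular list that wraps from the last descriptor back to bufs[0]. A standalone check of the block arithmetic, using 64-entry blocks as in VNIC_WQ_BUF_DFLT_BLK_ENTRIES (the 1024-descriptor ring size is an illustrative value):

#include <stdio.h>

#define BLK_ENTRIES 64	/* VNIC_WQ_BUF_DFLT_BLK_ENTRIES */

int main(void)
{
	unsigned int count = 1024;	/* example descriptor ring size */
	unsigned int blks = (count + BLK_ENTRIES - 1) / BLK_ENTRIES;
	unsigned int last = count - 1;

	/* 16 blocks; entry 1023 sits at bufs[15][63] and its ->next points
	 * back to wq->bufs[0], closing the ring. */
	printf("blocks=%u, last entry at bufs[%u][%u]\n",
	       blks, last / BLK_ENTRIES, last % BLK_ENTRIES);
	return 0;
}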
wq->to_use = wq->to_clean = + &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] + [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; +} + +void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, + error_interrupt_offset); +} + +unsigned int svnic_wq_error_status(struct vnic_wq *wq) +{ + return ioread32(&wq->ctrl->error_status); +} + +void svnic_wq_enable(struct vnic_wq *wq) +{ + iowrite32(1, &wq->ctrl->enable); +} + +int svnic_wq_disable(struct vnic_wq *wq) +{ + unsigned int wait; + + iowrite32(0, &wq->ctrl->enable); + + /* Wait for HW to ACK disable request */ + for (wait = 0; wait < 100; wait++) { + if (!(ioread32(&wq->ctrl->running))) + return 0; + udelay(1); + } + + pr_err("Failed to disable WQ[%d]\n", wq->index); + + return -ETIMEDOUT; +} + +void svnic_wq_clean(struct vnic_wq *wq, + void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) +{ + struct vnic_wq_buf *buf; + + BUG_ON(ioread32(&wq->ctrl->enable)); + + buf = wq->to_clean; + + while (svnic_wq_desc_used(wq) > 0) { + + (*buf_clean)(wq, buf); + + buf = wq->to_clean = buf->next; + wq->ring.desc_avail++; + } + + wq->to_use = wq->to_clean = wq->bufs[0]; + + iowrite32(0, &wq->ctrl->fetch_index); + iowrite32(0, &wq->ctrl->posted_index); + iowrite32(0, &wq->ctrl->error_status); + + svnic_dev_clear_desc_ring(&wq->ring); +} diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h new file mode 100644 index 000000000000..7cc031c7ceba --- /dev/null +++ b/drivers/scsi/snic/vnic_wq.h @@ -0,0 +1,170 @@ +/* + * Copyright 2014 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _VNIC_WQ_H_ +#define _VNIC_WQ_H_ + +#include <linux/pci.h> +#include "vnic_dev.h" +#include "vnic_cq.h" + +/* Work queue control */ +struct vnic_wq_ctrl { + u64 ring_base; /* 0x00 */ + u32 ring_size; /* 0x08 */ + u32 pad0; + u32 posted_index; /* 0x10 */ + u32 pad1; + u32 cq_index; /* 0x18 */ + u32 pad2; + u32 enable; /* 0x20 */ + u32 pad3; + u32 running; /* 0x28 */ + u32 pad4; + u32 fetch_index; /* 0x30 */ + u32 pad5; + u32 dca_value; /* 0x38 */ + u32 pad6; + u32 error_interrupt_enable; /* 0x40 */ + u32 pad7; + u32 error_interrupt_offset; /* 0x48 */ + u32 pad8; + u32 error_status; /* 0x50 */ + u32 pad9; +}; + +struct vnic_wq_buf { + struct vnic_wq_buf *next; + dma_addr_t dma_addr; + void *os_buf; + unsigned int len; + unsigned int index; + int sop; + void *desc; +}; + +/* Break the vnic_wq_buf allocations into blocks of 64 entries */ +#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 +#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 +#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ + ((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? 
\
+	VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLK_SZ \
+	(VNIC_WQ_BUF_DFLT_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
+#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
+	DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+
+struct vnic_wq {
+	unsigned int index;
+	struct vnic_dev *vdev;
+	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_dev_ring ring;
+	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
+	struct vnic_wq_buf *to_use;
+	struct vnic_wq_buf *to_clean;
+	unsigned int pkts_outstanding;
+};
+
+static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
+{
+	/* how many does SW own? */
+	return wq->ring.desc_avail;
+}
+
+static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
+{
+	/* how many does HW own? */
+	return wq->ring.desc_count - wq->ring.desc_avail - 1;
+}
+
+static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
+{
+	return wq->to_use->desc;
+}
+
+static inline void svnic_wq_post(struct vnic_wq *wq,
+	void *os_buf, dma_addr_t dma_addr,
+	unsigned int len, int sop, int eop)
+{
+	struct vnic_wq_buf *buf = wq->to_use;
+
+	buf->sop = sop;
+	buf->os_buf = eop ? os_buf : NULL;
+	buf->dma_addr = dma_addr;
+	buf->len = len;
+
+	buf = buf->next;
+	if (eop) {
+		/* Adding write memory barrier prevents compiler and/or CPU
+		 * reordering, thus avoiding descriptor posting before
+		 * descriptor is initialized. Otherwise, hardware can read
+		 * stale descriptor fields.
+		 */
+		wmb();
+		iowrite32(buf->index, &wq->ctrl->posted_index);
+	}
+	wq->to_use = buf;
+
+	wq->ring.desc_avail--;
+}
+
+static inline void svnic_wq_service(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, u16 completed_index,
+	void (*buf_service)(struct vnic_wq *wq,
+	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
+	void *opaque)
+{
+	struct vnic_wq_buf *buf;
+
+	buf = wq->to_clean;
+	while (1) {
+
+		(*buf_service)(wq, cq_desc, buf, opaque);
+
+		wq->ring.desc_avail++;
+
+		wq->to_clean = buf->next;
+
+		if (buf->index == completed_index)
+			break;
+
+		buf = wq->to_clean;
+	}
+}
+
+void svnic_wq_free(struct vnic_wq *wq);
+int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+	unsigned int index, unsigned int desc_count, unsigned int desc_size);
+int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+	unsigned int desc_count, unsigned int desc_size);
+void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int fetch_index, unsigned int post_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+
+void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
+	unsigned int error_interrupt_enable,
+	unsigned int error_interrupt_offset);
+unsigned int svnic_wq_error_status(struct vnic_wq *wq);
+void svnic_wq_enable(struct vnic_wq *wq);
+int svnic_wq_disable(struct vnic_wq *wq);
+void svnic_wq_clean(struct vnic_wq *wq,
+	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h
new file mode 100644
index 000000000000..68f62b6d105b
--- /dev/null
+++ b/drivers/scsi/snic/wq_enet_desc.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2014 Cisco Systems, Inc. All rights reserved.
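For reference (not part of the patch): svnic_wq_post() above only publishes posted_index to the hardware on an end-of-packet descriptor, after the wmb(). A hedged sketch of the intended producer pattern for a two-fragment packet; DMA mapping and the actual descriptor encoding (svnic_wq_next_desc() plus wq_enet_desc_enc() from the header below) are elided:

/* Sketch: queue two fragments; only the eop == 1 call rings the doorbell. */
static void example_post_two_frags(struct vnic_wq *wq, void *os_buf,
				   dma_addr_t pa0, dma_addr_t pa1)
{
	if (svnic_wq_desc_avail(wq) < 2)
		return;	/* a real caller would stop the queue instead */

	svnic_wq_post(wq, NULL, pa0, 128, 1 /* sop */, 0 /* !eop */);
	svnic_wq_post(wq, os_buf, pa1, 64, 0 /* !sop */, 1 /* eop */);
}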
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _WQ_ENET_DESC_H_ +#define _WQ_ENET_DESC_H_ + +/* Ethernet work queue descriptor: 16B */ +struct wq_enet_desc { + __le64 address; + __le16 length; + __le16 mss_loopback; + __le16 header_length_flags; + __le16 vlan_tag; +}; + +#define WQ_ENET_ADDR_BITS 64 +#define WQ_ENET_LEN_BITS 14 +#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1) +#define WQ_ENET_MSS_BITS 14 +#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1) +#define WQ_ENET_MSS_SHIFT 2 +#define WQ_ENET_LOOPBACK_SHIFT 1 +#define WQ_ENET_HDRLEN_BITS 10 +#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1) +#define WQ_ENET_FLAGS_OM_BITS 2 +#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1) +#define WQ_ENET_FLAGS_EOP_SHIFT 12 +#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13 +#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14 +#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15 + +#define WQ_ENET_OFFLOAD_MODE_CSUM 0 +#define WQ_ENET_OFFLOAD_MODE_RESERVED 1 +#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2 +#define WQ_ENET_OFFLOAD_MODE_TSO 3 + +static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, + u64 address, u16 length, u16 mss, u16 header_length, + u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap, + u8 vlan_tag_insert, u16 vlan_tag, u8 loopback) +{ + desc->address = cpu_to_le64(address); + desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); + desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << + WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT); + desc->header_length_flags = cpu_to_le16( + (header_length & WQ_ENET_HDRLEN_MASK) | + (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS | + (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT | + (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT | + (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT | + (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT); + desc->vlan_tag = cpu_to_le16(vlan_tag); +} + +static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, + u64 *address, u16 *length, u16 *mss, u16 *header_length, + u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap, + u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback) +{ + *address = le64_to_cpu(desc->address); + *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; + *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & + WQ_ENET_MSS_MASK; + *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> + WQ_ENET_LOOPBACK_SHIFT) & 1); + *header_length = le16_to_cpu(desc->header_length_flags) & + WQ_ENET_HDRLEN_MASK; + *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK); + *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_EOP_SHIFT) & 1); + *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1); + *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> + 
WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1); + *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> + WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1); + *vlan_tag = le16_to_cpu(desc->vlan_tag); +} + +#endif /* _WQ_ENET_DESC_H_ */ diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 9a1c34205254..3f25b8fa921d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -471,6 +471,47 @@ static void st_release_request(struct st_request *streq) kfree(streq); } +static void st_do_stats(struct scsi_tape *STp, struct request *req) +{ + ktime_t now; + + now = ktime_get(); + if (req->cmd[0] == WRITE_6) { + now = ktime_sub(now, STp->stats->write_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); + atomic64_inc(&STp->stats->write_cnt); + if (req->errors) { + atomic64_add(atomic_read(&STp->stats->last_write_size) + - STp->buffer->cmdstat.residual, + &STp->stats->write_byte_cnt); + if (STp->buffer->cmdstat.residual > 0) + atomic64_inc(&STp->stats->resid_cnt); + } else + atomic64_add(atomic_read(&STp->stats->last_write_size), + &STp->stats->write_byte_cnt); + } else if (req->cmd[0] == READ_6) { + now = ktime_sub(now, STp->stats->read_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); + atomic64_inc(&STp->stats->read_cnt); + if (req->errors) { + atomic64_add(atomic_read(&STp->stats->last_read_size) + - STp->buffer->cmdstat.residual, + &STp->stats->read_byte_cnt); + if (STp->buffer->cmdstat.residual > 0) + atomic64_inc(&STp->stats->resid_cnt); + } else + atomic64_add(atomic_read(&STp->stats->last_read_size), + &STp->stats->read_byte_cnt); + } else { + now = ktime_sub(now, STp->stats->other_time); + atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time); + atomic64_inc(&STp->stats->other_cnt); + } + atomic64_dec(&STp->stats->in_flight); +} + static void st_scsi_execute_end(struct request *req, int uptodate) { struct st_request *SRpnt = req->end_io_data; @@ -480,6 +521,8 @@ static void st_scsi_execute_end(struct request *req, int uptodate) STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; STp->buffer->cmdstat.residual = req->resid_len; + st_do_stats(STp, req); + tmp = SRpnt->bio; if (SRpnt->waiting) complete(SRpnt->waiting); @@ -496,6 +539,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; int err = 0; int write = (data_direction == DMA_TO_DEVICE); + struct scsi_tape *STp = SRpnt->stp; req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL); @@ -516,6 +560,17 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, } } + atomic64_inc(&STp->stats->in_flight); + if (cmd[0] == WRITE_6) { + atomic_set(&STp->stats->last_write_size, bufflen); + STp->stats->write_time = ktime_get(); + } else if (cmd[0] == READ_6) { + atomic_set(&STp->stats->last_read_size, bufflen); + STp->stats->read_time = ktime_get(); + } else { + STp->stats->other_time = ktime_get(); + } + SRpnt->bio = req->bio; req->cmd_len = COMMAND_SIZE(cmd[0]); memset(req->cmd, 0, BLK_MAX_CDB); @@ -4222,6 +4277,12 @@ static int st_probe(struct device *dev) } tpnt->index = error; sprintf(disk->disk_name, "st%d", tpnt->index); + tpnt->stats = kzalloc(sizeof(struct scsi_tape_stats), GFP_KERNEL); + if (tpnt->stats == NULL) { + sdev_printk(KERN_ERR, SDp, + "st: Can't allocate statistics.\n"); + goto out_idr_remove; + } dev_set_drvdata(dev, tpnt); @@ 
-4241,6 +4302,8 @@ static int st_probe(struct device *dev)
 out_remove_devs:
 	remove_cdevs(tpnt);
+	kfree(tpnt->stats);
+out_idr_remove:
 	spin_lock(&st_index_lock);
 	idr_remove(&st_index_idr, tpnt->index);
 	spin_unlock(&st_index_lock);
@@ -4298,6 +4361,7 @@ static void scsi_tape_release(struct kref *kref)
 	disk->private_data = NULL;
 	put_disk(disk);
+	kfree(tpnt->stats);
 	kfree(tpnt);
 	return;
 }
@@ -4513,6 +4577,184 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR_RO(options);
 
+/* Support for tape stats */
+
+/**
+ * read_cnt_show - return read count - count of reads made from tape drive
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t read_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->read_cnt));
+}
+static DEVICE_ATTR_RO(read_cnt);
+
+/**
+ * read_byte_cnt_show - return read byte count - tape drives
+ * may use blocks smaller than 512 bytes; this gives the raw byte count
+ * of data read from the tape drive.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t read_byte_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->read_byte_cnt));
+}
+static DEVICE_ATTR_RO(read_byte_cnt);
+
+/**
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t read_ns_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->tot_read_time));
+}
+static DEVICE_ATTR_RO(read_ns);
+
+/**
+ * write_cnt_show - write count - number of user calls
+ * to write(2) that have written data to tape.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t write_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->write_cnt));
+}
+static DEVICE_ATTR_RO(write_cnt);
+
+/**
+ * write_byte_cnt_show - write byte count - raw count of
+ * bytes written to tape.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t write_byte_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->write_byte_cnt));
+}
+static DEVICE_ATTR_RO(write_byte_cnt);
+
+/**
+ * write_ns_show - write ns - number of nanoseconds waiting on write
+ * requests to complete.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t write_ns_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->tot_write_time));
+}
+static DEVICE_ATTR_RO(write_ns);
+
+/**
+ * in_flight_show - number of I/Os currently in flight -
+ * in most cases this will be either 0 or 1. It may be higher if someone
+ * has also issued other SCSI commands, for example via an ioctl.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t in_flight_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->in_flight));
+}
+static DEVICE_ATTR_RO(in_flight);
+
+/**
+ * io_ns_show - io wait ns - this is the number of ns spent
+ * waiting on all I/O to complete. This includes tape movement commands
+ * such as rewinding and seeking to end of file or tape; it also includes
+ * read and write. To determine the time spent on tape movement,
+ * subtract the read and write ns from this value.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t io_ns_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->tot_io_time));
+}
+static DEVICE_ATTR_RO(io_ns);
+
+/**
+ * other_cnt_show - other io count - this is the number of
+ * I/O requests other than read and write requests.
+ * Typically these are tape movement requests, but driver-initiated tape
+ * movement is counted as well. Only requests issued by the st driver
+ * are included.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t other_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct st_modedef *STm = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%lld",
+		       (long long)atomic64_read(&STm->tape->stats->other_cnt));
+}
+static DEVICE_ATTR_RO(other_cnt);
+
+/**
+ * resid_cnt_show - A count of the number of times we get a residual
+ * count - this should indicate someone issuing reads larger than the
+ * block size on tape.
+ * @dev: struct device + * @attr: attribute structure + * @buf: buffer to return formatted data in + */ +static ssize_t resid_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct st_modedef *STm = dev_get_drvdata(dev); + + return sprintf(buf, "%lld", + (long long)atomic64_read(&STm->tape->stats->resid_cnt)); +} +static DEVICE_ATTR_RO(resid_cnt); + static struct attribute *st_dev_attrs[] = { &dev_attr_defined.attr, &dev_attr_default_blksize.attr, @@ -4521,7 +4763,35 @@ static struct attribute *st_dev_attrs[] = { &dev_attr_options.attr, NULL, }; -ATTRIBUTE_GROUPS(st_dev); + +static struct attribute *st_stats_attrs[] = { + &dev_attr_read_cnt.attr, + &dev_attr_read_byte_cnt.attr, + &dev_attr_read_ns.attr, + &dev_attr_write_cnt.attr, + &dev_attr_write_byte_cnt.attr, + &dev_attr_write_ns.attr, + &dev_attr_in_flight.attr, + &dev_attr_io_ns.attr, + &dev_attr_other_cnt.attr, + &dev_attr_resid_cnt.attr, + NULL, +}; + +static struct attribute_group stats_group = { + .name = "stats", + .attrs = st_stats_attrs, +}; + +static struct attribute_group st_group = { + .attrs = st_dev_attrs, +}; + +static const struct attribute_group *st_dev_groups[] = { + &st_group, + &stats_group, + NULL, +}; /* The following functions may be useful for a larger audience. */ static int sgl_map_user_pages(struct st_buffer *STbp, diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index f3eee0f9f40c..b6486b5d8681 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -92,6 +92,27 @@ struct st_partstat { int drv_file; }; +/* Tape statistics */ +struct scsi_tape_stats { + atomic64_t read_byte_cnt; /* bytes read */ + atomic64_t write_byte_cnt; /* bytes written */ + atomic64_t in_flight; /* Number of I/Os in flight */ + atomic64_t read_cnt; /* Count of read requests */ + atomic64_t write_cnt; /* Count of write requests */ + atomic64_t other_cnt; /* Count of other requests either + * implicit or from user space + * ioctl. 
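For reference (not part of the patch): the attribute group above is named "stats", so the counters appear as one file per attribute beneath the st device's sysfs directory. A userspace sketch; the exact path (and the st0 instance) is an assumption, not something fixed by this patch:

#include <stdio.h>

int main(void)
{
	unsigned long long reads = 0;
	FILE *f = fopen("/sys/class/scsi_tape/st0/stats/read_cnt", "r");

	if (!f)
		return 1;	/* hypothetical path; adjust for your system */
	if (fscanf(f, "%llu", &reads) == 1)
		printf("completed reads: %llu\n", reads);
	fclose(f);
	return 0;
}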
*/ + atomic64_t resid_cnt; /* Count of resid_len > 0 */ + atomic64_t tot_read_time; /* ktime spent completing reads */ + atomic64_t tot_write_time; /* ktime spent completing writes */ + atomic64_t tot_io_time; /* ktime spent doing any I/O */ + ktime_t read_time; /* holds ktime request was queued */ + ktime_t write_time; /* holds ktime request was queued */ + ktime_t other_time; /* holds ktime request was queued */ + atomic_t last_read_size; /* Number of bytes issued for last read */ + atomic_t last_write_size; /* Number of bytes issued for last write */ +}; + #define ST_NBR_PARTITIONS 4 /* The tape drive descriptor */ @@ -171,6 +192,7 @@ struct scsi_tape { #endif struct gendisk *disk; struct kref kref; + struct scsi_tape_stats *stats; }; /* Bit masks for use_pf */ diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index 0b7819f3e09b..5bdcbe8fa958 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c @@ -838,7 +838,6 @@ static struct scsi_host_template driver_template = { .can_queue = 1, .this_id = SYM53C416_SCSI_ID, .sg_tablesize = 32, - .cmd_per_lun = 1, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 8a1f4b355416..e94538362536 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -73,7 +73,7 @@ config SCSI_UFSHCD_PLATFORM config SCSI_UFS_QCOM bool "QCOM specific hooks to UFS controller platform driver" - depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM + depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM select PHY_QCOM_UFS help This selects the QCOM specific additions to UFSHCD platform driver. diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 6652a8171de6..4cdffa46d401 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -307,6 +307,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status) static unsigned long ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) { + struct ufs_qcom_host *host = hba->priv; struct ufs_clk_info *clki; u32 core_clk_period_in_ns; u32 tx_clk_cycles_per_us = 0; @@ -330,6 +331,16 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) {UFS_HS_G2, 0x49}, }; + /* + * The Qunipro controller does not use following registers: + * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG & + * UFS_REG_PA_LINK_STARTUP_TIMER + * But UTP controller uses SYS1CLK_1US_REG register for Interrupt + * Aggregation logic. 
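For reference (not part of the patch): the comment above boils down to one gating rule - a Qunipro controller may skip the SYS1CLK_1US/TX_SYMBOL_CLK/CLK_NS programming entirely unless interrupt aggregation still depends on SYS1CLK_1US_REG. A sketch of that decision, mirroring the guard that follows in ufs_qcom_cfg_timers():

#include <stdbool.h>

/* Returns true when the timer registers must still be programmed. */
static bool example_needs_timer_setup(bool qunipro, bool intr_aggr_allowed)
{
	/* skip only when running Qunipro *and* aggregation is off */
	return !qunipro || intr_aggr_allowed;
}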
+ */ + if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) + goto out; + if (gear == 0) { dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); goto out_error; @@ -683,6 +694,16 @@ out: return ret; } +static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = hba->priv; + + if (host->hw_ver.major == 0x1) + return UFSHCI_VERSION_11; + else + return UFSHCI_VERSION_20; +} + /** * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks * @hba: host controller instance @@ -696,13 +717,24 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) { struct ufs_qcom_host *host = hba->priv; - if (host->hw_ver.major == 0x1) - hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS; + if (host->hw_ver.major == 0x01) { + hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS + | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP + | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE; + + if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001) + hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; + } if (host->hw_ver.major >= 0x2) { + hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; + hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; + if (!ufs_qcom_cap_qunipro(host)) /* Legacy UniPro mode still need following quirks */ - hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS; + hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS + | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE + | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP); } } @@ -1005,6 +1037,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .name = "qcom", .init = ufs_qcom_init, .exit = ufs_qcom_exit, + .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, .clk_scale_notify = ufs_qcom_clk_scale_notify, .setup_clocks = ufs_qcom_setup_clocks, .hce_enable_notify = ufs_qcom_hce_enable_notify, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 648a44675880..b0ade73f8c6a 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -188,6 +188,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *desired_pwr_mode); +static int ufshcd_change_power_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_mode); static inline int ufshcd_enable_irq(struct ufs_hba *hba) { @@ -269,6 +271,11 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) */ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) { + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) { + if (hba->vops && hba->vops->get_ufs_hci_version) + return hba->vops->get_ufs_hci_version(hba); + } + return ufshcd_readl(hba, REG_UFS_VERSION); } @@ -481,6 +488,15 @@ ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) } /** + * ufshcd_disable_intr_aggr - Disables interrupt aggregation. + * @hba: per adapter instance + */ +static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) +{ + ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** * ufshcd_enable_run_stop_reg - Enable run-stop registers, * When run-stop registers are set to 1, it indicates the * host controller that it can process the requests @@ -1326,7 +1342,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->sense_buffer = cmd->sense_buffer; lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - lrbp->intr_cmd = false; + lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? 
true : false; lrbp->command_type = UTP_CMD_TYPE_SCSI; /* form UPIU before issuing the command */ @@ -2147,6 +2163,31 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, }; const char *get = action[!!peer]; int ret; + struct ufs_pa_layer_attr orig_pwr_info; + struct ufs_pa_layer_attr temp_pwr_info; + bool pwr_mode_change = false; + + if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { + orig_pwr_info = hba->pwr_info; + temp_pwr_info = orig_pwr_info; + + if (orig_pwr_info.pwr_tx == FAST_MODE || + orig_pwr_info.pwr_rx == FAST_MODE) { + temp_pwr_info.pwr_tx = FASTAUTO_MODE; + temp_pwr_info.pwr_rx = FASTAUTO_MODE; + pwr_mode_change = true; + } else if (orig_pwr_info.pwr_tx == SLOW_MODE || + orig_pwr_info.pwr_rx == SLOW_MODE) { + temp_pwr_info.pwr_tx = SLOWAUTO_MODE; + temp_pwr_info.pwr_rx = SLOWAUTO_MODE; + pwr_mode_change = true; + } + if (pwr_mode_change) { + ret = ufshcd_change_power_mode(hba, &temp_pwr_info); + if (ret) + goto out; + } + } uic_cmd.command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; @@ -2161,6 +2202,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, if (mib_val) *mib_val = uic_cmd.argument3; + + if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) + && pwr_mode_change) + ufshcd_change_power_mode(hba, &orig_pwr_info); out: return ret; } @@ -2249,6 +2294,16 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) struct uic_command uic_cmd = {0}; int ret; + if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { + ret = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1); + if (ret) { + dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", + __func__, ret); + goto out; + } + } + uic_cmd.command = UIC_CMD_DME_SET; uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); uic_cmd.argument3 = mode; @@ -2256,6 +2311,7 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); ufshcd_release(hba); +out: return ret; } @@ -2522,7 +2578,10 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); /* Configure interrupt aggregation */ - ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); + if (ufshcd_is_intr_aggr_allowed(hba)) + ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); + else + ufshcd_disable_intr_aggr(hba); /* Configure UTRL and UTMRL base address registers */ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), @@ -2628,6 +2687,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) return 0; } +static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) +{ + int tx_lanes, i, err = 0; + + if (!peer) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + else + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), + &tx_lanes); + for (i = 0; i < tx_lanes; i++) { + if (!peer) + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), + 0); + else + err = ufshcd_dme_peer_set(hba, + UIC_ARG_MIB_SEL(TX_LCC_ENABLE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), + 0); + if (err) { + dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", + __func__, peer, i, err); + break; + } + } + + return err; +} + +static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) +{ + return ufshcd_disable_tx_lcc(hba, true); +} + /** * ufshcd_link_startup - Initialize unipro link startup * @hba: per adapter instance @@ -2665,6 +2760,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba) 
 		/* failed to get the link up... retire */
 		goto out;
 
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
+		ret = ufshcd_disable_device_tx_lcc(hba);
+		if (ret)
+			goto out;
+	}
+
 	/* Include any host controller configuration via UIC commands */
 	if (hba->vops && hba->vops->link_startup_notify) {
 		ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
@@ -3073,7 +3174,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	 * false interrupt if device completes another request after resetting
 	 * aggregation and before reading the DB.
 	 */
-	ufshcd_reset_intr_aggr(hba);
+	if (ufshcd_is_intr_aggr_allowed(hba))
+		ufshcd_reset_intr_aggr(hba);
 
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index b47ff07698e8..c40a0e78a6c4 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -246,6 +246,7 @@ struct ufs_pwr_mode_info {
  * @name: variant name
  * @init: called when the driver is initialized
  * @exit: called to cleanup everything done in init
+ * @get_ufs_hci_version: called to get UFS HCI version
  * @clk_scale_notify: notifies that clks are scaled up/down
  * @setup_clocks: called before touching any of the controller registers
  * @setup_regulators: called before accessing the host controller
@@ -263,6 +264,7 @@ struct ufs_hba_variant_ops {
 	const char *name;
 	int	(*init)(struct ufs_hba *);
 	void	(*exit)(struct ufs_hba *);
+	u32	(*get_ufs_hci_version)(struct ufs_hba *);
 	void	(*clk_scale_notify)(struct ufs_hba *);
 	int	(*setup_clocks)(struct ufs_hba *, bool);
 	int	(*setup_regulators)(struct ufs_hba *, bool);
@@ -417,11 +419,45 @@ struct ufs_hba {
 	unsigned int irq;
 	bool is_irq_enabled;
 
+	/* Interrupt aggregation support is broken */
+	#define UFSHCD_QUIRK_BROKEN_INTR_AGGR			UFS_BIT(0)
+
 	/*
 	 * delay before each dme command is required as the unipro
 	 * layer has shown instabilities
 	 */
-	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS	UFS_BIT(0)
+	#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		UFS_BIT(1)
+
+	/*
+	 * If the UFS host controller has issues processing LCC (Line
+	 * Control Command) coming from the device, enable this quirk.
+	 * When this quirk is enabled, the host controller driver should
+	 * disable LCC transmission on the UFS device (by clearing the
+	 * TX_LCC_ENABLE attribute of the device to 0).
+	 */
+	#define UFSHCD_QUIRK_BROKEN_LCC				UFS_BIT(2)
+
+	/*
+	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
+	 * inbound Link supports unterminated line in HS mode. Setting this
+	 * attribute to 1 fixes moving to HS gear.
+	 */
+	#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		UFS_BIT(3)
+
+	/*
+	 * This quirk needs to be enabled if the host controller only allows
+	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
+	 * SLOW AUTO).
+	 */
+	#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		UFS_BIT(4)
+
+	/*
+	 * This quirk needs to be enabled if the host controller doesn't
+	 * advertise the correct version in the UFS_VER register. If this
+	 * quirk is enabled, the standard UFS host driver will call the
+	 * vendor-specific op (get_ufs_hci_version) to get the correct
+	 * version.
+	 */
+	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		UFS_BIT(5)
 
 	unsigned int quirks;	/* Deviations from standard UFSHCI spec.
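For reference (not part of the patch): the BROKEN_UFS_HCI_VERSION quirk just defined works together with the UFSHCI_VERSION_* values added to ufshci.h further below, where 1.1 is encoded as 0x00010100 but 2.0 as 0x00000200 - the register layout changed with UFSHCI 2.0. A decoding sketch under that reading of the encodings:

/* Sketch: extract major/minor from a raw VER register value, handling the
 * 1.x layout (major in bits 31:16) and the 2.0+ layout (major in 15:8). */
static void example_decode_ufshci_ver(u32 ver, u32 *major, u32 *minor)
{
	if (ver & 0xffff0000) {		/* legacy 1.x encoding */
		*major = ver >> 16;
		*minor = (ver >> 8) & 0xff;
	} else {			/* 2.0+ encoding */
		*major = (ver >> 8) & 0xff;
		*minor = (ver >> 4) & 0xf;
	}
}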
*/ @@ -478,6 +514,12 @@ struct ufs_hba { #define UFSHCD_CAP_CLK_SCALING (1 << 2) /* Allow auto bkops to enabled during runtime suspend */ #define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3) + /* + * This capability allows host controller driver to use the UFS HCI's + * interrupt aggregation capability. + * CAUTION: Enabling this might reduce overall UFS throughput. + */ +#define UFSHCD_CAP_INTR_AGGR (1 << 4) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -502,6 +544,15 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; } +static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) +{ + if ((hba->caps & UFSHCD_CAP_INTR_AGGR) && + !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR)) + return true; + else + return false; +} + #define ufshcd_writel(hba, val, reg) \ writel((val), (hba)->mmio_base + (reg)) #define ufshcd_readl(hba, reg) \ diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index d5721199e9cc..0ae0967aaed8 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -89,8 +89,9 @@ enum { /* Controller UFSHCI version */ enum { - UFSHCI_VERSION_10 = 0x00010000, - UFSHCI_VERSION_11 = 0x00010100, + UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */ + UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */ + UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */ }; /* @@ -206,6 +207,9 @@ enum { #define CONFIG_RESULT_CODE_MASK 0xFF #define GENERIC_ERROR_CODE_MASK 0xFF +/* GenSelectorIndex calculation macros for M-PHY attributes */ +#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane) + #define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ ((sel) & 0xFFFF)) #define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index 3fc3e21b746b..816a8a46efb8 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -198,6 +198,14 @@ enum ufs_hs_gear_tag { #define T_TC0TXMAXSDUSIZE 0x4060 #define T_TC1TXMAXSDUSIZE 0x4061 +#ifdef FALSE +#undef FALSE +#endif + +#ifdef TRUE +#undef TRUE +#endif + /* Boolean attribute values */ enum { FALSE = 0, diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index f164f24a4a55..285f77544c36 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -501,6 +501,7 @@ static void virtio_scsi_init_hdr(struct virtio_device *vdev, cmd->crn = 0; } +#ifdef CONFIG_BLK_DEV_INTEGRITY static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, struct virtio_scsi_cmd_req_pi *cmd_pi, struct scsi_cmnd *sc) @@ -524,6 +525,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, blk_rq_sectors(rq) * bi->tuple_size); } +#endif static int virtscsi_queuecommand(struct virtio_scsi *vscsi, struct virtio_scsi_vq *req_vq, @@ -546,11 +548,14 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); +#ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd_pi); - } else { + } else +#endif + { virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd); @@ -1002,6 +1007,7 @@ static int virtscsi_probe(struct virtio_device *vdev) shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; shost->nr_hw_queues = num_queues; +#ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vdev, 
VIRTIO_SCSI_F_T10_PI)) { host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | @@ -1010,6 +1016,7 @@ static int virtscsi_probe(struct virtio_device *vdev) scsi_host_set_prot(shost, host_prot); scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); } +#endif err = scsi_add_host(shost, &vdev->dev); if (err) @@ -1090,7 +1097,9 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_CHANGE, +#ifdef CONFIG_BLK_DEV_INTEGRITY VIRTIO_SCSI_F_T10_PI, +#endif }; static struct virtio_driver virtio_scsi_driver = { diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c index 289ad016d925..61346aa73178 100644 --- a/drivers/scsi/wd719x.c +++ b/drivers/scsi/wd719x.c @@ -882,7 +882,6 @@ static struct scsi_host_template wd719x_template = { .can_queue = 255, .this_id = 7, .sg_tablesize = WD719X_SG, - .cmd_per_lun = WD719X_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h index 185e30e4eb93..9c6dd45f95f5 100644 --- a/drivers/scsi/wd719x.h +++ b/drivers/scsi/wd719x.h @@ -2,8 +2,6 @@ #define _WD719X_H_ #define WD719X_SG 255 /* Scatter/gather size */ -#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but - * this is 1 for now to be safe. */ struct wd719x_sglist { __le32 ptr; diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index fe8875f0d7be..d3d1891cda3c 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -20,58 +20,15 @@ #include <linux/bitmap.h> #include <linux/slab.h> -#ifdef CONFIG_PM -static int sh_pm_runtime_suspend(struct device *dev) -{ - int ret; - - ret = pm_generic_runtime_suspend(dev); - if (ret) { - dev_err(dev, "failed to suspend device\n"); - return ret; - } - - ret = pm_clk_suspend(dev); - if (ret) { - dev_err(dev, "failed to suspend clock\n"); - pm_generic_runtime_resume(dev); - return ret; - } - - return 0; -} - -static int sh_pm_runtime_resume(struct device *dev) -{ - int ret; - - ret = pm_clk_resume(dev); - if (ret) { - dev_err(dev, "failed to resume clock\n"); - return ret; - } - - return pm_generic_runtime_resume(dev); -} - static struct dev_pm_domain default_pm_domain = { .ops = { - .runtime_suspend = sh_pm_runtime_suspend, - .runtime_resume = sh_pm_runtime_resume, + USE_PM_CLK_RUNTIME_OPS USE_PLATFORM_PM_SLEEP_OPS }, }; -#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) - -#else - -#define DEFAULT_PM_DOMAIN_PTR NULL - -#endif /* CONFIG_PM */ - static struct pm_clk_notifier_block platform_bus_notifier = { - .pm_domain = DEFAULT_PM_DOMAIN_PTR, + .pm_domain = &default_pm_domain, .con_ids = { NULL, }, }; diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 3bad441de8dc..c41b5575df05 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -647,6 +647,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kib_dev_t *dev; struct ib_qp_init_attr *init_qp_attr; struct kib_sched_info *sched; + struct ib_cq_init_attr cq_attr = {}; kib_conn_t *conn; struct ib_cq *cq; unsigned long flags; @@ -742,10 +743,11 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kiblnd_map_rx_descs(conn); + cq_attr.cqe = IBLND_CQ_ENTRIES(version); + cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, 
conn, - IBLND_CQ_ENTRIES(version), - kiblnd_get_completion_vector(conn, cpt)); + &cq_attr); if (IS_ERR(cq)) { CERROR("Can't create CQ: %ld, cqe: %d\n", PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h index 3925db160650..513c81f43d6e 100644 --- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h +++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h @@ -189,22 +189,7 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount) #endif - -/* - * After 3.1, kernel's nameidata.intent.open.flags is different - * with lustre's lookup_intent.it_flags, as lustre's it_flags' - * lower bits equal to FMODE_xxx while kernel doesn't transliterate - * lower bits of nameidata.intent.open.flags to FMODE_xxx. - * */ #include <linux/version.h> -static inline int ll_namei_to_lookup_intent_flag(int flag) -{ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) - flag = (flag & ~O_ACCMODE) | OPEN_FMODE(flag); -#endif - return flag; -} - #include <linux/fs.h> # define ll_umode_t umode_t diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c index cc3ab351943e..f9262243f935 100644 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c @@ -87,7 +87,7 @@ static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask) /* return cpumask of HTs in the same core */ static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask) { - cpumask_copy(mask, topology_thread_cpumask(cpu)); + cpumask_copy(mask, topology_sibling_cpumask(cpu)); } static void cfs_node_to_cpumask(int node, cpumask_t *mask) diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 5f918e3c4683..528af9011653 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -57,12 +57,6 @@ #define VM_FAULT_RETRY 0 #endif -/* Kernel 3.1 kills LOOKUP_CONTINUE, LOOKUP_PARENT is equivalent to it. - * seem kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0 */ -#ifndef LOOKUP_CONTINUE -#define LOOKUP_CONTINUE LOOKUP_PARENT -#endif - /** Only used on client-side for indicating the tail of dir hash/offset. */ #define LL_DIR_END_OFF 0x7fffffffffffffffULL #define LL_DIR_END_OFF_32BIT 0x7fffffffUL diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 3711e671a4df..69b203651905 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -118,7 +118,7 @@ failed: return rc; } -static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd) +static const char *ll_follow_link(struct dentry *dentry, void **cookie) { struct inode *inode = d_inode(dentry); struct ptlrpc_request *request = NULL; @@ -126,32 +126,22 @@ static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd) char *symname = NULL; CDEBUG(D_VFSTRACE, "VFS Op\n"); - /* Limit the recursive symlink depth to 5 instead of default - * 8 links when kernel has 4k stack to prevent stack overflow. - * For 8k stacks we need to limit it to 7 for local servers. 
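For reference (not part of the patch): the topology_thread_cpumask() -> topology_sibling_cpumask() switches in the lustre hunks here track a kernel-wide rename; the helper still returns the mask of hyper-thread siblings. A minimal sketch of the idiom the ptlrpc code below relies on:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Sketch: a weight greater than 1 for CPU 0's sibling mask means the
 * machine is hyper-threaded, which ptlrpc uses to derate its thread
 * factor. */
static unsigned int example_ht_width(void)
{
	return cpumask_weight(topology_sibling_cpumask(0));
}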
*/ - if (THREAD_SIZE < 8192 && current->link_count >= 6) { - rc = -ELOOP; - } else if (THREAD_SIZE == 8192 && current->link_count >= 8) { - rc = -ELOOP; - } else { - ll_inode_size_lock(inode); - rc = ll_readlink_internal(inode, &request, &symname); - ll_inode_size_unlock(inode); - } + ll_inode_size_lock(inode); + rc = ll_readlink_internal(inode, &request, &symname); + ll_inode_size_unlock(inode); if (rc) { ptlrpc_req_finished(request); - request = NULL; - symname = ERR_PTR(rc); + return ERR_PTR(rc); } - nd_set_link(nd, symname); /* symname may contain a pointer to the request message buffer, * we delay request releasing until ll_put_link then. */ - return request; + *cookie = request; + return symname; } -static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) +static void ll_put_link(struct inode *unused, void *cookie) { ptlrpc_req_finished(cookie); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 8e61421515cb..344189ac5698 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -557,7 +557,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, * there are. */ /* weight is # of HTs */ - if (cpumask_weight(topology_thread_cpumask(0)) > 1) { + if (cpumask_weight(topology_sibling_cpumask(0)) > 1) { /* depress thread factor for hyper-thread */ factor = factor - (factor >> 1) + (factor >> 3); } @@ -2768,7 +2768,7 @@ int ptlrpc_hr_init(void) init_waitqueue_head(&ptlrpc_hr.hr_waitq); - weight = cpumask_weight(topology_thread_cpumask(0)); + weight = cpumask_weight(topology_sibling_cpumask(0)); cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { hrp->hrp_cpt = i; diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c index d64b6ed9c0c9..aed49bf762b4 100644 --- a/drivers/staging/rts5208/rtsx.c +++ b/drivers/staging/rts5208/rtsx.c @@ -230,7 +230,6 @@ static struct scsi_host_template rtsx_host_template = { /* queue commands only, only one command per LUN */ .can_queue = 1, - .cmd_per_lun = 1, /* unknown initiator id */ .this_id = -1, diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 74e6114ff18f..4672bb1a24d0 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -23,7 +23,7 @@ #include <linux/module.h> #include <linux/idr.h> #include <asm/unaligned.h> -#include <scsi/scsi_device.h> +#include <scsi/scsi_proto.h> #include <scsi/iscsi_proto.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c index 34c3cd1b05ce..5fabcd3d623f 100644 --- a/drivers/target/iscsi/iscsi_target_device.c +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -17,7 +17,6 @@ * GNU General Public License for more details. 
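For reference (not part of the patch): ll_follow_link above shows the reworked ->follow_link contract of this kernel generation - return the target string (or an ERR_PTR) and park whatever must stay alive in *cookie, which the VFS hands back to ->put_link. A hypothetical minimal filesystem following the same contract (names and the fixed target are inventions for illustration):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>

/* Hypothetical fs: build the target into a kmalloc'd buffer and free it
 * once the VFS is done walking the link. */
static const char *example_follow_link(struct dentry *dentry, void **cookie)
{
	char *target = kstrdup("/hypothetical/target", GFP_KERNEL);

	if (!target)
		return ERR_PTR(-ENOMEM);

	*cookie = target;	/* released in example_put_link() */
	return target;
}

static void example_put_link(struct inode *unused, void *cookie)
{
	kfree(cookie);
}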
******************************************************************************/ -#include <scsi/scsi_device.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index b0224a77e26d..fe9a582ca6af 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -17,7 +17,7 @@ ******************************************************************************/ #include <asm/unaligned.h> -#include <scsi/scsi_device.h> +#include <scsi/scsi_proto.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 18b0f9703ff2..ce81f17ad1ba 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -30,7 +30,7 @@ #include <linux/ctype.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> -#include <scsi/scsi.h> +#include <scsi/scsi_proto.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 4f8d4d459aa4..8ca373774276 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -28,8 +28,7 @@ #include <linux/configfs.h> #include <linux/export.h> #include <linux/file.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_proto.h> #include <asm/unaligned.h> #include <target/target_core_base.h> diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ce5f768181ff..417f88b498c7 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -36,8 +36,8 @@ #include <asm/unaligned.h> #include <net/sock.h> #include <net/tcp.h> -#include <scsi/scsi.h> -#include <scsi/scsi_device.h> +#include <scsi/scsi_common.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 35bfe77160d8..41f4f270f919 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -29,8 +29,8 @@ #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/export.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> + +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 3f27bfd816d8..d48379a258c7 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -31,8 +31,7 @@ #include <linux/spinlock.h> #include <linux/module.h> #include <linux/falloc.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> +#include <scsi/scsi_proto.h> #include <asm/unaligned.h> #include <target/target_core_base.h> diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 8c965683789f..972ed1781ae2 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -35,8 +35,7 @@ #include <linux/genhd.h> #include <linux/file.h> #include <linux/module.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> +#include <scsi/scsi_proto.h> #include <asm/unaligned.h> #include <target/target_core_base.h> diff --git a/drivers/target/target_core_pr.c 
b/drivers/target/target_core_pr.c index a15411c79ae9..7ca642361f9c 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -28,8 +28,7 @@ #include <linux/spinlock.h> #include <linux/list.h> #include <linux/file.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_proto.h> #include <asm/unaligned.h> #include <target/target_core_base.h> diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index ecc5eaef13d6..26581e215141 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -36,9 +36,7 @@ #include <linux/module.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> #include <scsi/scsi_device.h> -#include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 820d3052b775..6d2007e35df6 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -16,13 +16,13 @@ #define PS_TIMEOUT_OTHER (500*HZ) #include <linux/device.h> -#include <scsi/scsi_driver.h> -#include <scsi/scsi_device.h> #include <linux/kref.h> #include <linux/kobject.h> +struct scsi_device; + struct pscsi_plugin_task { - unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; + unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER]; int pscsi_direction; int pscsi_result; u32 pscsi_resid; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index d16489b6a1a4..b2d8f6f91633 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -29,8 +29,7 @@ #include <linux/timer.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 733824e3825f..43719b393ca9 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -25,7 +25,7 @@ #include <linux/ratelimit.h> #include <linux/crc-t10dif.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> +#include <scsi/scsi_proto.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 7912aa124385..52ea640274f4 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -24,7 +24,8 @@ #include <linux/module.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 03538994d2f7..40f6c1378041 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -33,9 +33,6 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/configfs.h> -#include <scsi/scsi.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_host.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 315ec3458eeb..a5bb0c46e57e 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -27,8 +27,6 @@ #include <linux/spinlock.h> #include <linux/list.h> #include <linux/export.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> #include 
<target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 47f064415bf6..84de757bd458 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -32,8 +32,7 @@ #include <linux/export.h> #include <net/sock.h> #include <net/tcp.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 675f2d9d1f14..fdf867230e18 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -37,9 +37,7 @@ #include <asm/unaligned.h> #include <net/sock.h> #include <net/tcp.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_tcq.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 1738b1646988..e44cc94b12cb 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -25,8 +25,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 07d2996d8c1f..5efef9a2a3d3 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -19,12 +19,13 @@ #include <linux/spinlock.h> #include <linux/module.h> #include <linux/idr.h> +#include <linux/kernel.h> #include <linux/timer.h> #include <linux/parser.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> #include <linux/uio_driver.h> #include <net/genetlink.h> +#include <scsi/scsi_common.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_backend.h> diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 8fd680ac941b..5ec0d00edaa3 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -25,8 +25,7 @@ #include <linux/spinlock.h> #include <linux/list.h> #include <linux/configfs.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_proto.h> #include <asm/unaligned.h> #include <target/target_core_base.h> diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index edcafa4490c0..1bf78e7c994c 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -30,10 +30,6 @@ #include <linux/hash.h> #include <linux/percpu_ida.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 65dce1345966..86b699b94c7b 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -34,10 +34,6 @@ #include <linux/kernel.h> #include <linux/ctype.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_cmnd.h> #include <scsi/libfc.h> #include <target/target_core_base.h> diff --git a/drivers/target/tcm_fc/tfc_io.c 
b/drivers/target/tcm_fc/tfc_io.c index 583e755d8091..fe585d1cce23 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -39,10 +39,6 @@ #include <linux/hash.h> #include <linux/ratelimit.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_cmnd.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index ccee7e332a4d..f2a616d4f2c4 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -32,10 +32,6 @@ #include <linux/rculist.h> #include <linux/kref.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_cmnd.h> #include <scsi/libfc.h> #include <target/target_core_base.h> diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index 0ec756c62bcf..d7b846d41630 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -55,7 +55,7 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl) value_array[count] = !!(mctrl & mctrl_gpios_desc[i].mctrl); count++; } - gpiod_set_array(count, desc_array, value_array); + gpiod_set_array_value(count, desc_array, value_array); } EXPORT_SYMBOL_GPL(mctrl_gpio_set); diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index f9b4882fce52..6ce932f90ef8 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c @@ -16,7 +16,6 @@ #include <linux/usb/composite.h> #include <linux/usb/gadget.h> #include <linux/usb/storage.h> -#include <scsi/scsi.h> #include <scsi/scsi_tcq.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.h b/drivers/usb/gadget/legacy/tcm_usb_gadget.h index 8289219925b8..9fb3544cc80f 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.h +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.h @@ -6,7 +6,6 @@ #include <linux/usb/composite.h> #include <linux/usb/uas.h> #include <linux/usb/storage.h> -#include <scsi/scsi.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 6431d08c8d9d..a4dbb0cd80da 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -635,7 +635,6 @@ static struct scsi_host_template mts_scsi_host_template = { .sg_tablesize = SG_ALL, .can_queue = 1, .this_id = -1, - .cmd_per_lun = 1, .use_clustering = 1, .emulated = 1, .slave_alloc = mts_slave_alloc, diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 82503a7ff6c8..cce22ff1c2eb 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -69,12 +69,6 @@ #define USB_DEVICE_ID_LD_HYBRID 0x2090 /* USB Product ID of Automotive Hybrid */ #define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0 /* USB Product ID of Heat control */ -#define USB_VENDOR_ID_VERNIER 0x08f7 -#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002 -#define USB_DEVICE_ID_VERNIER_SKIP 0x0003 -#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 -#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 - #ifdef CONFIG_USB_DYNAMIC_MINORS #define USB_LD_MINOR_BASE 0 #else @@ -115,10 +109,6 @@ static const struct usb_device_id ld_usb_table[] = { { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) 
}, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, - { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) }, - { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, - { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, - { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ld_usb_table); diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index b7cf1982d1d9..56ecb8b5115d 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -171,7 +171,7 @@ config USB_SERIAL_FTDI_SIO ---help--- Say Y here if you want to use a FTDI SIO single port USB to serial converter device. The implementation I have is called the USC-1000. - This driver has also be tested with the 245 and 232 devices. + This driver has also been tested with the 245 and 232 devices. See <http://ftdi-usb-sio.sourceforge.net/> for more information on this driver and the device. diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 0e400f382f3a..996ef1e882d3 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -558,7 +558,6 @@ struct scsi_host_template usb_stor_host_template = { /* queue commands only, only one command per LUN */ .can_queue = 1, - .cmd_per_lun = 1, /* unknown initiator id */ .this_id = -1, diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 6d3122afeed3..f68921909552 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -811,7 +811,6 @@ static struct scsi_host_template uas_host_template = { .can_queue = 65536, /* Is there a limit on the _host_ ? */ .this_id = -1, .sg_tablesize = SG_NONE, - .cmd_per_lun = 1, /* until we override it */ .skip_settle_delay = 1, .use_blk_tags = 1, }; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index ea32b386797f..5b30d27dab50 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -37,7 +37,8 @@ #include <linux/fs.h> #include <linux/miscdevice.h> #include <asm/unaligned.h> -#include <scsi/scsi.h> +#include <scsi/scsi_common.h> +#include <scsi/scsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> diff --git a/drivers/video/fbdev/amifb.c b/drivers/video/fbdev/amifb.c index 35f7900a0573..ee3a703acf23 100644 --- a/drivers/video/fbdev/amifb.c +++ b/drivers/video/fbdev/amifb.c @@ -3705,8 +3705,8 @@ default_chipset: * access the videomem with writethrough cache */ info->fix.smem_start = (u_long)ZTWO_PADDR(videomemory); - videomemory = (u_long)ioremap_writethrough(info->fix.smem_start, - info->fix.smem_len); + videomemory = (u_long)ioremap_wt(info->fix.smem_start, + info->fix.smem_len); if (!videomemory) { dev_warn(&pdev->dev, "Unable to map videomem cached writethrough\n"); diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index cb9ee2556850..d6ce613e12ad 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -3185,8 +3185,7 @@ int __init atafb_init(void) /* Map the video memory (physical address given) to somewhere * in the kernel address space. 
*/ - external_screen_base = ioremap_writethrough(external_addr, - external_len); + external_screen_base = ioremap_wt(external_addr, external_len); if (external_vgaiobase) external_vgaiobase = (unsigned long)ioremap(external_vgaiobase, 0x10000); diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c index a1b7e5fa9b09..9476d196f510 100644 --- a/drivers/video/fbdev/hpfb.c +++ b/drivers/video/fbdev/hpfb.c @@ -241,8 +241,8 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base) fb_info.fix.line_length = fb_width; fb_height = (in_8(fb_regs + HPFB_FBHMSB) << 8) | in_8(fb_regs + HPFB_FBHLSB); fb_info.fix.smem_len = fb_width * fb_height; - fb_start = (unsigned long)ioremap_writethrough(fb_info.fix.smem_start, - fb_info.fix.smem_len); + fb_start = (unsigned long)ioremap_wt(fb_info.fix.smem_start, + fb_info.fix.smem_len); hpfb_defined.xres = (in_8(fb_regs + HPFB_DWMSB) << 8) | in_8(fb_regs + HPFB_DWLSB); hpfb_defined.yres = (in_8(fb_regs + HPFB_DHMSB) << 8) | in_8(fb_regs + HPFB_DHLSB); hpfb_defined.xres_virtual = hpfb_defined.xres; diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index eba1b7ac7294..5447b8186332 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -502,9 +502,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, INIT_LIST_HEAD(&vp_dev->virtqueues); spin_lock_init(&vp_dev->lock); - /* Disable MSI/MSIX to bring device to a known good state. */ - pci_msi_off(pci_dev); - /* enable the device */ rc = pci_enable_device(pci_dev); if (rc) diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c index 3e62ee4b3b66..f4a369429553 100644 --- a/drivers/xen/xen-acpi-cpuhotplug.c +++ b/drivers/xen/xen-acpi-cpuhotplug.c @@ -46,13 +46,7 @@ static int xen_acpi_processor_enable(struct acpi_device *device) unsigned long long value; union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; - struct acpi_processor *pr; - - pr = acpi_driver_data(device); - if (!pr) { - pr_err(PREFIX "Cannot find driver data\n"); - return -EINVAL; - } + struct acpi_processor *pr = acpi_driver_data(device); if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { /* Declared with "Processor" statement; match ProcessorID */ @@ -77,7 +71,7 @@ static int xen_acpi_processor_enable(struct acpi_device *device) pr->id = xen_pcpu_id(pr->acpi_id); - if ((int)pr->id < 0) + if (invalid_logical_cpuid(pr->id)) /* This cpu is not presented at hypervisor, try to hotadd it */ if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) { pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n", @@ -226,7 +220,7 @@ static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr) return AE_ERROR; pr->id = xen_hotadd_cpu(pr); - if ((int)pr->id < 0) + if (invalid_logical_cpuid(pr->id)) return AE_ERROR; /* diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index b7f51504f85a..39223c3e99ad 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -49,10 +49,7 @@ #include <generated/utsrelease.h> -#include <scsi/scsi.h> -#include <scsi/scsi_dbg.h> -#include <scsi/scsi_eh.h> -#include <scsi/scsi_tcq.h> +#include <scsi/scsi_host.h> /* SG_ALL */ #include <target/target_core_base.h> #include <target/target_core_fabric.h>
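
A note on the recurring include churn above: the target-core, fabric and scsiback hunks consistently drop <scsi/scsi.h>, <scsi/scsi_cmnd.h>, <scsi/scsi_device.h> and <scsi/scsi_host.h> in favour of <scsi/scsi_proto.h> (pure SCSI wire-protocol definitions: opcodes, sense keys, status values), adding <scsi/scsi_common.h> where protocol helpers are needed, so target-mode code no longer pulls in the initiator-side midlayer headers. A minimal sketch of code that builds against only the new headers; demo_cdb_len() is an illustrative name, not from the patch:

	#include <scsi/scsi_proto.h>	/* READ_10 and other opcodes */
	#include <scsi/scsi_common.h>	/* scsi_command_size() */

	/* Derive the CDB length of a READ(10) from its group code. */
	static int demo_cdb_len(void)
	{
		unsigned char cdb[16] = { READ_10 };

		return scsi_command_size(cdb);	/* 10 for group-1 opcodes */
	}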
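The ll_follow_link()/ll_put_link() conversion reflects the reworked VFS symlink API in this series: ->follow_link() now returns the link body (or an ERR_PTR) directly and stores any state to release later through an opaque *cookie, while ->put_link() receives only the inode and that cookie instead of a nameidata. A minimal sketch of the convention, assuming a hypothetical filesystem whose link body lives in a kmalloc'd buffer (the demo_* names are illustrative):

	#include <linux/fs.h>
	#include <linux/slab.h>

	static const char *demo_follow_link(struct dentry *dentry, void **cookie)
	{
		/* A real filesystem reads the body from its own metadata;
		 * lustre, above, fetches it with an RPC request. */
		char *target = kstrdup("/hypothetical/target", GFP_KERNEL);

		if (!target)
			return ERR_PTR(-ENOMEM);

		/* Whatever is stored through *cookie comes back unchanged
		 * as the second argument of ->put_link(). */
		*cookie = target;
		return target;
	}

	static void demo_put_link(struct inode *unused, void *cookie)
	{
		kfree(cookie);	/* release the buffer backing the name */
	}

	static const struct inode_operations demo_symlink_iops = {
		.readlink	= generic_readlink,
		.follow_link	= demo_follow_link,
		.put_link	= demo_put_link,
	};

Tying the name's lifetime to the cookie rather than to the nameidata is what lets lustre return the RPC reply buffer directly and defer ptlrpc_req_finished() until ll_put_link(), exactly as the hunk's retained comment says.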
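The lustre and ptlrpc hunks also track the rename of topology_thread_cpumask() to topology_sibling_cpumask(); it still yields the mask of hardware threads sharing a core, and the converted call sites only ever take its weight. A hedged one-function sketch of that idiom (demo_ht_weight is an assumed name):

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Number of hardware threads sharing CPU0's core; a value
	 * greater than 1 means hyper-threading is active. */
	static unsigned int demo_ht_weight(void)
	{
		return cpumask_weight(topology_sibling_cpumask(0));
	}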
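Similarly, the amifb/atafb/hpfb hunks swap ioremap_writethrough() for ioremap_wt(), the renamed generic helper for write-through mappings. A minimal sketch under assumed placeholder arguments:

	#include <linux/io.h>
	#include <linux/kernel.h>

	static void __iomem *demo_map_videomem(unsigned long phys, unsigned long len)
	{
		/* Write-through suits framebuffers: reads may be served
		 * from cache, writes go straight to the device. */
		void __iomem *base = ioremap_wt(phys, len);

		if (!base)
			pr_warn("demo: cannot map videomem write-through\n");
		return base;
	}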